// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/*
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <linux/clk.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

#define HVS_NUM_CHANNELS 3
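
/*
 * The HVS has three output FIFOs ("channels"). Every active CRTC must
 * be assigned one of them, and the assignment is tracked across atomic
 * commits through the vc4_hvs_state private object below.
 */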

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_hvs_state {
	struct drm_private_state base;

	struct {
		unsigned in_use: 1;
		struct drm_crtc_commit *pending_commit;
	} fifo_state[HVS_NUM_CHANNELS];
};

static struct vc4_hvs_state *
to_vc4_hvs_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_hvs_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}
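
/*
 * The CTM state is a single private object shared by all CRTCs, so it
 * is protected by a dedicated modeset lock rather than any one CRTC's
 * lock; the helper below grabs that lock before handing the state out.
 */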

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->ctm_manager);
}

static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
	struct vc4_ctm_state *ctm_state;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}
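
/*
 * Worked example for vc4_ctm_s31_32_to_s0_9(): 0.5 in S31.32 is
 * 0x0000000080000000. The sign bit and bits 62:32 are clear, so we
 * keep fraction bits 31:23, i.e. 0x100 = 256/512 = 0.5 in S0.9.
 */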

static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
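
/*
 * Helpers to fetch our HVS channels private state: from the new state,
 * from the old state, and by acquiring it (pulling the private object
 * into the commit), respectively.
 */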

static struct vc4_hvs_state *
vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		if (vc4_state->assigned_channel != 2)
			continue;

		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 3 means 'connect DSP3 to
		 * FIFO X', and SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In this case, FIFO 2 is directly accessed by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_state->feed_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}
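
/*
 * On the BCM2711 the HVS has more outputs, and the FIFO-to-output
 * muxing is programmed per output: each pixelvalve has its own mux
 * field in one of the SCALER_DISP* registers below.
 */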

static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);

		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;
		case 3:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;
			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;
		case 4:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;
			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));
			break;
		case 5:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;
			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;
		default:
			break;
		}
	}
}
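
/*
 * Commit-time ordering matters below: underruns are masked and, on
 * BCM2711, the core clock is raised before touching the hardware, and
 * we wait for any previous commit still using one of our FIFOs to
 * complete before reprogramming it.
 */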

static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	struct vc4_hvs_state *old_hvs_state;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state;

		if (!new_crtc_state->commit)
			continue;

		vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
		vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
	}

	if (vc4->hvs->hvs5)
		clk_set_min_rate(hvs->core_clk, 500000000);

	old_hvs_state = vc4_hvs_get_old_global_state(state);
	if (IS_ERR(old_hvs_state))
		return;

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(old_crtc_state);
		unsigned int channel = vc4_crtc_state->assigned_channel;
		int ret;

		if (channel == VC4_HVS_CHANNEL_DISABLED)
			continue;

		if (!old_hvs_state->fifo_state[channel].in_use)
			continue;

		ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
		if (ret)
			drm_err(dev, "Timed out waiting for commit\n");
	}

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	if (vc4->hvs->hvs5)
		vc5_hvs_pv_muxing_commit(vc4, state);
	else
		vc4_hvs_pv_muxing_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	if (vc4->hvs->hvs5)
		clk_set_min_rate(hvs->core_clk, 0);
}
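
/*
 * Stash a reference to each in-flight CRTC commit into the FIFO state,
 * so that a later commit reusing the same FIFO can wait for it from
 * vc4_atomic_commit_tail() without racing with its cleanup.
 */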

static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct vc4_hvs_state *hvs_state;
	struct drm_crtc *crtc;
	unsigned int i;

	hvs_state = vc4_hvs_get_new_global_state(state);
	if (IS_ERR(hvs_state))
		return PTR_ERR(hvs_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(crtc_state);
		unsigned int channel =
			vc4_crtc_state->assigned_channel;

		if (channel == VC4_HVS_CHANNEL_DISABLED)
			continue;

		if (!hvs_state->fifo_state[channel].in_use)
			continue;

		hvs_state->fifo_state[channel].pending_commit =
			drm_crtc_commit_get(crtc_state->commit);
	}

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			struct vc4_crtc_state *vc4_crtc_state =
				to_vc4_crtc_state(new_crtc_state);

			/* fifo is 1-based since 0 disables CTM. */
			int fifo = vc4_crtc_state->assigned_channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTMs configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}
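
/*
 * The load tracker keeps running totals of the memory-bus and HVS
 * bandwidth needed by the active planes: each plane's contribution is
 * subtracted for its old state and added back for its new one before
 * the totals are checked against the limits below.
 */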

static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* HVS clock is supposed to run @ 250MHz, let's take a margin and
	 * consider the maximum number of cycles is 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}

static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}

static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;
	unsigned int i;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;

		if (!old_state->fifo_state[i].pending_commit)
			continue;

		state->fifo_state[i].pending_commit =
			drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
	}

	return &state->base;
}

static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_state->fifo_state[i].pending_commit)
			continue;

		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
	}

	kfree(hvs_state);
}

static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
};

static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}

static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}

/*
 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 * the TXP (and therefore all the CRTCs found on that platform).
 *
 * The naive (and our initial) implementation would just iterate over
 * all the active CRTCs, try to find a suitable FIFO, and then remove it
 * from the pool of available FIFOs. However, there are a few corner
 * cases that need to be considered:
 *
 * - When running in a dual-display setup (so with two CRTCs involved),
 *   we can update the state of a single CRTC (for example by changing
 *   its mode using xrandr under X11) without affecting the other. In
 *   this case, the other CRTC wouldn't be in the state at all, so we
 *   need to consider all the running CRTCs in the DRM device to assign
 *   a FIFO, not just the one in the state.
 *
 * - To fix the above, we can't use drm_atomic_get_crtc_state on all
 *   enabled CRTCs to pull their CRTC state into the global state, since
 *   a page flip would start considering their vblank to complete. Since
 *   we don't have a guarantee that they are actually active, that
 *   vblank might never happen, and shouldn't even be considered if we
 *   want to do a page flip on a single CRTC. That can be tested by
 *   doing a modetest -v first on HDMI1 and then on HDMI0.
 *
 * - Since we need the pixelvalve to be disabled and enabled back when
 *   the FIFO is changed, we should keep the FIFO assigned for as long
 *   as the CRTC is enabled, only considering it free again once that
 *   CRTC has been disabled. This can be tested by booting X11 on a
 *   single display, and changing the resolution down and then back up.
 */
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct vc4_hvs_state *hvs_new_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int unassigned_channels = 0;
	unsigned int i;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
		if (!hvs_new_state->fifo_state[i].in_use)
			unassigned_channels |= BIT(i);

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct vc4_crtc_state *old_vc4_crtc_state =
			to_vc4_crtc_state(old_crtc_state);
		struct vc4_crtc_state *new_vc4_crtc_state =
			to_vc4_crtc_state(new_crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int matching_channels;
		unsigned int channel;

		/* Nothing to do here, let's skip it */
		if (old_crtc_state->enable == new_crtc_state->enable)
			continue;

		/* Muxing will need to be modified, mark it as such */
		new_vc4_crtc_state->update_muxing = true;

		/* If we're disabling our CRTC, we put back our channel */
		if (!new_crtc_state->enable) {
			channel = old_vc4_crtc_state->assigned_channel;
			hvs_new_state->fifo_state[channel].in_use = false;
			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
			continue;
		}

		/*
		 * The problem we have to solve here is that we have
		 * up to 7 encoders, connected to up to 6 CRTCs.
		 *
		 * Those CRTCs, depending on the instance, can be
		 * routed to 1, 2 or 3 HVS FIFOs, and we need to set
		 * the muxing between FIFOs and outputs in the HVS
		 * accordingly.
		 *
		 * It would be pretty hard to come up with an
		 * algorithm that would generically solve
		 * this. However, the current routing trees we support
		 * allow us to simplify the problem a bit.
		 *
		 * Indeed, with the current supported layouts, if we
		 * try to assign the FIFOs in ascending CRTC index
		 * order, we can't fall into the situation where an
		 * earlier CRTC that had multiple routes is assigned
		 * one that was the only option for a later CRTC.
		 *
		 * If the layout changes and doesn't give us that in
		 * the future, we will need to have something smarter,
		 * but it works so far.
		 */
		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
		if (!matching_channels)
			return -EINVAL;

		channel = ffs(matching_channels) - 1;
		new_vc4_crtc_state->assigned_channel = channel;
		unassigned_channels &= ~BIT(channel);
		hvs_new_state->fifo_state[channel].in_use = true;
	}

	return 0;
}
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_pv_muxing_atomic_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return vc4_load_tracker_atomic_check(state);
}

static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
	.atomic_commit_setup = vc4_atomic_commit_setup,
	.atomic_commit_tail = vc4_atomic_commit_tail,
};

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
					      "brcm,bcm2711-vc5");
	int ret;

	/*
	 * The limits enforced by the load tracker aren't relevant for
	 * the BCM2711, but the load tracker computations are used for
	 * the core clock rate calculation.
	 */
	if (!is_vc5) {
		/* Start with the load tracker enabled. Can be
		 * disabled through the debugfs load_tracker file.
		 */
		vc4->load_tracker_enabled = true;
	}

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	if (is_vc5) {
		dev->mode_config.max_width = 7680;
		dev->mode_config.max_height = 7680;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.helper_private = &vc4_mode_config_helpers;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;

	ret = vc4_ctm_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_load_tracker_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_hvs_channels_obj_init(vc4);
	if (ret)
		return ret;

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}