drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

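/*
 * dm_crtc_get_scanoutpos() - Report the current scanout position of a CRTC.
 * The values from dc_stream_get_scanoutpos() are packed back into the
 * register-style format the base driver expects: *position carries the
 * vertical/horizontal position and *vbl the vblank start/end lines.
 */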
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

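/* True when the CRTC state has FreeSync/VRR engaged (variable or fixed rate). */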
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters carrying the amdgpu device and
 *                    IRQ source
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

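/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: common IRQ parameters carrying the amdgpu device
 *
 * In VRR mode, core vblank handling is deferred to this handler, which runs
 * after the end of the front-porch. It also drives below-the-range (BTR)
 * processing for ASICs older than AMDGPU_FAMILY_AI.
 */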
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

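/* DRM audio component glue: return the ELD of the connector driving @port. */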
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

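/*
 * Bring up the DMUB service: copy firmware and VBIOS into the reserved
 * framebuffer regions, program the hardware parameters, wait for the
 * firmware auto-load, and hook the service into DC.
 */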
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vlank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

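/* Request and validate the DMCU firmware, for the ASICs that need it. */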
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

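/*
 * Software-side DMUB setup: pick and validate the DMUB firmware for this
 * ASIC, create the DMUB service, and carve a VRAM buffer into the regions
 * the firmware needs (inst const, BSS/data, VBIOS, mailbox, trace buffer,
 * firmware state).
 */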
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

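/* Start MST topology management on every connector with an MST link. */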
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	if (!adev->dm.fw_dmcu)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}

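/* Suspend or resume the MST topology managers across an S3 cycle. */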
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the window driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the window driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

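/*
 * Enable or disable pflip/vblank interrupts for every stream in @state that
 * has at least one plane; used to quiesce and restart display IRQs around a
 * GPU reset.
 */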
static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

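/*
 * Build and commit a DC state with every stream and its planes removed,
 * leaving the hardware driving no outputs; used on suspend during GPU reset.
 */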
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

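/* Return the first connector in @state whose new state points at @crtc. */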
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

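/*
 * Emulate a link detection: mark the link as disconnected, create a sink
 * matching the connector signal type, and try to read the local EDID into it.
 */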
1706static void emulated_link_detect(struct dc_link *link)
1707{
1708 struct dc_sink_init_data sink_init_data = { 0 };
1709 struct display_sink_capability sink_caps = { 0 };
1710 enum dc_edid_status edid_status;
1711 struct dc_context *dc_ctx = link->ctx;
1712 struct dc_sink *sink = NULL;
1713 struct dc_sink *prev_sink = NULL;
1714
1715 link->type = dc_connection_none;
1716 prev_sink = link->local_sink;
1717
1718 if (prev_sink != NULL)
1719 dc_sink_retain(prev_sink);
1720
1721 switch (link->connector_signal) {
1722 case SIGNAL_TYPE_HDMI_TYPE_A: {
1723 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1724 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1725 break;
1726 }
1727
1728 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1729 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1730 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1731 break;
1732 }
1733
1734 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1735 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1736 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1737 break;
1738 }
1739
1740 case SIGNAL_TYPE_LVDS: {
1741 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1742 sink_caps.signal = SIGNAL_TYPE_LVDS;
1743 break;
1744 }
1745
1746 case SIGNAL_TYPE_EDP: {
1747 sink_caps.transaction_type =
1748 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1749 sink_caps.signal = SIGNAL_TYPE_EDP;
1750 break;
1751 }
1752
1753 case SIGNAL_TYPE_DISPLAY_PORT: {
1754 sink_caps.transaction_type =
1755 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1756 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1757 break;
1758 }
1759
1760 default:
1761 DC_ERROR("Invalid connector type! signal:%d\n",
1762 link->connector_signal);
1763 return;
1764 }
1765
1766 sink_init_data.link = link;
1767 sink_init_data.sink_signal = sink_caps.signal;
1768
1769 sink = dc_sink_create(&sink_init_data);
1770 if (!sink) {
1771 DC_ERROR("Failed to create sink!\n");
1772 return;
1773 }
1774
1775 /* dc_sink_create returns a new reference */
1776 link->local_sink = sink;
1777
1778 edid_status = dm_helpers_read_local_edid(
1779 link->ctx,
1780 link,
1781 sink);
1782
1783 if (edid_status != EDID_OK)
1784 DC_ERROR("Failed to read EDID");
1785
1786}
1787
1788static void dm_gpureset_commit_state(struct dc_state *dc_state,
1789 struct amdgpu_display_manager *dm)
1790{
1791 struct {
1792 struct dc_surface_update surface_updates[MAX_SURFACES];
1793 struct dc_plane_info plane_infos[MAX_SURFACES];
1794 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1795 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1796 struct dc_stream_update stream_update;
1797 } *bundle;
1798 int k, m;
1799
1800 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1801
1802 if (!bundle) {
1803 dm_error("Failed to allocate update bundle\n");
1804 goto cleanup;
1805 }
1806
1807 for (k = 0; k < dc_state->stream_count; k++) {
1808 bundle->stream_update.stream = dc_state->streams[k];
1809
1810 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1811 bundle->surface_updates[m].surface =
1812 dc_state->stream_status->plane_states[m];
1813 bundle->surface_updates[m].surface->force_full_update =
1814 true;
1815 }
1816 dc_commit_updates_for_stream(
1817 dm->dc, bundle->surface_updates,
1818 dc_state->stream_status->plane_count,
1819 dc_state->streams[k], &bundle->stream_update, dc_state);
1820 }
1821
1822cleanup:
1823 kfree(bundle);
1824
1825 return;
1826}
1827
1828static int dm_resume(void *handle)
1829{
1830 struct amdgpu_device *adev = handle;
1831 struct drm_device *ddev = adev->ddev;
1832 struct amdgpu_display_manager *dm = &adev->dm;
1833 struct amdgpu_dm_connector *aconnector;
1834 struct drm_connector *connector;
1835 struct drm_connector_list_iter iter;
1836 struct drm_crtc *crtc;
1837 struct drm_crtc_state *new_crtc_state;
1838 struct dm_crtc_state *dm_new_crtc_state;
1839 struct drm_plane *plane;
1840 struct drm_plane_state *new_plane_state;
1841 struct dm_plane_state *dm_new_plane_state;
1842 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1843 enum dc_connection_type new_connection_type = dc_connection_none;
1844 struct dc_state *dc_state;
1845 int i, r, j;
1846
1847 if (amdgpu_in_reset(adev)) {
1848 dc_state = dm->cached_dc_state;
1849
1850 r = dm_dmub_hw_init(adev);
1851 if (r)
1852 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1853
1854 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1855 dc_resume(dm->dc);
1856
1857 amdgpu_dm_irq_resume_early(adev);
1858
1859 for (i = 0; i < dc_state->stream_count; i++) {
1860 dc_state->streams[i]->mode_changed = true;
1861 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1862 dc_state->stream_status->plane_states[j]->update_flags.raw
1863 = 0xffffffff;
1864 }
1865 }
1866
1867 WARN_ON(!dc_commit_state(dm->dc, dc_state));
1868
1869 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1870
1871 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1872
1873 dc_release_state(dm->cached_dc_state);
1874 dm->cached_dc_state = NULL;
1875
1876 amdgpu_dm_irq_resume_late(adev);
1877
1878 mutex_unlock(&dm->dc_lock);
1879
1880 return 0;
1881 }
1882 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1883 dc_release_state(dm_state->context);
1884 dm_state->context = dc_create_state(dm->dc);
1885 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1886 dc_resource_state_construct(dm->dc, dm_state->context);
1887
1888 /* Before powering on DC we need to re-initialize DMUB. */
1889 r = dm_dmub_hw_init(adev);
1890 if (r)
1891 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1892
1893 /* power on hardware */
1894 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1895
1896 /* program HPD filter */
1897 dc_resume(dm->dc);
1898
1899 /*
1900 * early enable HPD Rx IRQ, should be done before set mode as short
1901 * pulse interrupts are used for MST
1902 */
1903 amdgpu_dm_irq_resume_early(adev);
1904
1905 /* On resume we need to rewrite the MSTM control bits to enable MST */
1906 s3_handle_mst(ddev, false);
1907
1908 /* Do detection */
1909 drm_connector_list_iter_begin(ddev, &iter);
1910 drm_for_each_connector_iter(connector, &iter) {
1911 aconnector = to_amdgpu_dm_connector(connector);
1912
1913 /*
1914 * this is the case when traversing through already created
1915 * MST connectors, should be skipped
1916 */
1917 if (aconnector->mst_port)
1918 continue;
1919
1920 mutex_lock(&aconnector->hpd_lock);
1921 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1922 DRM_ERROR("KMS: Failed to detect connector\n");
1923
1924 if (aconnector->base.force && new_connection_type == dc_connection_none)
1925 emulated_link_detect(aconnector->dc_link);
1926 else
1927 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1928
1929 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1930 aconnector->fake_enable = false;
1931
1932 if (aconnector->dc_sink)
1933 dc_sink_release(aconnector->dc_sink);
1934 aconnector->dc_sink = NULL;
1935 amdgpu_dm_update_connector_after_detect(aconnector);
1936 mutex_unlock(&aconnector->hpd_lock);
1937 }
1938 drm_connector_list_iter_end(&iter);
1939
1940 /* Force mode set in atomic commit */
1941 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1942 new_crtc_state->active_changed = true;
1943
1944 /*
1945 * atomic_check is expected to create the dc states. We need to release
1946 * them here, since they were duplicated as part of the suspend
1947 * procedure.
1948 */
1949 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1950 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1951 if (dm_new_crtc_state->stream) {
1952 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1953 dc_stream_release(dm_new_crtc_state->stream);
1954 dm_new_crtc_state->stream = NULL;
1955 }
1956 }
1957
1958 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1959 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1960 if (dm_new_plane_state->dc_state) {
1961 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1962 dc_plane_state_release(dm_new_plane_state->dc_state);
1963 dm_new_plane_state->dc_state = NULL;
1964 }
1965 }
1966
1967 drm_atomic_helper_resume(ddev, dm->cached_state);
1968
1969 dm->cached_state = NULL;
1970
1971 amdgpu_dm_irq_resume_late(adev);
1972
1973 amdgpu_dm_smu_write_watermarks_table(adev);
1974
1975 return 0;
1976}
1977
1978/**
1979 * DOC: DM Lifecycle
1980 *
1981 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1982 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1983 * the base driver's device list to be initialized and torn down accordingly.
1984 *
1985 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1986 */
1987
1988static const struct amd_ip_funcs amdgpu_dm_funcs = {
1989 .name = "dm",
1990 .early_init = dm_early_init,
1991 .late_init = dm_late_init,
1992 .sw_init = dm_sw_init,
1993 .sw_fini = dm_sw_fini,
1994 .hw_init = dm_hw_init,
1995 .hw_fini = dm_hw_fini,
1996 .suspend = dm_suspend,
1997 .resume = dm_resume,
1998 .is_idle = dm_is_idle,
1999 .wait_for_idle = dm_wait_for_idle,
2000 .check_soft_reset = dm_check_soft_reset,
2001 .soft_reset = dm_soft_reset,
2002 .set_clockgating_state = dm_set_clockgating_state,
2003 .set_powergating_state = dm_set_powergating_state,
2004};
2005
2006const struct amdgpu_ip_block_version dm_ip_block =
2007{
2008 .type = AMD_IP_BLOCK_TYPE_DCE,
2009 .major = 1,
2010 .minor = 0,
2011 .rev = 0,
2012 .funcs = &amdgpu_dm_funcs,
2013};
2014
2015
2016/**
2017 * DOC: atomic
2018 *
2019 * *WIP*
2020 */
2021
2022static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2023 .fb_create = amdgpu_display_user_framebuffer_create,
2024 .output_poll_changed = drm_fb_helper_output_poll_changed,
2025 .atomic_check = amdgpu_dm_atomic_check,
2026 .atomic_commit = amdgpu_dm_atomic_commit,
2027};
2028
2029static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2030 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2031};
2032
2033static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2034{
2035 u32 max_cll, min_cll, max, min, q, r;
2036 struct amdgpu_dm_backlight_caps *caps;
2037 struct amdgpu_display_manager *dm;
2038 struct drm_connector *conn_base;
2039 struct amdgpu_device *adev;
2040 struct dc_link *link = NULL;
2041 static const u8 pre_computed_values[] = {
2042 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2043 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2044
2045 if (!aconnector || !aconnector->dc_link)
2046 return;
2047
2048 link = aconnector->dc_link;
2049 if (link->connector_signal != SIGNAL_TYPE_EDP)
2050 return;
2051
2052 conn_base = &aconnector->base;
2053 adev = conn_base->dev->dev_private;
2054 dm = &adev->dm;
2055 caps = &dm->backlight_caps;
2056 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2057 caps->aux_support = false;
2058 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2059 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2060
2061 if (caps->ext_caps->bits.oled == 1 ||
2062 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2063 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2064 caps->aux_support = true;
2065
2066 /* From the specification (CTA-861-G), for calculating the maximum
2067 * luminance we need to use:
2068 * Luminance = 50*2**(CV/32)
2069 * Where CV is a one-byte value.
2070 * For calculating this expression we may need float point precision;
2071 * to avoid this complexity level, we take advantage that CV is divided
2072 * by a constant. From Euclid's division algorithm, we know that CV
2073 * can be written as: CV = 32*q + r. Next, we replace CV in the
2074 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2075 * need to pre-compute the value of r/32. For pre-computing the values
2076 * We just used the following Ruby line:
2077 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2078 * The results of the above expressions can be verified at
2079 * pre_computed_values.
2080 */
2081 q = max_cll >> 5;
2082 r = max_cll % 32;
2083 max = (1 << q) * pre_computed_values[r];
2084
2085 // min luminance: maxLum * (CV/255)^2 / 100
2086 q = DIV_ROUND_CLOSEST(min_cll, 255);
2087 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2088
2089 caps->aux_max_input_signal = max;
2090 caps->aux_min_input_signal = min;
2091}
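/*
 * Editorial worked example for the max-luminance computation above (the
 * value 138 is an illustrative assumption, not taken from the source):
 * for max_cll = 138,
 *	q = 138 >> 5 = 4;	r = 138 % 32 = 10;
 *	max = (1 << 4) * pre_computed_values[10] = 16 * 62 = 992
 * which closely tracks the exact CTA-861-G value 50 * 2**(138/32) ~= 993.
 */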
2092
2093void amdgpu_dm_update_connector_after_detect(
2094 struct amdgpu_dm_connector *aconnector)
2095{
2096 struct drm_connector *connector = &aconnector->base;
2097 struct drm_device *dev = connector->dev;
2098 struct dc_sink *sink;
2099
2100 /* MST handled by drm_mst framework */
2101 if (aconnector->mst_mgr.mst_state == true)
2102 return;
2103
2104
2105 sink = aconnector->dc_link->local_sink;
2106 if (sink)
2107 dc_sink_retain(sink);
2108
2109 /*
2110 * Edid mgmt connector gets first update only in mode_valid hook, and then
2111 * the connector sink is set to either a fake or a physical sink depending on link status.
2112 * Skip if already done during boot.
2113 */
2114 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2115 && aconnector->dc_em_sink) {
2116
2117 /*
2118 * For S3 resume with headless use em_sink to fake stream
2119 * because on resume connector->sink is set to NULL
2120 */
2121 mutex_lock(&dev->mode_config.mutex);
2122
2123 if (sink) {
2124 if (aconnector->dc_sink) {
2125 amdgpu_dm_update_freesync_caps(connector, NULL);
2126 /*
2127 * retain and release below are used to
2128 * bump up refcount for sink because the link doesn't point
2129 * to it anymore after disconnect, so on next crtc to connector
2130 * reshuffle by UMD we will get into unwanted dc_sink release
2131 */
2132 dc_sink_release(aconnector->dc_sink);
2133 }
2134 aconnector->dc_sink = sink;
2135 dc_sink_retain(aconnector->dc_sink);
2136 amdgpu_dm_update_freesync_caps(connector,
2137 aconnector->edid);
2138 } else {
2139 amdgpu_dm_update_freesync_caps(connector, NULL);
2140 if (!aconnector->dc_sink) {
2141 aconnector->dc_sink = aconnector->dc_em_sink;
2142 dc_sink_retain(aconnector->dc_sink);
2143 }
2144 }
2145
2146 mutex_unlock(&dev->mode_config.mutex);
2147
2148 if (sink)
2149 dc_sink_release(sink);
2150 return;
2151 }
2152
2153 /*
2154 * TODO: temporary guard to look for proper fix
2155 * if this sink is MST sink, we should not do anything
2156 */
2157 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2158 dc_sink_release(sink);
2159 return;
2160 }
2161
2162 if (aconnector->dc_sink == sink) {
2163 /*
2164 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2165 * Do nothing!!
2166 */
2167 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2168 aconnector->connector_id);
2169 if (sink)
2170 dc_sink_release(sink);
2171 return;
2172 }
2173
2174 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2175 aconnector->connector_id, aconnector->dc_sink, sink);
2176
2177 mutex_lock(&dev->mode_config.mutex);
2178
2179 /*
2180 * 1. Update status of the drm connector
2181 * 2. Send an event and let userspace tell us what to do
2182 */
2183 if (sink) {
2184 /*
2185 * TODO: check if we still need the S3 mode update workaround.
2186 * If yes, put it here.
2187 */
2188 if (aconnector->dc_sink)
2189 amdgpu_dm_update_freesync_caps(connector, NULL);
2190
2191 aconnector->dc_sink = sink;
2192 dc_sink_retain(aconnector->dc_sink);
2193 if (sink->dc_edid.length == 0) {
2194 aconnector->edid = NULL;
2195 if (aconnector->dc_link->aux_mode) {
2196 drm_dp_cec_unset_edid(
2197 &aconnector->dm_dp_aux.aux);
2198 }
2199 } else {
2200 aconnector->edid =
2201 (struct edid *)sink->dc_edid.raw_edid;
2202
2203 drm_connector_update_edid_property(connector,
2204 aconnector->edid);
2205
2206 if (aconnector->dc_link->aux_mode)
2207 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2208 aconnector->edid);
2209 }
2210
2211 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2212 update_connector_ext_caps(aconnector);
2213 } else {
2214 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2215 amdgpu_dm_update_freesync_caps(connector, NULL);
2216 drm_connector_update_edid_property(connector, NULL);
2217 aconnector->num_modes = 0;
2218 dc_sink_release(aconnector->dc_sink);
2219 aconnector->dc_sink = NULL;
2220 aconnector->edid = NULL;
2221#ifdef CONFIG_DRM_AMD_DC_HDCP
2222 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2223 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2224 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2225#endif
2226 }
2227
2228 mutex_unlock(&dev->mode_config.mutex);
2229
2230 if (sink)
2231 dc_sink_release(sink);
2232}
2233
2234static void handle_hpd_irq(void *param)
2235{
2236 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2237 struct drm_connector *connector = &aconnector->base;
2238 struct drm_device *dev = connector->dev;
2239 enum dc_connection_type new_connection_type = dc_connection_none;
2240#ifdef CONFIG_DRM_AMD_DC_HDCP
2241 struct amdgpu_device *adev = dev->dev_private;
2242#endif
2243
2244 /*
2245 * In case of failure or MST there is no need to update the connector status
2246 * or notify the OS, since (in the MST case) MST does this in its own context.
2247 */
2248 mutex_lock(&aconnector->hpd_lock);
2249
2250#ifdef CONFIG_DRM_AMD_DC_HDCP
2251 if (adev->dm.hdcp_workqueue)
2252 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2253#endif
2254 if (aconnector->fake_enable)
2255 aconnector->fake_enable = false;
2256
2257 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2258 DRM_ERROR("KMS: Failed to detect connector\n");
2259
2260 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2261 emulated_link_detect(aconnector->dc_link);
2262
2263
2264 drm_modeset_lock_all(dev);
2265 dm_restore_drm_connector_state(dev, connector);
2266 drm_modeset_unlock_all(dev);
2267
2268 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2269 drm_kms_helper_hotplug_event(dev);
2270
2271 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2272 amdgpu_dm_update_connector_after_detect(aconnector);
2273
2274
2275 drm_modeset_lock_all(dev);
2276 dm_restore_drm_connector_state(dev, connector);
2277 drm_modeset_unlock_all(dev);
2278
2279 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2280 drm_kms_helper_hotplug_event(dev);
2281 }
2282 mutex_unlock(&aconnector->hpd_lock);
2283
2284}
2285
2286static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2287{
2288 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2289 uint8_t dret;
2290 bool new_irq_handled = false;
2291 int dpcd_addr;
2292 int dpcd_bytes_to_read;
2293
2294 const int max_process_count = 30;
2295 int process_count = 0;
2296
2297 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2298
2299 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2300 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2301 /* DPCD 0x200 - 0x201 for downstream IRQ */
2302 dpcd_addr = DP_SINK_COUNT;
2303 } else {
2304 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2305 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2306 dpcd_addr = DP_SINK_COUNT_ESI;
2307 }
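/*
 * Editorial illustration of the two branches above, using the standard
 * DPCD register addresses (from the DisplayPort spec / drm_dp_helper.h):
 *	DPCD rev < 1.2:  DP_LANE0_1_STATUS (0x202) - DP_SINK_COUNT (0x200)
 *	                 = 2 bytes read starting at 0x200;
 *	DPCD rev >= 1.2: DP_PSR_ERROR_STATUS (0x2006) - DP_SINK_COUNT_ESI (0x2002)
 *	                 = 4 bytes read starting at 0x2002.
 */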
2308
2309 dret = drm_dp_dpcd_read(
2310 &aconnector->dm_dp_aux.aux,
2311 dpcd_addr,
2312 esi,
2313 dpcd_bytes_to_read);
2314
2315 while (dret == dpcd_bytes_to_read &&
2316 process_count < max_process_count) {
2317 uint8_t retry;
2318 dret = 0;
2319
2320 process_count++;
2321
2322 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2323 /* handle HPD short pulse irq */
2324 if (aconnector->mst_mgr.mst_state)
2325 drm_dp_mst_hpd_irq(
2326 &aconnector->mst_mgr,
2327 esi,
2328 &new_irq_handled);
2329
2330 if (new_irq_handled) {
2331 /* ACK at DPCD to notify down stream */
2332 const int ack_dpcd_bytes_to_write =
2333 dpcd_bytes_to_read - 1;
2334
2335 for (retry = 0; retry < 3; retry++) {
2336 uint8_t wret;
2337
2338 wret = drm_dp_dpcd_write(
2339 &aconnector->dm_dp_aux.aux,
2340 dpcd_addr + 1,
2341 &esi[1],
2342 ack_dpcd_bytes_to_write);
2343 if (wret == ack_dpcd_bytes_to_write)
2344 break;
2345 }
2346
2347 /* check if there is a new irq to be handled */
2348 dret = drm_dp_dpcd_read(
2349 &aconnector->dm_dp_aux.aux,
2350 dpcd_addr,
2351 esi,
2352 dpcd_bytes_to_read);
2353
2354 new_irq_handled = false;
2355 } else {
2356 break;
2357 }
2358 }
2359
2360 if (process_count == max_process_count)
2361 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2362}
2363
2364static void handle_hpd_rx_irq(void *param)
2365{
2366 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2367 struct drm_connector *connector = &aconnector->base;
2368 struct drm_device *dev = connector->dev;
2369 struct dc_link *dc_link = aconnector->dc_link;
2370 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2371 enum dc_connection_type new_connection_type = dc_connection_none;
2372#ifdef CONFIG_DRM_AMD_DC_HDCP
2373 union hpd_irq_data hpd_irq_data;
2374 struct amdgpu_device *adev = dev->dev_private;
2375
2376 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2377#endif
2378
2379 /*
2380 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
2381 * conflict; after the i2c helper is implemented, this mutex should be
2382 * retired.
2383 */
2384 if (dc_link->type != dc_connection_mst_branch)
2385 mutex_lock(&aconnector->hpd_lock);
2386
2387
2388#ifdef CONFIG_DRM_AMD_DC_HDCP
2389 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2390#else
2391 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2392#endif
2393 !is_mst_root_connector) {
2394 /* Downstream Port status changed. */
2395 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2396 DRM_ERROR("KMS: Failed to detect connector\n");
2397
2398 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2399 emulated_link_detect(dc_link);
2400
2401 if (aconnector->fake_enable)
2402 aconnector->fake_enable = false;
2403
2404 amdgpu_dm_update_connector_after_detect(aconnector);
2405
2406
2407 drm_modeset_lock_all(dev);
2408 dm_restore_drm_connector_state(dev, connector);
2409 drm_modeset_unlock_all(dev);
2410
2411 drm_kms_helper_hotplug_event(dev);
2412 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2413
2414 if (aconnector->fake_enable)
2415 aconnector->fake_enable = false;
2416
2417 amdgpu_dm_update_connector_after_detect(aconnector);
2418
2419
2420 drm_modeset_lock_all(dev);
2421 dm_restore_drm_connector_state(dev, connector);
2422 drm_modeset_unlock_all(dev);
2423
2424 drm_kms_helper_hotplug_event(dev);
2425 }
2426 }
2427#ifdef CONFIG_DRM_AMD_DC_HDCP
2428 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2429 if (adev->dm.hdcp_workqueue)
2430 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2431 }
2432#endif
2433 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2434 (dc_link->type == dc_connection_mst_branch))
2435 dm_handle_hpd_rx_irq(aconnector);
2436
2437 if (dc_link->type != dc_connection_mst_branch) {
2438 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2439 mutex_unlock(&aconnector->hpd_lock);
2440 }
2441}
2442
2443static void register_hpd_handlers(struct amdgpu_device *adev)
2444{
2445 struct drm_device *dev = adev->ddev;
2446 struct drm_connector *connector;
2447 struct amdgpu_dm_connector *aconnector;
2448 const struct dc_link *dc_link;
2449 struct dc_interrupt_params int_params = {0};
2450
2451 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2452 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2453
2454 list_for_each_entry(connector,
2455 &dev->mode_config.connector_list, head) {
2456
2457 aconnector = to_amdgpu_dm_connector(connector);
2458 dc_link = aconnector->dc_link;
2459
2460 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2461 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2462 int_params.irq_source = dc_link->irq_source_hpd;
2463
2464 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2465 handle_hpd_irq,
2466 (void *) aconnector);
2467 }
2468
2469 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2470
2471 /* Also register for DP short pulse (hpd_rx). */
2472 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2473 int_params.irq_source = dc_link->irq_source_hpd_rx;
2474
2475 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2476 handle_hpd_rx_irq,
2477 (void *) aconnector);
2478 }
2479 }
2480}
2481
2482#if defined(CONFIG_DRM_AMD_DC_SI)
2483/* Register IRQ sources and initialize IRQ callbacks */
2484static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2485{
2486 struct dc *dc = adev->dm.dc;
2487 struct common_irq_params *c_irq_params;
2488 struct dc_interrupt_params int_params = {0};
2489 int r;
2490 int i;
2491 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2492
2493 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2494 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2495
2496 /*
2497 * Actions of amdgpu_irq_add_id():
2498 * 1. Register a set() function with base driver.
2499 * Base driver will call set() function to enable/disable an
2500 * interrupt in DC hardware.
2501 * 2. Register amdgpu_dm_irq_handler().
2502 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2503 * coming from DC hardware.
2504 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2505 * for acknowledging and handling. */
2506
2507 /* Use VBLANK interrupt */
2508 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2509 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2510 if (r) {
2511 DRM_ERROR("Failed to add crtc irq id!\n");
2512 return r;
2513 }
2514
2515 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2516 int_params.irq_source =
2517 dc_interrupt_to_irq_source(dc, i + 1, 0);
2518
2519 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2520
2521 c_irq_params->adev = adev;
2522 c_irq_params->irq_src = int_params.irq_source;
2523
2524 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2525 dm_crtc_high_irq, c_irq_params);
2526 }
2527
2528 /* Use GRPH_PFLIP interrupt */
2529 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2530 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2531 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2532 if (r) {
2533 DRM_ERROR("Failed to add page flip irq id!\n");
2534 return r;
2535 }
2536
2537 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2538 int_params.irq_source =
2539 dc_interrupt_to_irq_source(dc, i, 0);
2540
2541 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2542
2543 c_irq_params->adev = adev;
2544 c_irq_params->irq_src = int_params.irq_source;
2545
2546 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2547 dm_pflip_high_irq, c_irq_params);
2548
2549 }
2550
2551 /* HPD */
2552 r = amdgpu_irq_add_id(adev, client_id,
2553 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2554 if (r) {
2555 DRM_ERROR("Failed to add hpd irq id!\n");
2556 return r;
2557 }
2558
2559 register_hpd_handlers(adev);
2560
2561 return 0;
2562}
2563#endif
2564
2565/* Register IRQ sources and initialize IRQ callbacks */
2566static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2567{
2568 struct dc *dc = adev->dm.dc;
2569 struct common_irq_params *c_irq_params;
2570 struct dc_interrupt_params int_params = {0};
2571 int r;
2572 int i;
2573 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2574
2575 if (adev->asic_type >= CHIP_VEGA10)
2576 client_id = SOC15_IH_CLIENTID_DCE;
2577
2578 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2579 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2580
2581 /*
2582 * Actions of amdgpu_irq_add_id():
2583 * 1. Register a set() function with base driver.
2584 * Base driver will call set() function to enable/disable an
2585 * interrupt in DC hardware.
2586 * 2. Register amdgpu_dm_irq_handler().
2587 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2588 * coming from DC hardware.
2589 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2590 * for acknowledging and handling. */
2591
2592 /* Use VBLANK interrupt */
2593 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2594 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2595 if (r) {
2596 DRM_ERROR("Failed to add crtc irq id!\n");
2597 return r;
2598 }
2599
2600 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2601 int_params.irq_source =
2602 dc_interrupt_to_irq_source(dc, i, 0);
2603
2604 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2605
2606 c_irq_params->adev = adev;
2607 c_irq_params->irq_src = int_params.irq_source;
2608
2609 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2610 dm_crtc_high_irq, c_irq_params);
2611 }
2612
2613 /* Use VUPDATE interrupt */
2614 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2615 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2616 if (r) {
2617 DRM_ERROR("Failed to add vupdate irq id!\n");
2618 return r;
2619 }
2620
2621 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2622 int_params.irq_source =
2623 dc_interrupt_to_irq_source(dc, i, 0);
2624
2625 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2626
2627 c_irq_params->adev = adev;
2628 c_irq_params->irq_src = int_params.irq_source;
2629
2630 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2631 dm_vupdate_high_irq, c_irq_params);
2632 }
2633
2634 /* Use GRPH_PFLIP interrupt */
2635 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2636 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2637 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2638 if (r) {
2639 DRM_ERROR("Failed to add page flip irq id!\n");
2640 return r;
2641 }
2642
2643 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2644 int_params.irq_source =
2645 dc_interrupt_to_irq_source(dc, i, 0);
2646
2647 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2648
2649 c_irq_params->adev = adev;
2650 c_irq_params->irq_src = int_params.irq_source;
2651
2652 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2653 dm_pflip_high_irq, c_irq_params);
2654
2655 }
2656
2657 /* HPD */
2658 r = amdgpu_irq_add_id(adev, client_id,
2659 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2660 if (r) {
2661 DRM_ERROR("Failed to add hpd irq id!\n");
2662 return r;
2663 }
2664
2665 register_hpd_handlers(adev);
2666
2667 return 0;
2668}
2669
2670#if defined(CONFIG_DRM_AMD_DC_DCN)
2671/* Register IRQ sources and initialize IRQ callbacks */
2672static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2673{
2674 struct dc *dc = adev->dm.dc;
2675 struct common_irq_params *c_irq_params;
2676 struct dc_interrupt_params int_params = {0};
2677 int r;
2678 int i;
2679
2680 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2681 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2682
2683 /*
2684 * Actions of amdgpu_irq_add_id():
2685 * 1. Register a set() function with base driver.
2686 * Base driver will call set() function to enable/disable an
2687 * interrupt in DC hardware.
2688 * 2. Register amdgpu_dm_irq_handler().
2689 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2690 * coming from DC hardware.
2691 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2692 * for acknowledging and handling.
2693 */
2694
2695 /* Use VSTARTUP interrupt */
2696 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2697 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2698 i++) {
2699 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2700
2701 if (r) {
2702 DRM_ERROR("Failed to add crtc irq id!\n");
2703 return r;
2704 }
2705
2706 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2707 int_params.irq_source =
2708 dc_interrupt_to_irq_source(dc, i, 0);
2709
2710 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2711
2712 c_irq_params->adev = adev;
2713 c_irq_params->irq_src = int_params.irq_source;
2714
2715 amdgpu_dm_irq_register_interrupt(
2716 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2717 }
2718
2719 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2720 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2721 * to trigger at end of each vblank, regardless of state of the lock,
2722 * matching DCE behaviour.
2723 */
2724 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2725 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2726 i++) {
2727 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2728
2729 if (r) {
2730 DRM_ERROR("Failed to add vupdate irq id!\n");
2731 return r;
2732 }
2733
2734 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2735 int_params.irq_source =
2736 dc_interrupt_to_irq_source(dc, i, 0);
2737
2738 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2739
2740 c_irq_params->adev = adev;
2741 c_irq_params->irq_src = int_params.irq_source;
2742
2743 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2744 dm_vupdate_high_irq, c_irq_params);
2745 }
2746
2747 /* Use GRPH_PFLIP interrupt */
2748 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2749 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2750 i++) {
2751 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2752 if (r) {
2753 DRM_ERROR("Failed to add page flip irq id!\n");
2754 return r;
2755 }
2756
2757 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2758 int_params.irq_source =
2759 dc_interrupt_to_irq_source(dc, i, 0);
2760
2761 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2762
2763 c_irq_params->adev = adev;
2764 c_irq_params->irq_src = int_params.irq_source;
2765
2766 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2767 dm_pflip_high_irq, c_irq_params);
2768
2769 }
2770
2771 /* HPD */
2772 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2773 &adev->hpd_irq);
2774 if (r) {
2775 DRM_ERROR("Failed to add hpd irq id!\n");
2776 return r;
2777 }
2778
2779 register_hpd_handlers(adev);
2780
2781 return 0;
2782}
2783#endif
2784
2785/*
2786 * Acquires the lock for the atomic state object and returns
2787 * the new atomic state.
2788 *
2789 * This should only be called during atomic check.
2790 */
2791static int dm_atomic_get_state(struct drm_atomic_state *state,
2792 struct dm_atomic_state **dm_state)
2793{
2794 struct drm_device *dev = state->dev;
2795 struct amdgpu_device *adev = dev->dev_private;
2796 struct amdgpu_display_manager *dm = &adev->dm;
2797 struct drm_private_state *priv_state;
2798
2799 if (*dm_state)
2800 return 0;
2801
2802 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2803 if (IS_ERR(priv_state))
2804 return PTR_ERR(priv_state);
2805
2806 *dm_state = to_dm_atomic_state(priv_state);
2807
2808 return 0;
2809}
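/*
 * Hypothetical usage sketch (editorial, not part of this file): a caller
 * in an atomic-check path would obtain the DM private state like so:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * after which dm_state->context can be modified for the commit being checked.
 */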
2810
2811static struct dm_atomic_state *
2812dm_atomic_get_new_state(struct drm_atomic_state *state)
2813{
2814 struct drm_device *dev = state->dev;
2815 struct amdgpu_device *adev = dev->dev_private;
2816 struct amdgpu_display_manager *dm = &adev->dm;
2817 struct drm_private_obj *obj;
2818 struct drm_private_state *new_obj_state;
2819 int i;
2820
2821 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2822 if (obj->funcs == dm->atomic_obj.funcs)
2823 return to_dm_atomic_state(new_obj_state);
2824 }
2825
2826 return NULL;
2827}
2828
2829static struct dm_atomic_state *
2830dm_atomic_get_old_state(struct drm_atomic_state *state)
2831{
2832 struct drm_device *dev = state->dev;
2833 struct amdgpu_device *adev = dev->dev_private;
2834 struct amdgpu_display_manager *dm = &adev->dm;
2835 struct drm_private_obj *obj;
2836 struct drm_private_state *old_obj_state;
2837 int i;
2838
2839 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2840 if (obj->funcs == dm->atomic_obj.funcs)
2841 return to_dm_atomic_state(old_obj_state);
2842 }
2843
2844 return NULL;
2845}
2846
2847static struct drm_private_state *
2848dm_atomic_duplicate_state(struct drm_private_obj *obj)
2849{
2850 struct dm_atomic_state *old_state, *new_state;
2851
2852 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2853 if (!new_state)
2854 return NULL;
2855
2856 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2857
2858 old_state = to_dm_atomic_state(obj->state);
2859
2860 if (old_state && old_state->context)
2861 new_state->context = dc_copy_state(old_state->context);
2862
2863 if (!new_state->context) {
2864 kfree(new_state);
2865 return NULL;
2866 }
2867
eb3dc897
NK
2868 return &new_state->base;
2869}
2870
2871static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2872 struct drm_private_state *state)
2873{
2874 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2875
2876 if (dm_state && dm_state->context)
2877 dc_release_state(dm_state->context);
2878
2879 kfree(dm_state);
2880}
2881
2882static struct drm_private_state_funcs dm_atomic_state_funcs = {
2883 .atomic_duplicate_state = dm_atomic_duplicate_state,
2884 .atomic_destroy_state = dm_atomic_destroy_state,
2885};
2886
2887static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2888{
2889 struct dm_atomic_state *state;
2890 int r;
2891
2892 adev->mode_info.mode_config_initialized = true;
2893
2894 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2895 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2896
2897 adev->ddev->mode_config.max_width = 16384;
2898 adev->ddev->mode_config.max_height = 16384;
2899
2900 adev->ddev->mode_config.preferred_depth = 24;
2901 adev->ddev->mode_config.prefer_shadow = 1;
2902 /* indicates support for immediate flip */
2903 adev->ddev->mode_config.async_page_flip = true;
2904
2905 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2906
2907 state = kzalloc(sizeof(*state), GFP_KERNEL);
2908 if (!state)
2909 return -ENOMEM;
2910
2911 state->context = dc_create_state(adev->dm.dc);
2912 if (!state->context) {
2913 kfree(state);
2914 return -ENOMEM;
2915 }
2916
2917 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2918
2919 drm_atomic_private_obj_init(adev->ddev,
2920 &adev->dm.atomic_obj,
2921 &state->base,
2922 &dm_atomic_state_funcs);
2923
2924 r = amdgpu_display_modeset_create_props(adev);
2925 if (r)
2926 return r;
2927
2928 r = amdgpu_dm_audio_init(adev);
2929 if (r)
2930 return r;
2931
2932 return 0;
2933}
2934
2935#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2936#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2937#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2938
2939#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2940 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2941
2942static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2943{
2944#if defined(CONFIG_ACPI)
2945 struct amdgpu_dm_backlight_caps caps;
2946
2947 if (dm->backlight_caps.caps_valid)
2948 return;
2949
2950 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2951 if (caps.caps_valid) {
2952 dm->backlight_caps.caps_valid = true;
2953 if (caps.aux_support)
2954 return;
2955 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2956 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2957 } else {
2958 dm->backlight_caps.min_input_signal =
2959 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2960 dm->backlight_caps.max_input_signal =
2961 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2962 }
2963#else
2964 if (dm->backlight_caps.aux_support)
2965 return;
2966
2967 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2968 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2969#endif
2970}
2971
2972static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2973{
2974 bool rc;
2975
2976 if (!link)
2977 return 1;
2978
2979 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2980 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2981
2982 return rc ? 0 : 1;
2983}
2984
2985static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2986 const uint32_t user_brightness)
2987{
2988 u32 min, max, conversion_pace;
2989 u32 brightness = user_brightness;
2990
2991 if (!caps)
2992 goto out;
2993
2994 if (!caps->aux_support) {
2995 max = caps->max_input_signal;
2996 min = caps->min_input_signal;
2997 /*
2998 * The brightness input is in the range 0-255
2999 * It needs to be rescaled to be between the
3000 * requested min and max input signal
3001 * It also needs to be scaled up by 0x101 to
3002 * match the DC interface which has a range of
3003 * 0 to 0xffff
3004 */
3005 conversion_pace = 0x101;
3006 brightness =
3007 user_brightness
3008 * conversion_pace
3009 * (max - min)
3010 / AMDGPU_MAX_BL_LEVEL
3011 + min * conversion_pace;
3012 } else {
3013 /* TODO
3014 * We are doing a linear interpolation here, which is OK but
3015 * does not provide the optimal result. We probably want
3016 * something close to the Perceptual Quantizer (PQ) curve.
3017 */
3018 max = caps->aux_max_input_signal;
3019 min = caps->aux_min_input_signal;
3020
3021 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
3022 + user_brightness * max;
3023 // Multiply the value by 1000 since we use millinits
3024 brightness *= 1000;
3025 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
3026 }
3027
3028out:
3029 return brightness;
3030}
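/*
 * Editorial worked example for the PWM branch above, assuming the default
 * caps of min_input_signal = 12 and max_input_signal = 255: a request of
 * user_brightness = 255 (AMDGPU_MAX_BL_LEVEL) gives
 *	255 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 0x101 * 255 = 0xffff,
 * i.e. full user brightness maps to the top of DC's 0..0xffff range.
 */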
3031
3032static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3033{
3034 struct amdgpu_display_manager *dm = bl_get_data(bd);
3035 struct amdgpu_dm_backlight_caps caps;
3036 struct dc_link *link = NULL;
3037 u32 brightness;
3038 bool rc;
3039
3040 amdgpu_dm_update_backlight_caps(dm);
3041 caps = dm->backlight_caps;
3042
3043 link = (struct dc_link *)dm->backlight_link;
3044
3045 brightness = convert_brightness(&caps, bd->props.brightness);
3046 // Change brightness based on AUX property
3047 if (caps.aux_support)
3048 return set_backlight_via_aux(link, brightness);
3049
3050 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3051
3052 return rc ? 0 : 1;
3053}
3054
3055static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3056{
3057 struct amdgpu_display_manager *dm = bl_get_data(bd);
3058 int ret = dc_link_get_backlight_level(dm->backlight_link);
3059
3060 if (ret == DC_ERROR_UNEXPECTED)
3061 return bd->props.brightness;
3062 return ret;
3063}
3064
3065static const struct backlight_ops amdgpu_dm_backlight_ops = {
3066 .options = BL_CORE_SUSPENDRESUME,
3067 .get_brightness = amdgpu_dm_backlight_get_brightness,
3068 .update_status = amdgpu_dm_backlight_update_status,
3069};
3070
3071static void
3072amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3073{
3074 char bl_name[16];
3075 struct backlight_properties props = { 0 };
3076
3077 amdgpu_dm_update_backlight_caps(dm);
3078
3079 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3080 props.brightness = AMDGPU_MAX_BL_LEVEL;
3081 props.type = BACKLIGHT_RAW;
3082
3083 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3084 dm->adev->ddev->primary->index);
3085
3086 dm->backlight_dev = backlight_device_register(bl_name,
3087 dm->adev->ddev->dev,
3088 dm,
3089 &amdgpu_dm_backlight_ops,
3090 &props);
3091
3092 if (IS_ERR(dm->backlight_dev))
3093 DRM_ERROR("DM: Backlight registration failed!\n");
3094 else
3095 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3096}
3097
3098#endif
3099
3100static int initialize_plane(struct amdgpu_display_manager *dm,
3101 struct amdgpu_mode_info *mode_info, int plane_id,
3102 enum drm_plane_type plane_type,
3103 const struct dc_plane_cap *plane_cap)
3104{
3105 struct drm_plane *plane;
3106 unsigned long possible_crtcs;
3107 int ret = 0;
3108
3109 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3110 if (!plane) {
3111 DRM_ERROR("KMS: Failed to allocate plane\n");
3112 return -ENOMEM;
3113 }
3114 plane->type = plane_type;
3115
3116 /*
3117 * HACK: IGT tests expect that the primary plane for a CRTC
3118 * can only have one possible CRTC. Only expose support for
3119 * any CRTC if they're not going to be used as a primary plane
3120 * for a CRTC - like overlay or underlay planes.
3121 */
3122 possible_crtcs = 1 << plane_id;
3123 if (plane_id >= dm->dc->caps.max_streams)
3124 possible_crtcs = 0xff;
3125
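 /*
  * Editorial note: possible_crtcs is a bitmask of CRTC indices the plane
  * may bind to, so plane_id 0 yields 0x01 (CRTC 0 only), while overlay
  * planes (plane_id >= max_streams) get 0xff, i.e. any of up to 8 CRTCs.
  */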
3126 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3127
3128 if (ret) {
3129 DRM_ERROR("KMS: Failed to initialize plane\n");
3130 kfree(plane);
3131 return ret;
3132 }
3133
3134 if (mode_info)
3135 mode_info->planes[plane_id] = plane;
3136
3137 return ret;
3138}
3139
3140
3141static void register_backlight_device(struct amdgpu_display_manager *dm,
3142 struct dc_link *link)
3143{
3144#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3145 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3146
3147 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3148 link->type != dc_connection_none) {
3149 /*
3150 * Even if registration fails, we should continue with
3151 * DM initialization because not having a backlight control
3152 * is better than a black screen.
3153 */
3154 amdgpu_dm_register_backlight_device(dm);
3155
3156 if (dm->backlight_dev)
3157 dm->backlight_link = link;
3158 }
3159#endif
3160}
3161
3162
3163/*
3164 * In this architecture, the association
3165 * connector -> encoder -> crtc
3166 * is not really required. The crtc and connector will hold the
3167 * display_index as an abstraction to use with the DAL component
3168 *
3169 * Returns 0 on success
3170 */
3171static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3172{
3173 struct amdgpu_display_manager *dm = &adev->dm;
3174 int32_t i;
3175 struct amdgpu_dm_connector *aconnector = NULL;
3176 struct amdgpu_encoder *aencoder = NULL;
3177 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3178 uint32_t link_cnt;
3179 int32_t primary_planes;
3180 enum dc_connection_type new_connection_type = dc_connection_none;
3181 const struct dc_plane_cap *plane;
3182
3183 link_cnt = dm->dc->caps.max_links;
3184 if (amdgpu_dm_mode_config_init(dm->adev)) {
3185 DRM_ERROR("DM: Failed to initialize mode config\n");
3186 return -EINVAL;
3187 }
3188
3189 /* There is one primary plane per CRTC */
3190 primary_planes = dm->dc->caps.max_streams;
3191 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3192
3193 /*
3194 * Initialize primary planes, implicit planes for legacy IOCTLS.
3195 * Order is reversed to match iteration order in atomic check.
3196 */
3197 for (i = (primary_planes - 1); i >= 0; i--) {
3198 plane = &dm->dc->caps.planes[i];
3199
3200 if (initialize_plane(dm, mode_info, i,
3201 DRM_PLANE_TYPE_PRIMARY, plane)) {
3202 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3203 goto fail;
3204 }
3205 }
3206
3207 /*
3208 * Initialize overlay planes, index starting after primary planes.
3209 * These planes have a higher DRM index than the primary planes since
3210 * they should be considered as having a higher z-order.
3211 * Order is reversed to match iteration order in atomic check.
3212 *
3213 * Only support DCN for now, and only expose one so we don't encourage
3214 * userspace to use up all the pipes.
3215 */
3216 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3217 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3218
3219 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3220 continue;
3221
3222 if (!plane->blends_with_above || !plane->blends_with_below)
3223 continue;
3224
3225 if (!plane->pixel_format_support.argb8888)
3226 continue;
3227
3228 if (initialize_plane(dm, NULL, primary_planes + i,
3229 DRM_PLANE_TYPE_OVERLAY, plane)) {
3230 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3231 goto fail;
3232 }
3233
3234 /* Only create one overlay plane. */
3235 break;
3236 }
3237
3238 for (i = 0; i < dm->dc->caps.max_streams; i++)
3239 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3240 DRM_ERROR("KMS: Failed to initialize crtc\n");
3241 goto fail;
3242 }
3243
3244 dm->display_indexes_num = dm->dc->caps.max_streams;
3245
3246 /* loops over all connectors on the board */
3247 for (i = 0; i < link_cnt; i++) {
3248 struct dc_link *link = NULL;
3249
3250 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3251 DRM_ERROR(
3252 "KMS: Cannot support more than %d display indexes\n",
3253 AMDGPU_DM_MAX_DISPLAY_INDEX);
3254 continue;
3255 }
3256
3257 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3258 if (!aconnector)
3259 goto fail;
3260
3261 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3262 if (!aencoder)
3263 goto fail;
3264
3265 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3266 DRM_ERROR("KMS: Failed to initialize encoder\n");
3267 goto fail;
3268 }
3269
3270 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3271 DRM_ERROR("KMS: Failed to initialize connector\n");
3272 goto fail;
3273 }
3274
3275 link = dc_get_link_at_index(dm->dc, i);
3276
3277 if (!dc_link_detect_sink(link, &new_connection_type))
3278 DRM_ERROR("KMS: Failed to detect connector\n");
3279
3280 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3281 emulated_link_detect(link);
3282 amdgpu_dm_update_connector_after_detect(aconnector);
3283
3284 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3285 amdgpu_dm_update_connector_after_detect(aconnector);
3286 register_backlight_device(dm, link);
3287 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3288 amdgpu_dm_set_psr_caps(link);
3289 }
3290
3291
4562236b
HW
3292 }
3293
3294 /* Software is initialized. Now we can register interrupt handlers. */
3295 switch (adev->asic_type) {
3296#if defined(CONFIG_DRM_AMD_DC_SI)
3297 case CHIP_TAHITI:
3298 case CHIP_PITCAIRN:
3299 case CHIP_VERDE:
3300 case CHIP_OLAND:
3301 if (dce60_register_irq_handlers(dm->adev)) {
3302 DRM_ERROR("DM: Failed to initialize IRQ\n");
3303 goto fail;
3304 }
3305 break;
3306#endif
3307 case CHIP_BONAIRE:
3308 case CHIP_HAWAII:
3309 case CHIP_KAVERI:
3310 case CHIP_KABINI:
3311 case CHIP_MULLINS:
3312 case CHIP_TONGA:
3313 case CHIP_FIJI:
3314 case CHIP_CARRIZO:
3315 case CHIP_STONEY:
3316 case CHIP_POLARIS11:
3317 case CHIP_POLARIS10:
3318 case CHIP_POLARIS12:
3319 case CHIP_VEGAM:
3320 case CHIP_VEGA10:
3321 case CHIP_VEGA12:
3322 case CHIP_VEGA20:
3323 if (dce110_register_irq_handlers(dm->adev)) {
3324 DRM_ERROR("DM: Failed to initialize IRQ\n");
3325 goto fail;
3326 }
3327 break;
3328#if defined(CONFIG_DRM_AMD_DC_DCN)
3329 case CHIP_RAVEN:
3330 case CHIP_NAVI12:
3331 case CHIP_NAVI10:
3332 case CHIP_NAVI14:
3333 case CHIP_RENOIR:
3334#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3335 case CHIP_SIENNA_CICHLID:
3336 case CHIP_NAVY_FLOUNDER:
3337#endif
3338 if (dcn10_register_irq_handlers(dm->adev)) {
3339 DRM_ERROR("DM: Failed to initialize IRQ\n");
3340 goto fail;
3341 }
3342 break;
3343#endif
3344 default:
3345 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3346 goto fail;
3347 }
3348
3349 /* No userspace support. */
3350 dm->dc->debug.disable_tri_buf = true;
3351
3352 return 0;
3353fail:
3354 kfree(aencoder);
3355 kfree(aconnector);
3356
3357 return -EINVAL;
3358}
3359
3360static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3361{
3362 drm_mode_config_cleanup(dm->ddev);
3363 drm_atomic_private_obj_fini(&dm->atomic_obj);
3364 return;
3365}
3366
3367/******************************************************************************
3368 * amdgpu_display_funcs functions
3369 *****************************************************************************/
3370
3371/*
3372 * dm_bandwidth_update - program display watermarks
3373 *
3374 * @adev: amdgpu_device pointer
3375 *
3376 * Calculate and program the display watermarks and line buffer allocation.
3377 */
3378static void dm_bandwidth_update(struct amdgpu_device *adev)
3379{
3380 /* TODO: implement later */
3381}
3382
3383static const struct amdgpu_display_funcs dm_display_funcs = {
3384 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3385 .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
3386 .backlight_set_level = NULL, /* never called for DC */
3387 .backlight_get_level = NULL, /* never called for DC */
3388 .hpd_sense = NULL, /* called unconditionally */
3389 .hpd_set_polarity = NULL, /* called unconditionally */
3390 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3391 .page_flip_get_scanoutpos =
3392 dm_crtc_get_scanoutpos,/* called unconditionally */
3393 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3394 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3395};
3396
3397#if defined(CONFIG_DEBUG_KERNEL_DC)
3398
3399static ssize_t s3_debug_store(struct device *device,
3400 struct device_attribute *attr,
3401 const char *buf,
3402 size_t count)
3403{
3404 int ret;
3405 int s3_state;
ef1de361 3406 struct drm_device *drm_dev = dev_get_drvdata(device);
3407 struct amdgpu_device *adev = drm_dev->dev_private;
3408
3409 ret = kstrtoint(buf, 0, &s3_state);
3410
3411 if (ret == 0) {
3412 if (s3_state) {
3413 dm_resume(adev);
3414 drm_kms_helper_hotplug_event(adev->ddev);
3415 } else
3416 dm_suspend(adev);
3417 }
3418
3419 return ret == 0 ? count : 0;
3420}
3421
3422DEVICE_ATTR_WO(s3_debug);
3423
3424#endif
3425
3426static int dm_early_init(void *handle)
3427{
3428 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3429
4562236b 3430 switch (adev->asic_type) {
3431#if defined(CONFIG_DRM_AMD_DC_SI)
3432 case CHIP_TAHITI:
3433 case CHIP_PITCAIRN:
3434 case CHIP_VERDE:
3435 adev->mode_info.num_crtc = 6;
3436 adev->mode_info.num_hpd = 6;
3437 adev->mode_info.num_dig = 6;
3438 break;
3439 case CHIP_OLAND:
3440 adev->mode_info.num_crtc = 2;
3441 adev->mode_info.num_hpd = 2;
3442 adev->mode_info.num_dig = 2;
3443 break;
3444#endif
3445 case CHIP_BONAIRE:
3446 case CHIP_HAWAII:
3447 adev->mode_info.num_crtc = 6;
3448 adev->mode_info.num_hpd = 6;
3449 adev->mode_info.num_dig = 6;
4562236b 3450 break;
3451 case CHIP_KAVERI:
3452 adev->mode_info.num_crtc = 4;
3453 adev->mode_info.num_hpd = 6;
3454 adev->mode_info.num_dig = 7;
3455 break;
3456 case CHIP_KABINI:
3457 case CHIP_MULLINS:
3458 adev->mode_info.num_crtc = 2;
3459 adev->mode_info.num_hpd = 6;
3460 adev->mode_info.num_dig = 6;
cd4b356f 3461 break;
3462 case CHIP_FIJI:
3463 case CHIP_TONGA:
3464 adev->mode_info.num_crtc = 6;
3465 adev->mode_info.num_hpd = 6;
3466 adev->mode_info.num_dig = 7;
3467 break;
3468 case CHIP_CARRIZO:
3469 adev->mode_info.num_crtc = 3;
3470 adev->mode_info.num_hpd = 6;
3471 adev->mode_info.num_dig = 9;
3472 break;
3473 case CHIP_STONEY:
3474 adev->mode_info.num_crtc = 2;
3475 adev->mode_info.num_hpd = 6;
3476 adev->mode_info.num_dig = 9;
3477 break;
3478 case CHIP_POLARIS11:
b264d345 3479 case CHIP_POLARIS12:
3480 adev->mode_info.num_crtc = 5;
3481 adev->mode_info.num_hpd = 5;
3482 adev->mode_info.num_dig = 5;
3483 break;
3484 case CHIP_POLARIS10:
7737de91 3485 case CHIP_VEGAM:
3486 adev->mode_info.num_crtc = 6;
3487 adev->mode_info.num_hpd = 6;
3488 adev->mode_info.num_dig = 6;
4562236b 3489 break;
2c8ad2d5 3490 case CHIP_VEGA10:
2325ff30 3491 case CHIP_VEGA12:
1fe6bf2f 3492 case CHIP_VEGA20:
3493 adev->mode_info.num_crtc = 6;
3494 adev->mode_info.num_hpd = 6;
3495 adev->mode_info.num_dig = 6;
3496 break;
b86a1aa3 3497#if defined(CONFIG_DRM_AMD_DC_DCN)
3498 case CHIP_RAVEN:
3499 adev->mode_info.num_crtc = 4;
3500 adev->mode_info.num_hpd = 4;
3501 adev->mode_info.num_dig = 4;
ff5ef992 3502 break;
476e955d 3503#endif
476e955d 3504 case CHIP_NAVI10:
fbd2afe5 3505 case CHIP_NAVI12:
3506#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3507 case CHIP_SIENNA_CICHLID:
a6c5308f 3508 case CHIP_NAVY_FLOUNDER:
79037324 3509#endif
3510 adev->mode_info.num_crtc = 6;
3511 adev->mode_info.num_hpd = 6;
3512 adev->mode_info.num_dig = 6;
3513 break;
3514 case CHIP_NAVI14:
3515 adev->mode_info.num_crtc = 5;
3516 adev->mode_info.num_hpd = 5;
3517 adev->mode_info.num_dig = 5;
3518 break;
3519 case CHIP_RENOIR:
3520 adev->mode_info.num_crtc = 4;
3521 adev->mode_info.num_hpd = 4;
3522 adev->mode_info.num_dig = 4;
3523 break;
4562236b 3524 default:
e63f8673 3525 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3526 return -EINVAL;
3527 }
3528
3529 amdgpu_dm_set_irq_funcs(adev);
3530
3531 if (adev->mode_info.funcs == NULL)
3532 adev->mode_info.funcs = &dm_display_funcs;
3533
3534 /*
3535 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 3536 * adev->audio_endpt_wreg because they are initialised in
3537 * amdgpu_device_init()
3538 */
3539#if defined(CONFIG_DEBUG_KERNEL_DC)
3540 device_create_file(
3541 adev->ddev->dev,
3542 &dev_attr_s3_debug);
3543#endif
3544
3545 return 0;
3546}
3547
9b690ef3 3548static bool modeset_required(struct drm_crtc_state *crtc_state,
3549 struct dc_stream_state *new_stream,
3550 struct dc_stream_state *old_stream)
9b690ef3 3551{
2afda735 3552 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3553}
3554
3555static bool modereset_required(struct drm_crtc_state *crtc_state)
3556{
2afda735 3557 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3558}
3559
7578ecda 3560static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3561{
3562 drm_encoder_cleanup(encoder);
3563 kfree(encoder);
3564}
3565
3566static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3567 .destroy = amdgpu_dm_encoder_destroy,
3568};
3569
e7b07cee 3570
3571static int fill_dc_scaling_info(const struct drm_plane_state *state,
3572 struct dc_scaling_info *scaling_info)
e7b07cee 3573{
6491f0c0 3574 int scale_w, scale_h;
e7b07cee 3575
695af5f9 3576 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 3577
3578 /* Source is fixed 16.16 but we ignore mantissa for now... */
3579 scaling_info->src_rect.x = state->src_x >> 16;
3580 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 3581
3582 scaling_info->src_rect.width = state->src_w >> 16;
3583 if (scaling_info->src_rect.width == 0)
3584 return -EINVAL;
3585
3586 scaling_info->src_rect.height = state->src_h >> 16;
3587 if (scaling_info->src_rect.height == 0)
3588 return -EINVAL;
3589
3590 scaling_info->dst_rect.x = state->crtc_x;
3591 scaling_info->dst_rect.y = state->crtc_y;
3592
3593 if (state->crtc_w == 0)
695af5f9 3594 return -EINVAL;
e7b07cee 3595
695af5f9 3596 scaling_info->dst_rect.width = state->crtc_w;
3597
3598 if (state->crtc_h == 0)
695af5f9 3599 return -EINVAL;
e7b07cee 3600
695af5f9 3601 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 3602
3603 /* DRM doesn't specify clipping on destination output. */
3604 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 3605
3606 /* TODO: Validate scaling per-format with DC plane caps */
3607 scale_w = scaling_info->dst_rect.width * 1000 /
3608 scaling_info->src_rect.width;
e7b07cee 3609
3610 if (scale_w < 250 || scale_w > 16000)
3611 return -EINVAL;
3612
3613 scale_h = scaling_info->dst_rect.height * 1000 /
3614 scaling_info->src_rect.height;
3615
3616 if (scale_h < 250 || scale_h > 16000)
3617 return -EINVAL;
3618
3619 /*
3620 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3621 * assume reasonable defaults based on the format.
3622 */
e7b07cee 3623
695af5f9 3624 return 0;
4562236b 3625}
695af5f9 3626
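/*
 * A self-contained userspace sketch of the checks above (the struct and
 * values are illustrative, not driver state): DRM plane source
 * coordinates are 16.16 fixed point, so the integer part is taken with a
 * 16-bit shift, and the resulting scale factors, in units of 1/1000,
 * must stay inside DC's 0.25x..16x window or the state is rejected.
 */
#include <stdint.h>
#include <stdio.h>

struct example_plane_state {
	uint32_t src_w, src_h;   /* 16.16 fixed point */
	uint32_t crtc_w, crtc_h; /* integer pixels */
};

static int example_check_scaling(const struct example_plane_state *s)
{
	uint32_t src_w = s->src_w >> 16, src_h = s->src_h >> 16;
	int scale_w, scale_h;

	if (!src_w || !src_h)
		return -1; /* mirrors the -EINVAL paths above */

	scale_w = s->crtc_w * 1000 / src_w;
	scale_h = s->crtc_h * 1000 / src_h;
	if (scale_w < 250 || scale_w > 16000 ||
	    scale_h < 250 || scale_h > 16000)
		return -1;
	return 0;
}

int main(void)
{
	struct example_plane_state s = {
		.src_w = 1920u << 16, .src_h = 1080u << 16,
		.crtc_w = 3840, .crtc_h = 2160, /* 2x upscale: allowed */
	};
	printf("scaling %s\n", example_check_scaling(&s) ? "rejected" : "ok");
	return 0;
}
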
3ee6b26b 3627static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
5888f07a 3628 uint64_t *tiling_flags, bool *tmz_surface)
e7b07cee 3629{
e68d14dd 3630 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
e7b07cee 3631 int r = amdgpu_bo_reserve(rbo, false);
b830ebc9 3632
e7b07cee 3633 if (unlikely(r)) {
1f6010a9 3634 /* Don't show error message when returning -ERESTARTSYS */
3635 if (r != -ERESTARTSYS)
3636 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3637 return r;
3638 }
3639
3640 if (tiling_flags)
3641 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3642
3643 if (tmz_surface)
3644 *tmz_surface = amdgpu_bo_encrypted(rbo);
3645
3646 amdgpu_bo_unreserve(rbo);
3647
3648 return r;
3649}
3650
3651static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3652{
3653 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3654
3655 return offset ? (address + offset * 256) : 0;
3656}
3657
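/*
 * The computation above, isolated (the addresses are made up): the
 * tiling flags store the DCC metadata offset in 256-byte units, so a
 * stored offset of 0 means "no DCC" and anything else is scaled by 256
 * and added to the buffer's base GPU address.
 */
#include <stdint.h>

static uint64_t example_dcc_address(uint64_t base, uint32_t offset_256b)
{
	return offset_256b ? base + (uint64_t)offset_256b * 256 : 0;
}
/* example_dcc_address(0x100000, 16) == 0x101000; offset 0 yields 0 */
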
3658static int
3659fill_plane_dcc_attributes(struct amdgpu_device *adev,
3660 const struct amdgpu_framebuffer *afb,
3661 const enum surface_pixel_format format,
3662 const enum dc_rotation_angle rotation,
12e2b2d4 3663 const struct plane_size *plane_size,
3664 const union dc_tiling_info *tiling_info,
3665 const uint64_t info,
3666 struct dc_plane_dcc_param *dcc,
3667 struct dc_plane_address *address,
3668 bool force_disable_dcc)
3669{
3670 struct dc *dc = adev->dm.dc;
3671 struct dc_dcc_surface_param input;
3672 struct dc_surface_dcc_cap output;
3673 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3674 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3675 uint64_t dcc_address;
3676
3677 memset(&input, 0, sizeof(input));
3678 memset(&output, 0, sizeof(output));
3679
3680 if (force_disable_dcc)
3681 return 0;
3682
7df7e505 3683 if (!offset)
3684 return 0;
3685
695af5f9 3686 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
09e5665a 3687 return 0;
3688
3689 if (!dc->cap_funcs.get_dcc_compression_cap)
09e5665a 3690 return -EINVAL;
7df7e505 3691
695af5f9 3692 input.format = format;
3693 input.surface_size.width = plane_size->surface_size.width;
3694 input.surface_size.height = plane_size->surface_size.height;
695af5f9 3695 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 3696
695af5f9 3697 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 3698 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 3699 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3700 input.scan = SCAN_DIRECTION_VERTICAL;
3701
3702 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 3703 return -EINVAL;
3704
3705 if (!output.capable)
09e5665a 3706 return -EINVAL;
3707
3708 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
09e5665a 3709 return -EINVAL;
7df7e505 3710
09e5665a 3711 dcc->enable = 1;
12e2b2d4 3712 dcc->meta_pitch =
7df7e505 3713 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
12e2b2d4 3714 dcc->independent_64b_blks = i64b;
3715
3716 dcc_address = get_dcc_address(afb->address, info);
3717 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3718 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
7df7e505 3719
3720 return 0;
3721}
3722
3723static int
320932bf 3724fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 3725 const struct amdgpu_framebuffer *afb,
3726 const enum surface_pixel_format format,
3727 const enum dc_rotation_angle rotation,
3728 const uint64_t tiling_flags,
09e5665a 3729 union dc_tiling_info *tiling_info,
12e2b2d4 3730 struct plane_size *plane_size,
09e5665a 3731 struct dc_plane_dcc_param *dcc,
87b7ebc2 3732 struct dc_plane_address *address,
5888f07a 3733 bool tmz_surface,
87b7ebc2 3734 bool force_disable_dcc)
09e5665a 3735{
320932bf 3736 const struct drm_framebuffer *fb = &afb->base;
3737 int ret;
3738
3739 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 3740 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 3741 memset(dcc, 0, sizeof(*dcc));
3742 memset(address, 0, sizeof(*address));
3743
3744 address->tmz_surface = tmz_surface;
3745
695af5f9 3746 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3747 plane_size->surface_size.x = 0;
3748 plane_size->surface_size.y = 0;
3749 plane_size->surface_size.width = fb->width;
3750 plane_size->surface_size.height = fb->height;
3751 plane_size->surface_pitch =
3752 fb->pitches[0] / fb->format->cpp[0];
3753
3754 address->type = PLN_ADDR_TYPE_GRAPHICS;
3755 address->grph.addr.low_part = lower_32_bits(afb->address);
3756 address->grph.addr.high_part = upper_32_bits(afb->address);
1894478a 3757 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
1791e54f 3758 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 3759
3760 plane_size->surface_size.x = 0;
3761 plane_size->surface_size.y = 0;
3762 plane_size->surface_size.width = fb->width;
3763 plane_size->surface_size.height = fb->height;
3764 plane_size->surface_pitch =
3765 fb->pitches[0] / fb->format->cpp[0];
3766
3767 plane_size->chroma_size.x = 0;
3768 plane_size->chroma_size.y = 0;
320932bf 3769 /* TODO: set these based on surface format */
3770 plane_size->chroma_size.width = fb->width / 2;
3771 plane_size->chroma_size.height = fb->height / 2;
320932bf 3772
12e2b2d4 3773 plane_size->chroma_pitch =
3774 fb->pitches[1] / fb->format->cpp[1];
3775
3776 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3777 address->video_progressive.luma_addr.low_part =
3778 lower_32_bits(afb->address);
3779 address->video_progressive.luma_addr.high_part =
3780 upper_32_bits(afb->address);
3781 address->video_progressive.chroma_addr.low_part =
3782 lower_32_bits(chroma_addr);
3783 address->video_progressive.chroma_addr.high_part =
3784 upper_32_bits(chroma_addr);
3785 }
3786
3787 /* Fill GFX8 params */
3788 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3789 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3790
3791 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3792 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3793 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3794 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3795 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3796
3797 /* XXX fix me for VI */
3798 tiling_info->gfx8.num_banks = num_banks;
3799 tiling_info->gfx8.array_mode =
3800 DC_ARRAY_2D_TILED_THIN1;
3801 tiling_info->gfx8.tile_split = tile_split;
3802 tiling_info->gfx8.bank_width = bankw;
3803 tiling_info->gfx8.bank_height = bankh;
3804 tiling_info->gfx8.tile_aspect = mtaspect;
3805 tiling_info->gfx8.tile_mode =
3806 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3807 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3808 == DC_ARRAY_1D_TILED_THIN1) {
3809 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3810 }
3811
3812 tiling_info->gfx8.pipe_config =
3813 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3814
3815 if (adev->asic_type == CHIP_VEGA10 ||
3816 adev->asic_type == CHIP_VEGA12 ||
3817 adev->asic_type == CHIP_VEGA20 ||
476e955d 3818 adev->asic_type == CHIP_NAVI10 ||
fce651e3 3819 adev->asic_type == CHIP_NAVI14 ||
fbd2afe5 3820 adev->asic_type == CHIP_NAVI12 ||
3821#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3822 adev->asic_type == CHIP_SIENNA_CICHLID ||
a6c5308f 3823 adev->asic_type == CHIP_NAVY_FLOUNDER ||
79037324 3824#endif
30221ad8 3825 adev->asic_type == CHIP_RENOIR ||
3826 adev->asic_type == CHIP_RAVEN) {
3827 /* Fill GFX9 params */
3828 tiling_info->gfx9.num_pipes =
3829 adev->gfx.config.gb_addr_config_fields.num_pipes;
3830 tiling_info->gfx9.num_banks =
3831 adev->gfx.config.gb_addr_config_fields.num_banks;
3832 tiling_info->gfx9.pipe_interleave =
3833 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3834 tiling_info->gfx9.num_shader_engines =
3835 adev->gfx.config.gb_addr_config_fields.num_se;
3836 tiling_info->gfx9.max_compressed_frags =
3837 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3838 tiling_info->gfx9.num_rb_per_se =
3839 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3840 tiling_info->gfx9.swizzle =
3841 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3842 tiling_info->gfx9.shaderEnable = 1;
3843
79037324 3844#ifdef CONFIG_DRM_AMD_DC_DCN3_0
3845 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3846 adev->asic_type == CHIP_NAVY_FLOUNDER)
79037324 3847 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
79037324 3848#endif
3849 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3850 plane_size, tiling_info,
3851 tiling_flags, dcc, address,
3852 force_disable_dcc);
3853 if (ret)
3854 return ret;
3855 }
3856
3857 return 0;
3858}
3859
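/*
 * A freestanding sketch of the pitch conversions above (sizes are
 * examples, not real framebuffers): DRM stores pitches in bytes while
 * DC wants them in pixels, so each plane's pitch is divided by its
 * bytes-per-pixel; for an NV12-style surface the chroma plane is
 * subsampled 2x2 but packs Cb and Cr together, giving cpp = 2.
 */
#include <stdio.h>

int main(void)
{
	unsigned int width = 1920, height = 1080;
	unsigned int pitches[2] = { 1920, 1920 }; /* bytes, per plane */
	unsigned int cpp[2] = { 1, 2 };           /* Y, then CbCr pairs */

	printf("luma %ux%u pitch %u px, chroma %ux%u pitch %u px\n",
	       width, height, pitches[0] / cpp[0],
	       width / 2, height / 2, pitches[1] / cpp[1]);
	return 0;
}
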
d74004b6 3860static void
695af5f9 3861fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3862 bool *per_pixel_alpha, bool *global_alpha,
3863 int *global_alpha_value)
3864{
3865 *per_pixel_alpha = false;
3866 *global_alpha = false;
3867 *global_alpha_value = 0xff;
3868
3869 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3870 return;
3871
3872 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3873 static const uint32_t alpha_formats[] = {
3874 DRM_FORMAT_ARGB8888,
3875 DRM_FORMAT_RGBA8888,
3876 DRM_FORMAT_ABGR8888,
3877 };
3878 uint32_t format = plane_state->fb->format->format;
3879 unsigned int i;
3880
3881 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3882 if (format == alpha_formats[i]) {
3883 *per_pixel_alpha = true;
3884 break;
3885 }
3886 }
3887 }
3888
3889 if (plane_state->alpha < 0xffff) {
3890 *global_alpha = true;
3891 *global_alpha_value = plane_state->alpha >> 8;
3892 }
3893}
3894
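/*
 * The global-alpha mapping above on its own (values invented): the DRM
 * plane alpha property is 16 bits wide, so anything below 0xffff turns
 * on global alpha and the top byte becomes DC's 8-bit level.
 */
#include <stdbool.h>
#include <stdint.h>

static void example_map_alpha(uint16_t drm_alpha,
			      bool *global_alpha, int *global_alpha_value)
{
	*global_alpha = drm_alpha < 0xffff;
	*global_alpha_value = drm_alpha >> 8; /* e.g. 0x8000 -> 0x80 */
}
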
3895static int
3896fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 3897 const enum surface_pixel_format format,
004fefa3
NK
3898 enum dc_color_space *color_space)
3899{
3900 bool full_range;
3901
3902 *color_space = COLOR_SPACE_SRGB;
3903
3904 /* DRM color properties only affect non-RGB formats. */
695af5f9 3905 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3906 return 0;
3907
3908 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3909
3910 switch (plane_state->color_encoding) {
3911 case DRM_COLOR_YCBCR_BT601:
3912 if (full_range)
3913 *color_space = COLOR_SPACE_YCBCR601;
3914 else
3915 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3916 break;
3917
3918 case DRM_COLOR_YCBCR_BT709:
3919 if (full_range)
3920 *color_space = COLOR_SPACE_YCBCR709;
3921 else
3922 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3923 break;
3924
3925 case DRM_COLOR_YCBCR_BT2020:
3926 if (full_range)
3927 *color_space = COLOR_SPACE_2020_YCBCR;
3928 else
3929 return -EINVAL;
3930 break;
3931
3932 default:
3933 return -EINVAL;
3934 }
3935
3936 return 0;
3937}
3938
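/*
 * The decision table above as a standalone lookup; the enums are
 * stand-ins for the DRM/DC ones, kept only to show the mapping: BT.601
 * and BT.709 each select a full- or limited-range color space, while
 * BT.2020 is accepted only as full range.
 */
enum example_enc { EX_ENC_601, EX_ENC_709, EX_ENC_2020 };
enum example_cs {
	EX_CS_601, EX_CS_601_LIMITED,
	EX_CS_709, EX_CS_709_LIMITED,
	EX_CS_2020, EX_CS_INVALID
};

static enum example_cs example_pick_cs(enum example_enc enc, int full_range)
{
	switch (enc) {
	case EX_ENC_601:  return full_range ? EX_CS_601 : EX_CS_601_LIMITED;
	case EX_ENC_709:  return full_range ? EX_CS_709 : EX_CS_709_LIMITED;
	case EX_ENC_2020: return full_range ? EX_CS_2020 : EX_CS_INVALID;
	}
	return EX_CS_INVALID;
}
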
3939static int
3940fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3941 const struct drm_plane_state *plane_state,
3942 const uint64_t tiling_flags,
3943 struct dc_plane_info *plane_info,
87b7ebc2 3944 struct dc_plane_address *address,
5888f07a 3945 bool tmz_surface,
87b7ebc2 3946 bool force_disable_dcc)
3947{
3948 const struct drm_framebuffer *fb = plane_state->fb;
3949 const struct amdgpu_framebuffer *afb =
3950 to_amdgpu_framebuffer(plane_state->fb);
3951 struct drm_format_name_buf format_name;
3952 int ret;
3953
3954 memset(plane_info, 0, sizeof(*plane_info));
3955
3956 switch (fb->format->format) {
3957 case DRM_FORMAT_C8:
3958 plane_info->format =
3959 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3960 break;
3961 case DRM_FORMAT_RGB565:
3962 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3963 break;
3964 case DRM_FORMAT_XRGB8888:
3965 case DRM_FORMAT_ARGB8888:
3966 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3967 break;
3968 case DRM_FORMAT_XRGB2101010:
3969 case DRM_FORMAT_ARGB2101010:
3970 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3971 break;
3972 case DRM_FORMAT_XBGR2101010:
3973 case DRM_FORMAT_ABGR2101010:
3974 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3975 break;
3976 case DRM_FORMAT_XBGR8888:
3977 case DRM_FORMAT_ABGR8888:
3978 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3979 break;
3980 case DRM_FORMAT_NV21:
3981 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3982 break;
3983 case DRM_FORMAT_NV12:
3984 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3985 break;
3986 case DRM_FORMAT_P010:
3987 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3988 break;
3989 case DRM_FORMAT_XRGB16161616F:
3990 case DRM_FORMAT_ARGB16161616F:
3991 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3992 break;
3993 case DRM_FORMAT_XBGR16161616F:
3994 case DRM_FORMAT_ABGR16161616F:
3995 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3996 break;
3997 default:
3998 DRM_ERROR(
3999 "Unsupported screen format %s\n",
4000 drm_get_format_name(fb->format->format, &format_name));
4001 return -EINVAL;
4002 }
4003
4004 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4005 case DRM_MODE_ROTATE_0:
4006 plane_info->rotation = ROTATION_ANGLE_0;
4007 break;
4008 case DRM_MODE_ROTATE_90:
4009 plane_info->rotation = ROTATION_ANGLE_90;
4010 break;
4011 case DRM_MODE_ROTATE_180:
4012 plane_info->rotation = ROTATION_ANGLE_180;
4013 break;
4014 case DRM_MODE_ROTATE_270:
4015 plane_info->rotation = ROTATION_ANGLE_270;
4016 break;
4017 default:
4018 plane_info->rotation = ROTATION_ANGLE_0;
4019 break;
4020 }
4021
4022 plane_info->visible = true;
4023 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4024
4025 plane_info->layer_index = 0;
4026
4027 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4028 &plane_info->color_space);
4029 if (ret)
4030 return ret;
4031
4032 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4033 plane_info->rotation, tiling_flags,
4034 &plane_info->tiling_info,
4035 &plane_info->plane_size,
5888f07a 4036 &plane_info->dcc, address, tmz_surface,
87b7ebc2 4037 force_disable_dcc);
4038 if (ret)
4039 return ret;
4040
4041 fill_blending_from_plane_state(
4042 plane_state, &plane_info->per_pixel_alpha,
4043 &plane_info->global_alpha, &plane_info->global_alpha_value);
4044
4045 return 0;
4046}
4047
4048static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4049 struct dc_plane_state *dc_plane_state,
4050 struct drm_plane_state *plane_state,
4051 struct drm_crtc_state *crtc_state)
e7b07cee 4052{
cf020d49 4053 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4054 const struct amdgpu_framebuffer *amdgpu_fb =
4055 to_amdgpu_framebuffer(plane_state->fb);
4056 struct dc_scaling_info scaling_info;
4057 struct dc_plane_info plane_info;
4058 uint64_t tiling_flags;
4059 int ret;
5888f07a 4060 bool tmz_surface = false;
87b7ebc2 4061 bool force_disable_dcc = false;
e7b07cee 4062
4063 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4064 if (ret)
4065 return ret;
e7b07cee 4066
4067 dc_plane_state->src_rect = scaling_info.src_rect;
4068 dc_plane_state->dst_rect = scaling_info.dst_rect;
4069 dc_plane_state->clip_rect = scaling_info.clip_rect;
4070 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 4071
5888f07a 4072 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
4073 if (ret)
4074 return ret;
4075
87b7ebc2 4076 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4077 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
4078 &plane_info,
87b7ebc2 4079 &dc_plane_state->address,
5888f07a 4080 tmz_surface,
87b7ebc2 4081 force_disable_dcc);
4082 if (ret)
4083 return ret;
4084
 4085	dc_plane_state->format = plane_info.format;
 4086	dc_plane_state->color_space = plane_info.color_space;
4088 dc_plane_state->plane_size = plane_info.plane_size;
4089 dc_plane_state->rotation = plane_info.rotation;
4090 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4091 dc_plane_state->stereo_format = plane_info.stereo_format;
4092 dc_plane_state->tiling_info = plane_info.tiling_info;
4093 dc_plane_state->visible = plane_info.visible;
4094 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4095 dc_plane_state->global_alpha = plane_info.global_alpha;
4096 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4097 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 4098	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
695af5f9 4099
4100 /*
4101 * Always set input transfer function, since plane state is refreshed
4102 * every time.
4103 */
4104 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4105 if (ret)
4106 return ret;
e7b07cee 4107
cf020d49 4108 return 0;
4109}
4110
4111static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4112 const struct dm_connector_state *dm_state,
4113 struct dc_stream_state *stream)
4114{
4115 enum amdgpu_rmx_type rmx_type;
4116
 4117	struct rect src = { 0 }; /* viewport in composition space */
4118 struct rect dst = { 0 }; /* stream addressable area */
4119
4120 /* no mode. nothing to be done */
4121 if (!mode)
4122 return;
4123
4124 /* Full screen scaling by default */
4125 src.width = mode->hdisplay;
4126 src.height = mode->vdisplay;
4127 dst.width = stream->timing.h_addressable;
4128 dst.height = stream->timing.v_addressable;
4129
4130 if (dm_state) {
4131 rmx_type = dm_state->scaling;
4132 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4133 if (src.width * dst.height <
4134 src.height * dst.width) {
4135 /* height needs less upscaling/more downscaling */
4136 dst.width = src.width *
4137 dst.height / src.height;
4138 } else {
4139 /* width needs less upscaling/more downscaling */
4140 dst.height = src.height *
4141 dst.width / src.width;
4142 }
4143 } else if (rmx_type == RMX_CENTER) {
4144 dst = src;
e7b07cee 4145 }
e7b07cee 4146
4147 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4148 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 4149
4150 if (dm_state->underscan_enable) {
4151 dst.x += dm_state->underscan_hborder / 2;
4152 dst.y += dm_state->underscan_vborder / 2;
4153 dst.width -= dm_state->underscan_hborder;
4154 dst.height -= dm_state->underscan_vborder;
4155 }
4156 }
4157
4158 stream->src = src;
4159 stream->dst = dst;
4160
f1ad2f5e 4161 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4162 dst.x, dst.y, dst.width, dst.height);
4163
4164}
4165
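/*
 * The RMX_ASPECT arithmetic above, runnable with example numbers:
 * comparing the cross products src.w * dst.h and src.h * dst.w decides
 * which axis limits the scale; the other axis is shrunk to preserve
 * aspect and the result is centered in the addressable area.
 */
#include <stdio.h>

int main(void)
{
	int src_w = 1280, src_h = 1024; /* 5:4 source mode */
	int dst_w = 1920, dst_h = 1080; /* 16:9 addressable area */

	if (src_w * dst_h < src_h * dst_w)
		dst_w = src_w * dst_h / src_h; /* pillarbox */
	else
		dst_h = src_h * dst_w / src_w; /* letterbox */

	/* Prints: dst 1350x1080 at +285+0 */
	printf("dst %dx%d at +%d+%d\n", dst_w, dst_h,
	       (1920 - dst_w) / 2, (1080 - dst_h) / 2);
	return 0;
}
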
3ee6b26b 4166static enum dc_color_depth
42ba01fc 4167convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 4168 bool is_y420, int requested_bpc)
e7b07cee 4169{
1bc22f20 4170 uint8_t bpc;
01c22997 4171
4172 if (is_y420) {
4173 bpc = 8;
4174
4175 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4176 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4177 bpc = 16;
4178 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4179 bpc = 12;
4180 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4181 bpc = 10;
4182 } else {
4183 bpc = (uint8_t)connector->display_info.bpc;
4184 /* Assume 8 bpc by default if no bpc is specified. */
4185 bpc = bpc ? bpc : 8;
4186 }
e7b07cee 4187
cbd14ae7 4188 if (requested_bpc > 0) {
4189 /*
4190 * Cap display bpc based on the user requested value.
4191 *
4192 * The value for state->max_bpc may not correctly updated
4193 * depending on when the connector gets added to the state
4194 * or if this was called outside of atomic check, so it
4195 * can't be used directly.
4196 */
cbd14ae7 4197 bpc = min_t(u8, bpc, requested_bpc);
01c22997 4198
4199 /* Round down to the nearest even number. */
4200 bpc = bpc - (bpc & 1);
4201 }
07e3a1cf 4202
4203 switch (bpc) {
4204 case 0:
4205 /*
4206 * Temporary Work around, DRM doesn't parse color depth for
4207 * EDID revision before 1.4
4208 * TODO: Fix edid parsing
4209 */
4210 return COLOR_DEPTH_888;
4211 case 6:
4212 return COLOR_DEPTH_666;
4213 case 8:
4214 return COLOR_DEPTH_888;
4215 case 10:
4216 return COLOR_DEPTH_101010;
4217 case 12:
4218 return COLOR_DEPTH_121212;
4219 case 14:
4220 return COLOR_DEPTH_141414;
4221 case 16:
4222 return COLOR_DEPTH_161616;
4223 default:
4224 return COLOR_DEPTH_UNDEFINED;
4225 }
4226}
4227
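/*
 * The capping logic above in two lines (inputs are hypothetical): the
 * display's bpc is clamped to the user's requested maximum and then
 * rounded down to an even value, since odd depths are not valid here.
 */
#include <stdint.h>

static uint8_t example_cap_bpc(uint8_t display_bpc, int requested_bpc)
{
	uint8_t bpc = display_bpc ? display_bpc : 8; /* default 8 bpc */

	if (requested_bpc > 0) {
		if (bpc > requested_bpc)
			bpc = (uint8_t)requested_bpc;
		bpc &= ~1; /* e.g. 11 -> 10 */
	}
	return bpc;
}
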
4228static enum dc_aspect_ratio
4229get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 4230{
4231 /* 1-1 mapping, since both enums follow the HDMI spec. */
4232 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4233}
4234
4235static enum dc_color_space
4236get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4237{
4238 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4239
4240 switch (dc_crtc_timing->pixel_encoding) {
4241 case PIXEL_ENCODING_YCBCR422:
4242 case PIXEL_ENCODING_YCBCR444:
4243 case PIXEL_ENCODING_YCBCR420:
4244 {
4245 /*
 4246	 * 27030 kHz (27.03 MHz) is the separation point between HDTV and
 4247	 * SDTV per the HDMI spec; we use YCbCr709 above it and YCbCr601
 4248	 * below it.
4249 */
380604e2 4250 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4251 if (dc_crtc_timing->flags.Y_ONLY)
4252 color_space =
4253 COLOR_SPACE_YCBCR709_LIMITED;
4254 else
4255 color_space = COLOR_SPACE_YCBCR709;
4256 } else {
4257 if (dc_crtc_timing->flags.Y_ONLY)
4258 color_space =
4259 COLOR_SPACE_YCBCR601_LIMITED;
4260 else
4261 color_space = COLOR_SPACE_YCBCR601;
4262 }
4263
4264 }
4265 break;
4266 case PIXEL_ENCODING_RGB:
4267 color_space = COLOR_SPACE_SRGB;
4268 break;
4269
4270 default:
4271 WARN_ON(1);
4272 break;
4273 }
4274
4275 return color_space;
4276}
4277
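/*
 * The threshold used above, spelled out (a sketch only): pix_clk_100hz
 * is in units of 100 Hz, so 270300 corresponds to 27.03 MHz, the HDMI
 * HDTV/SDTV split; YCbCr709 applies above it, YCbCr601 below.
 */
static int example_is_hdtv_clock(int pix_clk_100hz)
{
	return pix_clk_100hz > 270300; /* 270300 * 100 Hz = 27.03 MHz */
}
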
4278static bool adjust_colour_depth_from_display_info(
4279 struct dc_crtc_timing *timing_out,
4280 const struct drm_display_info *info)
400443e8 4281{
ea117312 4282 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 4283 int normalized_clk;
400443e8 4284 do {
380604e2 4285 normalized_clk = timing_out->pix_clk_100hz / 10;
4286 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4287 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4288 normalized_clk /= 2;
 4289	/* Adjust the pixel clock per the HDMI spec for the chosen colour depth */
4290 switch (depth) {
4291 case COLOR_DEPTH_888:
4292 break;
4293 case COLOR_DEPTH_101010:
4294 normalized_clk = (normalized_clk * 30) / 24;
4295 break;
4296 case COLOR_DEPTH_121212:
4297 normalized_clk = (normalized_clk * 36) / 24;
4298 break;
4299 case COLOR_DEPTH_161616:
4300 normalized_clk = (normalized_clk * 48) / 24;
4301 break;
4302 default:
4303 /* The above depths are the only ones valid for HDMI. */
4304 return false;
400443e8 4305 }
4306 if (normalized_clk <= info->max_tmds_clock) {
4307 timing_out->display_color_depth = depth;
4308 return true;
4309 }
4310 } while (--depth > COLOR_DEPTH_666);
4311 return false;
400443e8 4312}
e7b07cee 4313
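/*
 * The normalization above as plain arithmetic (inputs invented):
 * convert 100 Hz units to kHz, halve for YCbCr 4:2:0, then scale by
 * bits-per-component over the 24 bpp baseline; the result must fit
 * under the sink's max_tmds_clock (kHz) for that depth to be chosen.
 */
static int example_normalized_tmds_khz(int pix_clk_100hz, int is_420, int bpc)
{
	int clk = pix_clk_100hz / 10; /* 100 Hz units -> kHz */

	if (is_420)
		clk /= 2;
	return clk * (bpc * 3) / 24; /* 10 bpc -> *30/24, 12 -> *36/24 */
}
/* 5940000 (594 MHz) 4:2:0 at 10 bpc -> 297000 * 30 / 24 = 371250 kHz */
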
4314static void fill_stream_properties_from_drm_display_mode(
4315 struct dc_stream_state *stream,
4316 const struct drm_display_mode *mode_in,
4317 const struct drm_connector *connector,
4318 const struct drm_connector_state *connector_state,
4319 const struct dc_stream_state *old_stream,
4320 int requested_bpc)
4321{
4322 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 4323 const struct drm_display_info *info = &connector->display_info;
d4252eee 4324 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4325 struct hdmi_vendor_infoframe hv_frame;
4326 struct hdmi_avi_infoframe avi_frame;
e7b07cee 4327
4328 memset(&hv_frame, 0, sizeof(hv_frame));
4329 memset(&avi_frame, 0, sizeof(avi_frame));
4330
4331 timing_out->h_border_left = 0;
4332 timing_out->h_border_right = 0;
4333 timing_out->v_border_top = 0;
4334 timing_out->v_border_bottom = 0;
4335 /* TODO: un-hardcode */
fe61a2f1 4336 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 4337 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 4338 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4339 else if (drm_mode_is_420_also(info, mode_in)
4340 && aconnector->force_yuv420_output)
4341 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 4342 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 4343 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4344 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4345 else
4346 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4347
4348 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4349 timing_out->display_color_depth = convert_color_depth_from_display_info(
4350 connector,
4351 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4352 requested_bpc);
4353 timing_out->scan_type = SCANNING_TYPE_NODATA;
4354 timing_out->hdmi_vic = 0;
 4355
 4356	if (old_stream) {
4357 timing_out->vic = old_stream->timing.vic;
4358 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4359 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4360 } else {
4361 timing_out->vic = drm_match_cea_mode(mode_in);
4362 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4363 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4364 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4365 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4366 }
e7b07cee 4367
4368 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4369 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4370 timing_out->vic = avi_frame.video_code;
4371 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4372 timing_out->hdmi_vic = hv_frame.vic;
4373 }
4374
4375 timing_out->h_addressable = mode_in->crtc_hdisplay;
4376 timing_out->h_total = mode_in->crtc_htotal;
4377 timing_out->h_sync_width =
4378 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4379 timing_out->h_front_porch =
4380 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4381 timing_out->v_total = mode_in->crtc_vtotal;
4382 timing_out->v_addressable = mode_in->crtc_vdisplay;
4383 timing_out->v_front_porch =
4384 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4385 timing_out->v_sync_width =
4386 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 4387 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 4388 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4389
4390 stream->output_color_space = get_output_color_space(timing_out);
4391
4392 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4393 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4394 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4395 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4396 drm_mode_is_420_also(info, mode_in) &&
4397 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4398 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4399 adjust_colour_depth_from_display_info(timing_out, info);
4400 }
4401 }
4402}
4403
4404static void fill_audio_info(struct audio_info *audio_info,
4405 const struct drm_connector *drm_connector,
4406 const struct dc_sink *dc_sink)
4407{
4408 int i = 0;
4409 int cea_revision = 0;
4410 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4411
4412 audio_info->manufacture_id = edid_caps->manufacturer_id;
4413 audio_info->product_id = edid_caps->product_id;
4414
4415 cea_revision = drm_connector->display_info.cea_rev;
4416
090afc1e 4417 strscpy(audio_info->display_name,
d2b2562c 4418 edid_caps->display_name,
090afc1e 4419 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 4420
b830ebc9 4421 if (cea_revision >= 3) {
4422 audio_info->mode_count = edid_caps->audio_mode_count;
4423
4424 for (i = 0; i < audio_info->mode_count; ++i) {
4425 audio_info->modes[i].format_code =
4426 (enum audio_format_code)
4427 (edid_caps->audio_modes[i].format_code);
4428 audio_info->modes[i].channel_count =
4429 edid_caps->audio_modes[i].channel_count;
4430 audio_info->modes[i].sample_rates.all =
4431 edid_caps->audio_modes[i].sample_rate;
4432 audio_info->modes[i].sample_size =
4433 edid_caps->audio_modes[i].sample_size;
4434 }
4435 }
4436
4437 audio_info->flags.all = edid_caps->speaker_flags;
4438
 4439	/* TODO: We only check progressive mode; check interlaced mode too */
b830ebc9 4440 if (drm_connector->latency_present[0]) {
4441 audio_info->video_latency = drm_connector->video_latency[0];
4442 audio_info->audio_latency = drm_connector->audio_latency[0];
4443 }
4444
4445 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4446
4447}
4448
4449static void
4450copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4451 struct drm_display_mode *dst_mode)
4452{
4453 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4454 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4455 dst_mode->crtc_clock = src_mode->crtc_clock;
4456 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4457 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 4458 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4459 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4460 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4461 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4462 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4463 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4464 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4465 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4466 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4467}
4468
4469static void
4470decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4471 const struct drm_display_mode *native_mode,
4472 bool scale_enabled)
4473{
4474 if (scale_enabled) {
4475 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4476 } else if (native_mode->clock == drm_mode->clock &&
4477 native_mode->htotal == drm_mode->htotal &&
4478 native_mode->vtotal == drm_mode->vtotal) {
4479 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4480 } else {
4481 /* no scaling nor amdgpu inserted, no need to patch */
4482 }
4483}
4484
4485static struct dc_sink *
4486create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 4487{
2e0ac3d6 4488 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 4489 struct dc_sink *sink = NULL;
4490 sink_init_data.link = aconnector->dc_link;
4491 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4492
4493 sink = dc_sink_create(&sink_init_data);
423788c7 4494 if (!sink) {
2e0ac3d6 4495 DRM_ERROR("Failed to create sink!\n");
aed15309 4496 return NULL;
423788c7 4497 }
2e0ac3d6 4498 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 4499
aed15309 4500 return sink;
4501}
4502
4503static void set_multisync_trigger_params(
4504 struct dc_stream_state *stream)
4505{
4506 if (stream->triggered_crtc_reset.enabled) {
4507 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4508 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4509 }
4510}
4511
4512static void set_master_stream(struct dc_stream_state *stream_set[],
4513 int stream_count)
4514{
4515 int j, highest_rfr = 0, master_stream = 0;
4516
4517 for (j = 0; j < stream_count; j++) {
4518 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4519 int refresh_rate = 0;
4520
380604e2 4521 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4522 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4523 if (refresh_rate > highest_rfr) {
4524 highest_rfr = refresh_rate;
4525 master_stream = j;
4526 }
4527 }
4528 }
4529 for (j = 0; j < stream_count; j++) {
03736f4c 4530 if (stream_set[j])
4531 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4532 }
4533}
4534
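/*
 * The refresh-rate expression used to elect the master stream, with
 * example CEA-861 timing (values illustrative): pix_clk_100hz * 100
 * gives Hz, and dividing by h_total * v_total yields frames per second.
 */
#include <stdio.h>

int main(void)
{
	unsigned int pix_clk_100hz = 1485000;        /* 148.5 MHz */
	unsigned int h_total = 2200, v_total = 1125; /* 1080p timing */

	printf("%u Hz\n", (pix_clk_100hz * 100) / (h_total * v_total));
	return 0; /* prints 60 Hz */
}
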
4535static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4536{
4537 int i = 0;
4538
4539 if (context->stream_count < 2)
4540 return;
4541 for (i = 0; i < context->stream_count ; i++) {
4542 if (!context->streams[i])
4543 continue;
4544 /*
4545 * TODO: add a function to read AMD VSDB bits and set
fa2123db 4546 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 4547 * For now it's set to false
4548 */
4549 set_multisync_trigger_params(context->streams[i]);
4550 }
4551 set_master_stream(context->streams, context->stream_count);
4552}
4553
4554static struct dc_stream_state *
4555create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4556 const struct drm_display_mode *drm_mode,
b333730d 4557 const struct dm_connector_state *dm_state,
4558 const struct dc_stream_state *old_stream,
4559 int requested_bpc)
4560{
4561 struct drm_display_mode *preferred_mode = NULL;
391ef035 4562 struct drm_connector *drm_connector;
4563 const struct drm_connector_state *con_state =
4564 dm_state ? &dm_state->base : NULL;
0971c40e 4565 struct dc_stream_state *stream = NULL;
4566 struct drm_display_mode mode = *drm_mode;
4567 bool native_mode_found = false;
4568 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4569 int mode_refresh;
58124bf8 4570 int preferred_refresh = 0;
defeb878 4571#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 4572 struct dsc_dec_dpcd_caps dsc_caps;
df2f1015 4573#endif
df2f1015 4574 uint32_t link_bandwidth_kbps;
b333730d 4575
aed15309 4576 struct dc_sink *sink = NULL;
b830ebc9 4577 if (aconnector == NULL) {
e7b07cee 4578 DRM_ERROR("aconnector is NULL!\n");
64245fa7 4579 return stream;
4580 }
4581
e7b07cee 4582 drm_connector = &aconnector->base;
2e0ac3d6 4583
f4ac176e 4584 if (!aconnector->dc_sink) {
4585 sink = create_fake_sink(aconnector);
4586 if (!sink)
4587 return stream;
4588 } else {
4589 sink = aconnector->dc_sink;
dcd5fb82 4590 dc_sink_retain(sink);
f4ac176e 4591 }
2e0ac3d6 4592
aed15309 4593 stream = dc_create_stream_for_sink(sink);
4562236b 4594
b830ebc9 4595 if (stream == NULL) {
e7b07cee 4596 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 4597 goto finish;
4598 }
4599
4600 stream->dm_stream_context = aconnector;
4601
4602 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4603 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4604
4605 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4606 /* Search for preferred mode */
4607 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4608 native_mode_found = true;
4609 break;
4610 }
4611 }
4612 if (!native_mode_found)
4613 preferred_mode = list_first_entry_or_null(
4614 &aconnector->base.modes,
4615 struct drm_display_mode,
4616 head);
4617
4618 mode_refresh = drm_mode_vrefresh(&mode);
4619
b830ebc9 4620 if (preferred_mode == NULL) {
 4621	/*
 4622	 * This may not be an error. The use case is when we have no
 4623	 * usermode calls to reset and set mode upon hotplug. In this
 4624	 * case, we call set mode ourselves to restore the previous mode
 4625	 * and the mode list may not be filled in yet.
 4626	 */
f1ad2f5e 4627 DRM_DEBUG_DRIVER("No preferred mode found\n");
4628 } else {
4629 decide_crtc_timing_for_drm_display_mode(
4630 &mode, preferred_mode,
f4791779 4631 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 4632 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4633 }
4634
4635 if (!dm_state)
4636 drm_mode_set_crtcinfo(&mode, 0);
4637
4638 /*
4639 * If scaling is enabled and refresh rate didn't change
4640 * we copy the vic and polarities of the old timings
4641 */
4642 if (!scale || mode_refresh != preferred_refresh)
4643 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 4644 &mode, &aconnector->base, con_state, NULL, requested_bpc);
4645 else
4646 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 4647 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
b333730d 4648
4649 stream->timing.flags.DSC = 0;
4650
4651 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 4652#if defined(CONFIG_DRM_AMD_DC_DCN)
4653 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4654 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6d824ed5 4655 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
df2f1015 4656 &dsc_caps);
defeb878 4657#endif
4658 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4659 dc_link_get_link_cap(aconnector->dc_link));
4660
defeb878 4661#if defined(CONFIG_DRM_AMD_DC_DCN)
097e6d98 4662 if (dsc_caps.is_dsc_supported) {
0417df16 4663 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 4664 &dsc_caps,
0417df16 4665 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4666 link_bandwidth_kbps,
4667 &stream->timing,
4668 &stream->timing.dsc_cfg))
4669 stream->timing.flags.DSC = 1;
27e84dd7 4670 /* Overwrite the stream flag if DSC is enabled through debugfs */
4671 if (aconnector->dsc_settings.dsc_clock_en)
4672 stream->timing.flags.DSC = 1;
734e4c97 4673
4674 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_width)
4675 stream->timing.dsc_cfg.num_slices_h = DIV_ROUND_UP(stream->timing.h_addressable,
4676 aconnector->dsc_settings.dsc_slice_width);
4677
4678 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_height)
4679 stream->timing.dsc_cfg.num_slices_v = DIV_ROUND_UP(stream->timing.v_addressable,
4680 aconnector->dsc_settings.dsc_slice_height);
4681
4682 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4683 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
097e6d98 4684 }
39a4eb85 4685#endif
df2f1015 4686 }
39a4eb85 4687
4688 update_stream_scaling_settings(&mode, dm_state, stream);
4689
4690 fill_audio_info(
4691 &stream->audio_info,
4692 drm_connector,
aed15309 4693 sink);
e7b07cee 4694
ceb3dbb4 4695 update_stream_signal(stream, sink);
9182b4cb 4696
4697 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4698 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4699 if (stream->link->psr_settings.psr_feature_enabled) {
 4700	/*
 4701	 * Decide whether the stream supports VSC SDP colorimetry
 4702	 * before building the VSC info packet.
 4703	 */
4704 stream->use_vsc_sdp_for_colorimetry = false;
4705 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4706 stream->use_vsc_sdp_for_colorimetry =
4707 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4708 } else {
4709 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4710 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 4711 }
8a488f5d 4712 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309 4713 }
aed15309 4714finish:
dcd5fb82 4715 dc_sink_release(sink);
9e3efe3e 4716
4717 return stream;
4718}
4719
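/*
 * The slice-count math above, standalone (the forced slice sizes are
 * examples, not sink capabilities): a debugfs-forced slice width or
 * height becomes a slice count by dividing the addressable dimension
 * and rounding up, the same as the kernel's DIV_ROUND_UP.
 */
static unsigned int example_num_slices(unsigned int addressable,
				       unsigned int slice)
{
	return (addressable + slice - 1) / slice;
}
/* example_num_slices(3840, 1024) == 4; example_num_slices(2160, 108) == 20 */
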
7578ecda 4720static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4721{
4722 drm_crtc_cleanup(crtc);
4723 kfree(crtc);
4724}
4725
4726static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 4727 struct drm_crtc_state *state)
4728{
4729 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4730
 4731	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4732 if (cur->stream)
4733 dc_stream_release(cur->stream);
4734
4735
4736 __drm_atomic_helper_crtc_destroy_state(state);
4737
4738
4739 kfree(state);
4740}
4741
4742static void dm_crtc_reset_state(struct drm_crtc *crtc)
4743{
4744 struct dm_crtc_state *state;
4745
4746 if (crtc->state)
4747 dm_crtc_destroy_state(crtc, crtc->state);
4748
4749 state = kzalloc(sizeof(*state), GFP_KERNEL);
4750 if (WARN_ON(!state))
4751 return;
4752
4753 crtc->state = &state->base;
4754 crtc->state->crtc = crtc;
4755
4756}
4757
4758static struct drm_crtc_state *
4759dm_crtc_duplicate_state(struct drm_crtc *crtc)
4760{
4761 struct dm_crtc_state *state, *cur;
4762
4763 cur = to_dm_crtc_state(crtc->state);
4764
4765 if (WARN_ON(!crtc->state))
4766 return NULL;
4767
2004f45e 4768 state = kzalloc(sizeof(*state), GFP_KERNEL);
4769 if (!state)
4770 return NULL;
4771
4772 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4773
4774 if (cur->stream) {
4775 state->stream = cur->stream;
4776 dc_stream_retain(state->stream);
4777 }
4778
d6ef9b41 4779 state->active_planes = cur->active_planes;
180db303 4780 state->vrr_params = cur->vrr_params;
98e6436d 4781 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 4782 state->abm_level = cur->abm_level;
4783 state->vrr_supported = cur->vrr_supported;
4784 state->freesync_config = cur->freesync_config;
14b25846 4785 state->crc_src = cur->crc_src;
4786 state->cm_has_degamma = cur->cm_has_degamma;
4787 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
98e6436d 4788
 4789	/* TODO: Duplicate dc_stream once the stream object is flattened */
4790
4791 return &state->base;
4792}
4793
4794static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4795{
4796 enum dc_irq_source irq_source;
4797 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4798 struct amdgpu_device *adev = crtc->dev->dev_private;
4799 int rc;
4800
4801 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4802
4803 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4804
4805 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4806 acrtc->crtc_id, enable ? "en" : "dis", rc);
4807 return rc;
4808}
4809
4810static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4811{
4812 enum dc_irq_source irq_source;
4813 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4814 struct amdgpu_device *adev = crtc->dev->dev_private;
4815 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4816 int rc = 0;
4817
4818 if (enable) {
4819 /* vblank irq on -> Only need vupdate irq in vrr mode */
4820 if (amdgpu_dm_vrr_active(acrtc_state))
4821 rc = dm_set_vupdate_irq(crtc, true);
4822 } else {
4823 /* vblank irq off -> vupdate irq off */
4824 rc = dm_set_vupdate_irq(crtc, false);
4825 }
4826
4827 if (rc)
4828 return rc;
4829
4830 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
a0e30392 4831 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4832}
4833
4834static int dm_enable_vblank(struct drm_crtc *crtc)
4835{
4836 return dm_set_vblank(crtc, true);
4837}
4838
4839static void dm_disable_vblank(struct drm_crtc *crtc)
4840{
4841 dm_set_vblank(crtc, false);
4842}
4843
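/*
 * A sketch of how the vblank toggle above fans out (the enum values
 * are placeholders, not the real dc_irq_source numbering): each CRTC's
 * OTG instance indexes a per-type interrupt range, and in VRR mode
 * enabling vblank also enables the matching vupdate interrupt.
 */
enum example_irq { EX_IRQ_VBLANK_BASE = 0x10, EX_IRQ_VUPDATE_BASE = 0x20 };

static int example_vblank_source(int otg_inst)
{
	return EX_IRQ_VBLANK_BASE + otg_inst;
}

static int example_vupdate_source(int otg_inst)
{
	return EX_IRQ_VUPDATE_BASE + otg_inst; /* gated on VRR being active */
}
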
 4844/* Only the options currently available to the driver are implemented */
4845static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4846 .reset = dm_crtc_reset_state,
4847 .destroy = amdgpu_dm_crtc_destroy,
4848 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4849 .set_config = drm_atomic_helper_set_config,
4850 .page_flip = drm_atomic_helper_page_flip,
4851 .atomic_duplicate_state = dm_crtc_duplicate_state,
4852 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 4853 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 4854 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 4855 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 4856 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4857 .enable_vblank = dm_enable_vblank,
4858 .disable_vblank = dm_disable_vblank,
e3eff4b5 4859 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4860};
4861
4862static enum drm_connector_status
4863amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4864{
4865 bool connected;
c84dec2f 4866 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 4867
 4868	/*
 4869	 * Notes:
 4870	 * 1. This interface is NOT called in context of HPD irq.
 4871	 * 2. This interface *is* called in context of a user-mode ioctl,
 4872	 *    which makes it a bad place for *any* MST-related activity.
 4873	 */
e7b07cee 4874
4875 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4876 !aconnector->fake_enable)
4877 connected = (aconnector->dc_sink != NULL);
4878 else
4879 connected = (aconnector->base.force == DRM_FORCE_ON);
4880
4881 return (connected ? connector_status_connected :
4882 connector_status_disconnected);
4883}
4884
4885int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4886 struct drm_connector_state *connector_state,
4887 struct drm_property *property,
4888 uint64_t val)
4889{
4890 struct drm_device *dev = connector->dev;
4891 struct amdgpu_device *adev = dev->dev_private;
4892 struct dm_connector_state *dm_old_state =
4893 to_dm_connector_state(connector->state);
4894 struct dm_connector_state *dm_new_state =
4895 to_dm_connector_state(connector_state);
4896
4897 int ret = -EINVAL;
4898
4899 if (property == dev->mode_config.scaling_mode_property) {
4900 enum amdgpu_rmx_type rmx_type;
4901
4902 switch (val) {
4903 case DRM_MODE_SCALE_CENTER:
4904 rmx_type = RMX_CENTER;
4905 break;
4906 case DRM_MODE_SCALE_ASPECT:
4907 rmx_type = RMX_ASPECT;
4908 break;
4909 case DRM_MODE_SCALE_FULLSCREEN:
4910 rmx_type = RMX_FULL;
4911 break;
4912 case DRM_MODE_SCALE_NONE:
4913 default:
4914 rmx_type = RMX_OFF;
4915 break;
4916 }
4917
4918 if (dm_old_state->scaling == rmx_type)
4919 return 0;
4920
4921 dm_new_state->scaling = rmx_type;
4922 ret = 0;
4923 } else if (property == adev->mode_info.underscan_hborder_property) {
4924 dm_new_state->underscan_hborder = val;
4925 ret = 0;
4926 } else if (property == adev->mode_info.underscan_vborder_property) {
4927 dm_new_state->underscan_vborder = val;
4928 ret = 0;
4929 } else if (property == adev->mode_info.underscan_property) {
4930 dm_new_state->underscan_enable = val;
4931 ret = 0;
4932 } else if (property == adev->mode_info.abm_level_property) {
4933 dm_new_state->abm_level = val;
4934 ret = 0;
4935 }
4936
4937 return ret;
4938}
4939
4940int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4941 const struct drm_connector_state *state,
4942 struct drm_property *property,
4943 uint64_t *val)
4944{
4945 struct drm_device *dev = connector->dev;
4946 struct amdgpu_device *adev = dev->dev_private;
4947 struct dm_connector_state *dm_state =
4948 to_dm_connector_state(state);
4949 int ret = -EINVAL;
4950
4951 if (property == dev->mode_config.scaling_mode_property) {
4952 switch (dm_state->scaling) {
4953 case RMX_CENTER:
4954 *val = DRM_MODE_SCALE_CENTER;
4955 break;
4956 case RMX_ASPECT:
4957 *val = DRM_MODE_SCALE_ASPECT;
4958 break;
4959 case RMX_FULL:
4960 *val = DRM_MODE_SCALE_FULLSCREEN;
4961 break;
4962 case RMX_OFF:
4963 default:
4964 *val = DRM_MODE_SCALE_NONE;
4965 break;
4966 }
4967 ret = 0;
4968 } else if (property == adev->mode_info.underscan_hborder_property) {
4969 *val = dm_state->underscan_hborder;
4970 ret = 0;
4971 } else if (property == adev->mode_info.underscan_vborder_property) {
4972 *val = dm_state->underscan_vborder;
4973 ret = 0;
4974 } else if (property == adev->mode_info.underscan_property) {
4975 *val = dm_state->underscan_enable;
4976 ret = 0;
4977 } else if (property == adev->mode_info.abm_level_property) {
4978 *val = dm_state->abm_level;
4979 ret = 0;
e7b07cee 4980 }
c1ee92f9 4981
4982 return ret;
4983}
4984
4985static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4986{
4987 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4988
4989 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4990}
4991
7578ecda 4992static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 4993{
c84dec2f 4994 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4995 const struct dc_link *link = aconnector->dc_link;
4996 struct amdgpu_device *adev = connector->dev->dev_private;
4997 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 4998
e7b07cee
HW
4999#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5000 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5001
89fc8d4e 5002 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
5003 link->type != dc_connection_none &&
5004 dm->backlight_dev) {
5005 backlight_device_unregister(dm->backlight_dev);
5006 dm->backlight_dev = NULL;
e7b07cee
HW
5007 }
5008#endif
dcd5fb82
MF
5009
5010 if (aconnector->dc_em_sink)
5011 dc_sink_release(aconnector->dc_em_sink);
5012 aconnector->dc_em_sink = NULL;
5013 if (aconnector->dc_sink)
5014 dc_sink_release(aconnector->dc_sink);
5015 aconnector->dc_sink = NULL;
5016
e86e8947 5017 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
5018 drm_connector_unregister(connector);
5019 drm_connector_cleanup(connector);
526c654a
ED
5020 if (aconnector->i2c) {
5021 i2c_del_adapter(&aconnector->i2c->base);
5022 kfree(aconnector->i2c);
5023 }
7daec99f 5024 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 5025
e7b07cee
HW
5026 kfree(connector);
5027}
5028
5029void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5030{
5031 struct dm_connector_state *state =
5032 to_dm_connector_state(connector->state);
5033
df099b9b
LSL
5034 if (connector->state)
5035 __drm_atomic_helper_connector_destroy_state(connector->state);
5036
e7b07cee
HW
5037 kfree(state);
5038
5039 state = kzalloc(sizeof(*state), GFP_KERNEL);
5040
5041 if (state) {
5042 state->scaling = RMX_OFF;
5043 state->underscan_enable = false;
5044 state->underscan_hborder = 0;
5045 state->underscan_vborder = 0;
01933ba4 5046 state->base.max_requested_bpc = 8;
3261e013
ML
5047 state->vcpi_slots = 0;
5048 state->pbn = 0;
c3e50f89
NK
5049 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5050 state->abm_level = amdgpu_dm_abm_level;
5051
df099b9b 5052 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
5053 }
5054}
5055
3ee6b26b
AD
5056struct drm_connector_state *
5057amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
5058{
5059 struct dm_connector_state *state =
5060 to_dm_connector_state(connector->state);
5061
5062 struct dm_connector_state *new_state =
5063 kmemdup(state, sizeof(*state), GFP_KERNEL);
5064
98e6436d
AK
5065 if (!new_state)
5066 return NULL;
e7b07cee 5067
98e6436d
AK
5068 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5069
5070 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 5071 new_state->abm_level = state->abm_level;
922454c2
NK
5072 new_state->scaling = state->scaling;
5073 new_state->underscan_enable = state->underscan_enable;
5074 new_state->underscan_hborder = state->underscan_hborder;
5075 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
5076 new_state->vcpi_slots = state->vcpi_slots;
5077 new_state->pbn = state->pbn;
98e6436d 5078 return &new_state->base;
e7b07cee
HW
5079}
5080
14f04fa4
AD
5081static int
5082amdgpu_dm_connector_late_register(struct drm_connector *connector)
5083{
5084 struct amdgpu_dm_connector *amdgpu_dm_connector =
5085 to_amdgpu_dm_connector(connector);
00a8037e 5086 int r;
14f04fa4 5087
00a8037e
AD
5088 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5089 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5090 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5091 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5092 if (r)
5093 return r;
5094 }
5095
5096#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
5097 connector_debugfs_init(amdgpu_dm_connector);
5098#endif
5099
5100 return 0;
5101}
5102
e7b07cee
HW
5103static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5104 .reset = amdgpu_dm_connector_funcs_reset,
5105 .detect = amdgpu_dm_connector_detect,
5106 .fill_modes = drm_helper_probe_single_connector_modes,
5107 .destroy = amdgpu_dm_connector_destroy,
5108 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5109 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5110 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 5111 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 5112 .late_register = amdgpu_dm_connector_late_register,
526c654a 5113 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
5114};
5115
e7b07cee
HW
5116static int get_modes(struct drm_connector *connector)
5117{
5118 return amdgpu_dm_connector_get_modes(connector);
5119}
5120
c84dec2f 5121static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5122{
5123 struct dc_sink_init_data init_params = {
5124 .link = aconnector->dc_link,
5125 .sink_signal = SIGNAL_TYPE_VIRTUAL
5126 };
70e8ffc5 5127 struct edid *edid;
e7b07cee 5128
a89ff457 5129 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
5130 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5131 aconnector->base.name);
5132
5133 aconnector->base.force = DRM_FORCE_OFF;
5134 aconnector->base.override_edid = false;
5135 return;
5136 }
5137
70e8ffc5
HW
5138 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5139
e7b07cee
HW
5140 aconnector->edid = edid;
5141
5142 aconnector->dc_em_sink = dc_link_add_remote_sink(
5143 aconnector->dc_link,
5144 (uint8_t *)edid,
5145 (edid->extensions + 1) * EDID_LENGTH,
5146 &init_params);
5147
dcd5fb82 5148 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
5149 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5150 aconnector->dc_link->local_sink :
5151 aconnector->dc_em_sink;
dcd5fb82
MF
5152 dc_sink_retain(aconnector->dc_sink);
5153 }
e7b07cee
HW
5154}
5155
c84dec2f 5156static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5157{
5158 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5159
1f6010a9
DF
5160 /*
5161 * In case of a headless boot with force-on for a DP managed connector,
e7b07cee
HW
5162 * these settings have to be != 0 to get an initial modeset.
5163 */
5164 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5165 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5166 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5167 }
5168
5169
5170 aconnector->base.override_edid = true;
5171 create_eml_sink(aconnector);
5172}
5173
cbd14ae7
SW
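/*
 * Create a dc_stream_state for the sink and validate it with DC. If
 * validation fails, retry with progressively lower bpc (down to 6)
 * until a mode the hardware can drive is found.
 */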
5174static struct dc_stream_state *
5175create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5176 const struct drm_display_mode *drm_mode,
5177 const struct dm_connector_state *dm_state,
5178 const struct dc_stream_state *old_stream)
5179{
5180 struct drm_connector *connector = &aconnector->base;
5181 struct amdgpu_device *adev = connector->dev->dev_private;
5182 struct dc_stream_state *stream;
4b7da34b
SW
5183 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5184 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
5185 enum dc_status dc_result = DC_OK;
5186
5187 do {
5188 stream = create_stream_for_sink(aconnector, drm_mode,
5189 dm_state, old_stream,
5190 requested_bpc);
5191 if (stream == NULL) {
5192 DRM_ERROR("Failed to create stream for sink!\n");
5193 break;
5194 }
5195
5196 dc_result = dc_validate_stream(adev->dm.dc, stream);
5197
5198 if (dc_result != DC_OK) {
74a16675 5199 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
5200 drm_mode->hdisplay,
5201 drm_mode->vdisplay,
5202 drm_mode->clock,
74a16675
RS
5203 dc_result,
5204 dc_status_to_str(dc_result));
cbd14ae7
SW
5205
5206 dc_stream_release(stream);
5207 stream = NULL;
5208 requested_bpc -= 2; /* lower bpc to retry validation */
5209 }
5210
5211 } while (stream == NULL && requested_bpc >= 6);
5212
5213 return stream;
5214}
5215
ba9ca088 5216enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 5217 struct drm_display_mode *mode)
e7b07cee
HW
5218{
5219 int result = MODE_ERROR;
5220 struct dc_sink *dc_sink;
e7b07cee 5221 /* TODO: Unhardcode stream count */
0971c40e 5222 struct dc_stream_state *stream;
c84dec2f 5223 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
5224
5225 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5226 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5227 return result;
5228
1f6010a9
DF
5229 /*
5230 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
5231 * EDID management.
5232 */
5233 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5234 !aconnector->dc_em_sink)
5235 handle_edid_mgmt(aconnector);
5236
c84dec2f 5237 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 5238
b830ebc9 5239 if (dc_sink == NULL) {
e7b07cee
HW
5240 DRM_ERROR("dc_sink is NULL!\n");
5241 goto fail;
5242 }
5243
cbd14ae7
SW
5244 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5245 if (stream) {
5246 dc_stream_release(stream);
e7b07cee 5247 result = MODE_OK;
cbd14ae7 5248 }
e7b07cee
HW
5249
5250fail:
5251 /* TODO: error handling */
5252 return result;
5253}
5254
88694af9
NK
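/*
 * Pack the connector's hdr_output_metadata blob into a DC info packet:
 * an HDMI DRM (Dynamic Range and Mastering) infoframe for HDMI, or the
 * equivalent SDP for DP/eDP. Static metadata is always 26 payload bytes
 * plus a 4 byte header.
 */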
5255static int fill_hdr_info_packet(const struct drm_connector_state *state,
5256 struct dc_info_packet *out)
5257{
5258 struct hdmi_drm_infoframe frame;
5259 unsigned char buf[30]; /* 26 + 4 */
5260 ssize_t len;
5261 int ret, i;
5262
5263 memset(out, 0, sizeof(*out));
5264
5265 if (!state->hdr_output_metadata)
5266 return 0;
5267
5268 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5269 if (ret)
5270 return ret;
5271
5272 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5273 if (len < 0)
5274 return (int)len;
5275
5276 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5277 if (len != 30)
5278 return -EINVAL;
5279
5280 /* Prepare the infopacket for DC. */
5281 switch (state->connector->connector_type) {
5282 case DRM_MODE_CONNECTOR_HDMIA:
5283 out->hb0 = 0x87; /* type */
5284 out->hb1 = 0x01; /* version */
5285 out->hb2 = 0x1A; /* length */
5286 out->sb[0] = buf[3]; /* checksum */
5287 i = 1;
5288 break;
5289
5290 case DRM_MODE_CONNECTOR_DisplayPort:
5291 case DRM_MODE_CONNECTOR_eDP:
5292 out->hb0 = 0x00; /* sdp id, zero */
5293 out->hb1 = 0x87; /* type */
5294 out->hb2 = 0x1D; /* payload len - 1 */
5295 out->hb3 = (0x13 << 2); /* sdp version */
5296 out->sb[0] = 0x01; /* version */
5297 out->sb[1] = 0x1A; /* length */
5298 i = 2;
5299 break;
5300
5301 default:
5302 return -EINVAL;
5303 }
5304
5305 memcpy(&out->sb[i], &buf[4], 26);
5306 out->valid = true;
5307
5308 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5309 sizeof(out->sb), false);
5310
5311 return 0;
5312}
5313
5314static bool
5315is_hdr_metadata_different(const struct drm_connector_state *old_state,
5316 const struct drm_connector_state *new_state)
5317{
5318 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5319 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5320
5321 if (old_blob != new_blob) {
5322 if (old_blob && new_blob &&
5323 old_blob->length == new_blob->length)
5324 return memcmp(old_blob->data, new_blob->data,
5325 old_blob->length);
5326
5327 return true;
5328 }
5329
5330 return false;
5331}
5332
5333static int
5334amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 5335 struct drm_atomic_state *state)
88694af9 5336{
51e857af
SP
5337 struct drm_connector_state *new_con_state =
5338 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
5339 struct drm_connector_state *old_con_state =
5340 drm_atomic_get_old_connector_state(state, conn);
5341 struct drm_crtc *crtc = new_con_state->crtc;
5342 struct drm_crtc_state *new_crtc_state;
5343 int ret;
5344
5345 if (!crtc)
5346 return 0;
5347
5348 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5349 struct dc_info_packet hdr_infopacket;
5350
5351 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5352 if (ret)
5353 return ret;
5354
5355 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5356 if (IS_ERR(new_crtc_state))
5357 return PTR_ERR(new_crtc_state);
5358
5359 /*
5360 * DC considers the stream backends changed if the
5361 * static metadata changes. Forcing the modeset also
5362 * gives a simple way for userspace to switch from
b232d4ed
NK
5363 * 8bpc to 10bpc when setting the metadata to enter
5364 * or exit HDR.
5365 *
5366 * Changing the static metadata after it's been
5367 * set is permissible, however. So only force a
5368 * modeset if we're entering or exiting HDR.
88694af9 5369 */
b232d4ed
NK
5370 new_crtc_state->mode_changed =
5371 !old_con_state->hdr_output_metadata ||
5372 !new_con_state->hdr_output_metadata;
88694af9
NK
5373 }
5374
5375 return 0;
5376}
5377
e7b07cee
HW
5378static const struct drm_connector_helper_funcs
5379amdgpu_dm_connector_helper_funcs = {
5380 /*
1f6010a9 5381 * If a second, larger display is hotplugged in fbcon mode, its higher
b830ebc9 5382 * resolution modes are filtered out by drm_mode_validate_size() and go
1f6010a9 5383 * missing after the user starts lightdm. So renew the modes list in the
b830ebc9
HW
5384 * get_modes callback instead of just returning the modes count.
5385 */
e7b07cee
HW
5386 .get_modes = get_modes,
5387 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 5388 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
5389};
5390
5391static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5392{
5393}
5394
bc92c065
NK
5395static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5396{
5397 struct drm_device *dev = new_crtc_state->crtc->dev;
5398 struct drm_plane *plane;
5399
5400 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5401 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5402 return true;
5403 }
5404
5405 return false;
5406}
5407
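/* Count the non-cursor planes that will be enabled on the CRTC. */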
d6ef9b41 5408static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
5409{
5410 struct drm_atomic_state *state = new_crtc_state->state;
5411 struct drm_plane *plane;
5412 int num_active = 0;
5413
5414 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5415 struct drm_plane_state *new_plane_state;
5416
5417 /* Cursor planes are "fake". */
5418 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5419 continue;
5420
5421 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5422
5423 if (!new_plane_state) {
5424 /*
5425 * The plane is enabled on the CRTC and hasn't changed
5426 * state. This means that it previously passed
5427 * validation and is therefore enabled.
5428 */
5429 num_active += 1;
5430 continue;
5431 }
5432
5433 /* We need a framebuffer to be considered enabled. */
5434 num_active += (new_plane_state->fb != NULL);
5435 }
5436
d6ef9b41
NK
5437 return num_active;
5438}
5439
8fe684e9
NK
5440static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5441 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
5442{
5443 struct dm_crtc_state *dm_new_crtc_state =
5444 to_dm_crtc_state(new_crtc_state);
5445
5446 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
5447
5448 if (!dm_new_crtc_state->stream)
5449 return;
5450
5451 dm_new_crtc_state->active_planes =
5452 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
5453}
5454
3ee6b26b
AD
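/*
 * Validate the new CRTC state with DC. Since cursor planes are "fake"
 * and ride on top of a real plane, a stream with an active cursor must
 * also have at least one non-cursor hardware plane enabled.
 */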
5455static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5456 struct drm_crtc_state *state)
e7b07cee
HW
5457{
5458 struct amdgpu_device *adev = crtc->dev->dev_private;
5459 struct dc *dc = adev->dm.dc;
5460 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5461 int ret = -EINVAL;
5462
8fe684e9 5463 dm_update_crtc_active_planes(crtc, state);
d6ef9b41 5464
9b690ef3
BL
5465 if (unlikely(!dm_crtc_state->stream &&
5466 modeset_required(state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
5467 WARN_ON(1);
5468 return ret;
5469 }
5470
1f6010a9 5471 /* In some use cases, like reset, no stream is attached */
e7b07cee
HW
5472 if (!dm_crtc_state->stream)
5473 return 0;
5474
bc92c065
NK
5475 /*
5476 * We want at least one hardware plane enabled to use
5477 * the stream with a cursor enabled.
5478 */
c14a005c 5479 if (state->enable && state->active &&
bc92c065 5480 does_crtc_have_active_cursor(state) &&
d6ef9b41 5481 dm_crtc_state->active_planes == 0)
c14a005c
NK
5482 return -EINVAL;
5483
62c933f9 5484 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
5485 return 0;
5486
5487 return ret;
5488}
5489
3ee6b26b
AD
5490static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5491 const struct drm_display_mode *mode,
5492 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
5493{
5494 return true;
5495}
5496
5497static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5498 .disable = dm_crtc_helper_disable,
5499 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
5500 .mode_fixup = dm_crtc_helper_mode_fixup,
5501 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
5502};
5503
5504static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5505{
5506
5507}
5508
3261e013
ML
5509static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5510{
5511 switch (display_color_depth) {
5512 case COLOR_DEPTH_666:
5513 return 6;
5514 case COLOR_DEPTH_888:
5515 return 8;
5516 case COLOR_DEPTH_101010:
5517 return 10;
5518 case COLOR_DEPTH_121212:
5519 return 12;
5520 case COLOR_DEPTH_141414:
5521 return 14;
5522 case COLOR_DEPTH_161616:
5523 return 16;
5524 default:
5525 break;
5526 }
5527 return 0;
5528}
5529
3ee6b26b
AD
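/*
 * For MST sinks, compute the bandwidth (PBN) this mode needs from the
 * pixel clock and the effective bpp (bpc times 3 components), then ask
 * the MST manager for the matching number of VCPI time slots.
 */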
5530static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5531 struct drm_crtc_state *crtc_state,
5532 struct drm_connector_state *conn_state)
e7b07cee 5533{
3261e013
ML
5534 struct drm_atomic_state *state = crtc_state->state;
5535 struct drm_connector *connector = conn_state->connector;
5536 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5537 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5538 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5539 struct drm_dp_mst_topology_mgr *mst_mgr;
5540 struct drm_dp_mst_port *mst_port;
5541 enum dc_color_depth color_depth;
5542 int clock, bpp = 0;
1bc22f20 5543 bool is_y420 = false;
3261e013
ML
5544
5545 if (!aconnector->port || !aconnector->dc_sink)
5546 return 0;
5547
5548 mst_port = aconnector->port;
5549 mst_mgr = &aconnector->mst_port->mst_mgr;
5550
5551 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5552 return 0;
5553
5554 if (!state->duplicated) {
cbd14ae7 5555 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
5556 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5557 aconnector->force_yuv420_output;
cbd14ae7
SW
5558 color_depth = convert_color_depth_from_display_info(connector,
5559 is_y420,
5560 max_bpc);
3261e013
ML
5561 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5562 clock = adjusted_mode->clock;
dc48529f 5563 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
5564 }
5565 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5566 mst_mgr,
5567 mst_port,
1c6c1cb5 5568 dm_new_connector_state->pbn,
03ca9600 5569 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
5570 if (dm_new_connector_state->vcpi_slots < 0) {
5571 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5572 return dm_new_connector_state->vcpi_slots;
5573 }
e7b07cee
HW
5574 return 0;
5575}
5576
5577const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5578 .disable = dm_encoder_helper_disable,
5579 .atomic_check = dm_encoder_helper_atomic_check
5580};
5581
d9fe1a4c 5582#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
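/*
 * Walk the connectors in the atomic state, match each MST connector to
 * its DC stream and enable or disable DSC on the MST port accordingly,
 * recomputing the PBN and VCPI slot allocation from the DSC bpp and
 * pixel clock when DSC is on.
 */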
5583static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5584 struct dc_state *dc_state)
5585{
5586 struct dc_stream_state *stream = NULL;
5587 struct drm_connector *connector;
5588 struct drm_connector_state *new_con_state, *old_con_state;
5589 struct amdgpu_dm_connector *aconnector;
5590 struct dm_connector_state *dm_conn_state;
5591 int i, j, clock, bpp;
5592 int vcpi, pbn_div, pbn = 0;
5593
5594 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5595
5596 aconnector = to_amdgpu_dm_connector(connector);
5597
5598 if (!aconnector->port)
5599 continue;
5600
5601 if (!new_con_state || !new_con_state->crtc)
5602 continue;
5603
5604 dm_conn_state = to_dm_connector_state(new_con_state);
5605
5606 for (j = 0; j < dc_state->stream_count; j++) {
5607 stream = dc_state->streams[j];
5608 if (!stream)
5609 continue;
5610
5611 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5612 break;
5613
5614 stream = NULL;
5615 }
5616
5617 if (!stream)
5618 continue;
5619
5620 if (stream->timing.flags.DSC != 1) {
5621 drm_dp_mst_atomic_enable_dsc(state,
5622 aconnector->port,
5623 dm_conn_state->pbn,
5624 0,
5625 false);
5626 continue;
5627 }
5628
5629 pbn_div = dm_mst_get_pbn_divider(stream->link);
5630 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5631 clock = stream->timing.pix_clk_100hz / 10;
5632 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5633 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5634 aconnector->port,
5635 pbn, pbn_div,
5636 true);
5637 if (vcpi < 0)
5638 return vcpi;
5639
5640 dm_conn_state->pbn = pbn;
5641 dm_conn_state->vcpi_slots = vcpi;
5642 }
5643 return 0;
5644}
d9fe1a4c 5645#endif
29b9ba74 5646
e7b07cee
HW
5647static void dm_drm_plane_reset(struct drm_plane *plane)
5648{
5649 struct dm_plane_state *amdgpu_state = NULL;
5650
5651 if (plane->state)
5652 plane->funcs->atomic_destroy_state(plane, plane->state);
5653
5654 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 5655 WARN_ON(amdgpu_state == NULL);
1f6010a9 5656
7ddaef96
NK
5657 if (amdgpu_state)
5658 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
5659}
5660
5661static struct drm_plane_state *
5662dm_drm_plane_duplicate_state(struct drm_plane *plane)
5663{
5664 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5665
5666 old_dm_plane_state = to_dm_plane_state(plane->state);
5667 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5668 if (!dm_plane_state)
5669 return NULL;
5670
5671 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5672
3be5262e
HW
5673 if (old_dm_plane_state->dc_state) {
5674 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5675 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
5676 }
5677
5678 return &dm_plane_state->base;
5679}
5680
dfd84d90 5681static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 5682 struct drm_plane_state *state)
e7b07cee
HW
5683{
5684 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5685
3be5262e
HW
5686 if (dm_plane_state->dc_state)
5687 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 5688
0627bbd3 5689 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
5690}
5691
5692static const struct drm_plane_funcs dm_plane_funcs = {
5693 .update_plane = drm_atomic_helper_update_plane,
5694 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 5695 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
5696 .reset = dm_drm_plane_reset,
5697 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5698 .atomic_destroy_state = dm_drm_plane_destroy_state,
5699};
5700
3ee6b26b
AD
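/*
 * Pin the framebuffer BO ahead of scanout: reserve it through TTM, pin
 * it in a scanout-capable domain, bind it into GART, then record the GPU
 * address and (re)fill the DC plane buffer attributes from the tiling
 * flags and TMZ state.
 */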
5701static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5702 struct drm_plane_state *new_state)
e7b07cee
HW
5703{
5704 struct amdgpu_framebuffer *afb;
5705 struct drm_gem_object *obj;
5d43be0c 5706 struct amdgpu_device *adev;
e7b07cee 5707 struct amdgpu_bo *rbo;
e7b07cee 5708 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
5709 struct list_head list;
5710 struct ttm_validate_buffer tv;
5711 struct ww_acquire_ctx ticket;
e0634e8d 5712 uint64_t tiling_flags;
5d43be0c
CK
5713 uint32_t domain;
5714 int r;
5888f07a 5715 bool tmz_surface = false;
87b7ebc2 5716 bool force_disable_dcc = false;
e7b07cee
HW
5717
5718 dm_plane_state_old = to_dm_plane_state(plane->state);
5719 dm_plane_state_new = to_dm_plane_state(new_state);
5720
5721 if (!new_state->fb) {
f1ad2f5e 5722 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
5723 return 0;
5724 }
5725
5726 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 5727 obj = new_state->fb->obj[0];
e7b07cee 5728 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 5729 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
5730 INIT_LIST_HEAD(&list);
5731
5732 tv.bo = &rbo->tbo;
5733 tv.num_shared = 1;
5734 list_add(&tv.head, &list);
5735
9165fb87 5736 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
5737 if (r) {
5738 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 5739 return r;
0f257b09 5740 }
e7b07cee 5741
5d43be0c 5742 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 5743 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
5744 else
5745 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 5746
7b7c6c81 5747 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 5748 if (unlikely(r != 0)) {
30b7c614
HW
5749 if (r != -ERESTARTSYS)
5750 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 5751 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
5752 return r;
5753 }
5754
bb812f1e
JZ
5755 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5756 if (unlikely(r != 0)) {
5757 amdgpu_bo_unpin(rbo);
0f257b09 5758 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5759 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
5760 return r;
5761 }
7df7e505
NK
5762
5763 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5764
5888f07a
HW
5765 tmz_surface = amdgpu_bo_encrypted(rbo);
5766
0f257b09 5767 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5768
7b7c6c81 5769 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
5770
5771 amdgpu_bo_ref(rbo);
5772
3be5262e
HW
5773 if (dm_plane_state_new->dc_state &&
5774 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5775 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
e7b07cee 5776
87b7ebc2 5777 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
320932bf 5778 fill_plane_buffer_attributes(
695af5f9
NK
5779 adev, afb, plane_state->format, plane_state->rotation,
5780 tiling_flags, &plane_state->tiling_info,
320932bf 5781 &plane_state->plane_size, &plane_state->dcc,
5888f07a 5782 &plane_state->address, tmz_surface,
87b7ebc2 5783 force_disable_dcc);
e7b07cee
HW
5784 }
5785
e7b07cee
HW
5786 return 0;
5787}
5788
3ee6b26b
AD
5789static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5790 struct drm_plane_state *old_state)
e7b07cee
HW
5791{
5792 struct amdgpu_bo *rbo;
e7b07cee
HW
5793 int r;
5794
5795 if (!old_state->fb)
5796 return;
5797
e68d14dd 5798 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
5799 r = amdgpu_bo_reserve(rbo, false);
5800 if (unlikely(r)) {
5801 DRM_ERROR("failed to reserve rbo before unpin\n");
5802 return;
b830ebc9
HW
5803 }
5804
5805 amdgpu_bo_unpin(rbo);
5806 amdgpu_bo_unreserve(rbo);
5807 amdgpu_bo_unref(&rbo);
e7b07cee
HW
5808}
5809
8c44515b
AP
5810static int dm_plane_helper_check_state(struct drm_plane_state *state,
5811 struct drm_crtc_state *new_crtc_state)
5812{
5813 int max_downscale = 0;
5814 int max_upscale = INT_MAX;
5815
5816 /* TODO: These should be checked against DC plane caps */
5817 return drm_atomic_helper_check_plane_state(
5818 state, new_crtc_state, max_downscale, max_upscale, true, true);
5819}
5820
7578ecda
AD
5821static int dm_plane_atomic_check(struct drm_plane *plane,
5822 struct drm_plane_state *state)
cbd19488
AG
5823{
5824 struct amdgpu_device *adev = plane->dev->dev_private;
5825 struct dc *dc = adev->dm.dc;
78171832 5826 struct dm_plane_state *dm_plane_state;
695af5f9 5827 struct dc_scaling_info scaling_info;
8c44515b 5828 struct drm_crtc_state *new_crtc_state;
695af5f9 5829 int ret;
78171832
NK
5830
5831 dm_plane_state = to_dm_plane_state(state);
cbd19488 5832
3be5262e 5833 if (!dm_plane_state->dc_state)
9a3329b1 5834 return 0;
cbd19488 5835
8c44515b
AP
5836 new_crtc_state =
5837 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5838 if (!new_crtc_state)
5839 return -EINVAL;
5840
5841 ret = dm_plane_helper_check_state(state, new_crtc_state);
5842 if (ret)
5843 return ret;
5844
695af5f9
NK
5845 ret = fill_dc_scaling_info(state, &scaling_info);
5846 if (ret)
5847 return ret;
a05bcff1 5848
62c933f9 5849 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
5850 return 0;
5851
5852 return -EINVAL;
5853}
5854
674e78ac
NK
5855static int dm_plane_atomic_async_check(struct drm_plane *plane,
5856 struct drm_plane_state *new_plane_state)
5857{
5858 /* Only support async updates on cursor planes. */
5859 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5860 return -EINVAL;
5861
5862 return 0;
5863}
5864
5865static void dm_plane_atomic_async_update(struct drm_plane *plane,
5866 struct drm_plane_state *new_state)
5867{
5868 struct drm_plane_state *old_state =
5869 drm_atomic_get_old_plane_state(new_state->state, plane);
5870
332af874 5871 swap(plane->state->fb, new_state->fb);
674e78ac
NK
5872
5873 plane->state->src_x = new_state->src_x;
5874 plane->state->src_y = new_state->src_y;
5875 plane->state->src_w = new_state->src_w;
5876 plane->state->src_h = new_state->src_h;
5877 plane->state->crtc_x = new_state->crtc_x;
5878 plane->state->crtc_y = new_state->crtc_y;
5879 plane->state->crtc_w = new_state->crtc_w;
5880 plane->state->crtc_h = new_state->crtc_h;
5881
5882 handle_cursor_update(plane, old_state);
5883}
5884
e7b07cee
HW
5885static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5886 .prepare_fb = dm_plane_helper_prepare_fb,
5887 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 5888 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
5889 .atomic_async_check = dm_plane_atomic_async_check,
5890 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
5891};
5892
5893/*
5894 * TODO: these are currently initialized to RGB formats only.
5895 * For future use cases we should either initialize them dynamically based on
5896 * plane capabilities, or initialize this array to all formats, so the
1f6010a9 5897 * internal drm check will succeed, and let DC implement the proper check.
e7b07cee 5898 */
d90371b0 5899static const uint32_t rgb_formats[] = {
e7b07cee
HW
5900 DRM_FORMAT_XRGB8888,
5901 DRM_FORMAT_ARGB8888,
5902 DRM_FORMAT_RGBA8888,
5903 DRM_FORMAT_XRGB2101010,
5904 DRM_FORMAT_XBGR2101010,
5905 DRM_FORMAT_ARGB2101010,
5906 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
5907 DRM_FORMAT_XBGR8888,
5908 DRM_FORMAT_ABGR8888,
46dd9ff7 5909 DRM_FORMAT_RGB565,
e7b07cee
HW
5910};
5911
0d579c7e
NK
5912static const uint32_t overlay_formats[] = {
5913 DRM_FORMAT_XRGB8888,
5914 DRM_FORMAT_ARGB8888,
5915 DRM_FORMAT_RGBA8888,
5916 DRM_FORMAT_XBGR8888,
5917 DRM_FORMAT_ABGR8888,
7267a1a9 5918 DRM_FORMAT_RGB565
e7b07cee
HW
5919};
5920
5921static const u32 cursor_formats[] = {
5922 DRM_FORMAT_ARGB8888
5923};
5924
37c6a93b
NK
5925static int get_plane_formats(const struct drm_plane *plane,
5926 const struct dc_plane_cap *plane_cap,
5927 uint32_t *formats, int max_formats)
e7b07cee 5928{
37c6a93b
NK
5929 int i, num_formats = 0;
5930
5931 /*
5932 * TODO: Query support for each group of formats directly from
5933 * DC plane caps. This will require adding more formats to the
5934 * caps list.
5935 */
e7b07cee 5936
f180b4bc 5937 switch (plane->type) {
e7b07cee 5938 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
5939 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5940 if (num_formats >= max_formats)
5941 break;
5942
5943 formats[num_formats++] = rgb_formats[i];
5944 }
5945
ea36ad34 5946 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 5947 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
5948 if (plane_cap && plane_cap->pixel_format_support.p010)
5949 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
5950 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5951 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5952 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
5953 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5954 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 5955 }
e7b07cee 5956 break;
37c6a93b 5957
e7b07cee 5958 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
5959 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5960 if (num_formats >= max_formats)
5961 break;
5962
5963 formats[num_formats++] = overlay_formats[i];
5964 }
e7b07cee 5965 break;
37c6a93b 5966
e7b07cee 5967 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
5968 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5969 if (num_formats >= max_formats)
5970 break;
5971
5972 formats[num_formats++] = cursor_formats[i];
5973 }
e7b07cee
HW
5974 break;
5975 }
5976
37c6a93b
NK
5977 return num_formats;
5978}
5979
5980static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5981 struct drm_plane *plane,
5982 unsigned long possible_crtcs,
5983 const struct dc_plane_cap *plane_cap)
5984{
5985 uint32_t formats[32];
5986 int num_formats;
5987 int res = -EPERM;
ecc874a6 5988 unsigned int supported_rotations;
37c6a93b
NK
5989
5990 num_formats = get_plane_formats(plane, plane_cap, formats,
5991 ARRAY_SIZE(formats));
5992
5993 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5994 &dm_plane_funcs, formats, num_formats,
5995 NULL, plane->type, NULL);
5996 if (res)
5997 return res;
5998
cc1fec57
NK
5999 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6000 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
6001 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6002 BIT(DRM_MODE_BLEND_PREMULTI);
6003
6004 drm_plane_create_alpha_property(plane);
6005 drm_plane_create_blend_mode_property(plane, blend_caps);
6006 }
6007
fc8e5230 6008 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
6009 plane_cap &&
6010 (plane_cap->pixel_format_support.nv12 ||
6011 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
6012 /* This only affects YUV formats. */
6013 drm_plane_create_color_properties(
6014 plane,
6015 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
6016 BIT(DRM_COLOR_YCBCR_BT709) |
6017 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
6018 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6019 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6020 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6021 }
6022
ecc874a6
PLG
6023 supported_rotations =
6024 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6025 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6026
f784112f
MR
6027 if (dm->adev->asic_type >= CHIP_BONAIRE)
6028 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6029 supported_rotations);
ecc874a6 6030
f180b4bc 6031 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 6032
96719c54 6033 /* Create (reset) the plane state */
f180b4bc
HW
6034 if (plane->funcs->reset)
6035 plane->funcs->reset(plane);
96719c54 6036
37c6a93b 6037 return 0;
e7b07cee
HW
6038}
6039
7578ecda
AD
6040static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6041 struct drm_plane *plane,
6042 uint32_t crtc_index)
e7b07cee
HW
6043{
6044 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 6045 struct drm_plane *cursor_plane;
e7b07cee
HW
6046
6047 int res = -ENOMEM;
6048
6049 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6050 if (!cursor_plane)
6051 goto fail;
6052
f180b4bc 6053 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 6054 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
6055
6056 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6057 if (!acrtc)
6058 goto fail;
6059
6060 res = drm_crtc_init_with_planes(
6061 dm->ddev,
6062 &acrtc->base,
6063 plane,
f180b4bc 6064 cursor_plane,
e7b07cee
HW
6065 &amdgpu_dm_crtc_funcs, NULL);
6066
6067 if (res)
6068 goto fail;
6069
6070 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6071
96719c54
HW
6072 /* Create (reset) the plane state */
6073 if (acrtc->base.funcs->reset)
6074 acrtc->base.funcs->reset(&acrtc->base);
6075
e7b07cee
HW
6076 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6077 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6078
6079 acrtc->crtc_id = crtc_index;
6080 acrtc->base.enabled = false;
c37e2d29 6081 acrtc->otg_inst = -1;
e7b07cee
HW
6082
6083 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
6084 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6085 true, MAX_COLOR_LUT_ENTRIES);
086247a4 6086 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e7b07cee
HW
6087
6088 return 0;
6089
6090fail:
b830ebc9
HW
6091 kfree(acrtc);
6092 kfree(cursor_plane);
e7b07cee
HW
6093 return res;
6094}
6095
6096
6097static int to_drm_connector_type(enum signal_type st)
6098{
6099 switch (st) {
6100 case SIGNAL_TYPE_HDMI_TYPE_A:
6101 return DRM_MODE_CONNECTOR_HDMIA;
6102 case SIGNAL_TYPE_EDP:
6103 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
6104 case SIGNAL_TYPE_LVDS:
6105 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
6106 case SIGNAL_TYPE_RGB:
6107 return DRM_MODE_CONNECTOR_VGA;
6108 case SIGNAL_TYPE_DISPLAY_PORT:
6109 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6110 return DRM_MODE_CONNECTOR_DisplayPort;
6111 case SIGNAL_TYPE_DVI_DUAL_LINK:
6112 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6113 return DRM_MODE_CONNECTOR_DVID;
6114 case SIGNAL_TYPE_VIRTUAL:
6115 return DRM_MODE_CONNECTOR_VIRTUAL;
6116
6117 default:
6118 return DRM_MODE_CONNECTOR_Unknown;
6119 }
6120}
6121
2b4c1c05
DV
6122static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6123{
62afb4ad
JRS
6124 struct drm_encoder *encoder;
6125
6126 /* There is only one encoder per connector */
6127 drm_connector_for_each_possible_encoder(connector, encoder)
6128 return encoder;
6129
6130 return NULL;
2b4c1c05
DV
6131}
6132
e7b07cee
HW
6133static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6134{
e7b07cee
HW
6135 struct drm_encoder *encoder;
6136 struct amdgpu_encoder *amdgpu_encoder;
6137
2b4c1c05 6138 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
6139
6140 if (encoder == NULL)
6141 return;
6142
6143 amdgpu_encoder = to_amdgpu_encoder(encoder);
6144
6145 amdgpu_encoder->native_mode.clock = 0;
6146
6147 if (!list_empty(&connector->probed_modes)) {
6148 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 6149
e7b07cee 6150 list_for_each_entry(preferred_mode,
b830ebc9
HW
6151 &connector->probed_modes,
6152 head) {
6153 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6154 amdgpu_encoder->native_mode = *preferred_mode;
6155
e7b07cee
HW
6156 break;
6157 }
6158
6159 }
6160}
6161
3ee6b26b
AD
6162static struct drm_display_mode *
6163amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6164 char *name,
6165 int hdisplay, int vdisplay)
e7b07cee
HW
6166{
6167 struct drm_device *dev = encoder->dev;
6168 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6169 struct drm_display_mode *mode = NULL;
6170 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6171
6172 mode = drm_mode_duplicate(dev, native_mode);
6173
b830ebc9 6174 if (mode == NULL)
e7b07cee
HW
6175 return NULL;
6176
6177 mode->hdisplay = hdisplay;
6178 mode->vdisplay = vdisplay;
6179 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 6180 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
6181
6182 return mode;
6183
6184}
6185
6186static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 6187 struct drm_connector *connector)
e7b07cee
HW
6188{
6189 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6190 struct drm_display_mode *mode = NULL;
6191 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
6192 struct amdgpu_dm_connector *amdgpu_dm_connector =
6193 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6194 int i;
6195 int n;
6196 struct mode_size {
6197 char name[DRM_DISPLAY_MODE_LEN];
6198 int w;
6199 int h;
b830ebc9 6200 } common_modes[] = {
e7b07cee
HW
6201 { "640x480", 640, 480},
6202 { "800x600", 800, 600},
6203 { "1024x768", 1024, 768},
6204 { "1280x720", 1280, 720},
6205 { "1280x800", 1280, 800},
6206 {"1280x1024", 1280, 1024},
6207 { "1440x900", 1440, 900},
6208 {"1680x1050", 1680, 1050},
6209 {"1600x1200", 1600, 1200},
6210 {"1920x1080", 1920, 1080},
6211 {"1920x1200", 1920, 1200}
6212 };
6213
b830ebc9 6214 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
6215
6216 for (i = 0; i < n; i++) {
6217 struct drm_display_mode *curmode = NULL;
6218 bool mode_existed = false;
6219
6220 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
6221 common_modes[i].h > native_mode->vdisplay ||
6222 (common_modes[i].w == native_mode->hdisplay &&
6223 common_modes[i].h == native_mode->vdisplay))
6224 continue;
e7b07cee
HW
6225
6226 list_for_each_entry(curmode, &connector->probed_modes, head) {
6227 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 6228 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
6229 mode_existed = true;
6230 break;
6231 }
6232 }
6233
6234 if (mode_existed)
6235 continue;
6236
6237 mode = amdgpu_dm_create_common_mode(encoder,
6238 common_modes[i].name, common_modes[i].w,
6239 common_modes[i].h);
6240 drm_mode_probed_add(connector, mode);
c84dec2f 6241 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
6242 }
6243}
6244
3ee6b26b
AD
6245static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6246 struct edid *edid)
e7b07cee 6247{
c84dec2f
HW
6248 struct amdgpu_dm_connector *amdgpu_dm_connector =
6249 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6250
6251 if (edid) {
6252 /* empty probed_modes */
6253 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 6254 amdgpu_dm_connector->num_modes =
e7b07cee
HW
6255 drm_add_edid_modes(connector, edid);
6256
f1e5e913
YMM
6257 /* Sort the probed modes before calling
6258 * amdgpu_dm_get_native_mode(), since an EDID can have
6259 * more than one preferred mode. Modes that appear
6260 * later in the probed mode list could be preferred at a
6261 * higher resolution. For example, a 3840x2160 preferred
6262 * timing in the base EDID and a 4096x2160 preferred
6263 * resolution in a later DID extension block.
6264 */
6265 drm_mode_sort(&connector->probed_modes);
e7b07cee 6266 amdgpu_dm_get_native_mode(connector);
a8d8d3dc 6267 } else {
c84dec2f 6268 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 6269 }
e7b07cee
HW
6270}
6271
7578ecda 6272static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 6273{
c84dec2f
HW
6274 struct amdgpu_dm_connector *amdgpu_dm_connector =
6275 to_amdgpu_dm_connector(connector);
e7b07cee 6276 struct drm_encoder *encoder;
c84dec2f 6277 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 6278
2b4c1c05 6279 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 6280
85ee15d6 6281 if (!edid || !drm_edid_is_valid(edid)) {
1b369d3c
ML
6282 amdgpu_dm_connector->num_modes =
6283 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
6284 } else {
6285 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6286 amdgpu_dm_connector_add_common_modes(encoder, connector);
6287 }
3e332d3a 6288 amdgpu_dm_fbc_init(connector);
5099114b 6289
c84dec2f 6290 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
6291}
6292
3ee6b26b
AD
6293void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6294 struct amdgpu_dm_connector *aconnector,
6295 int connector_type,
6296 struct dc_link *link,
6297 int link_index)
e7b07cee
HW
6298{
6299 struct amdgpu_device *adev = dm->ddev->dev_private;
6300
f04bee34
NK
6301 /*
6302 * Some of the properties below require access to state, like bpc.
6303 * Allocate some default initial connector state with our reset helper.
6304 */
6305 if (aconnector->base.funcs->reset)
6306 aconnector->base.funcs->reset(&aconnector->base);
6307
e7b07cee
HW
6308 aconnector->connector_id = link_index;
6309 aconnector->dc_link = link;
6310 aconnector->base.interlace_allowed = false;
6311 aconnector->base.doublescan_allowed = false;
6312 aconnector->base.stereo_allowed = false;
6313 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6314 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 6315 aconnector->audio_inst = -1;
e7b07cee
HW
6316 mutex_init(&aconnector->hpd_lock);
6317
1f6010a9
DF
6318 /*
6319 * Configure HPD hot plug support. connector->polled defaults to 0,
b830ebc9
HW
6320 * which means HPD hot plug is not supported.
6321 */
e7b07cee
HW
6322 switch (connector_type) {
6323 case DRM_MODE_CONNECTOR_HDMIA:
6324 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 6325 aconnector->base.ycbcr_420_allowed =
9ea59d5a 6326 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
6327 break;
6328 case DRM_MODE_CONNECTOR_DisplayPort:
6329 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 6330 aconnector->base.ycbcr_420_allowed =
9ea59d5a 6331 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
6332 break;
6333 case DRM_MODE_CONNECTOR_DVID:
6334 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6335 break;
6336 default:
6337 break;
6338 }
6339
6340 drm_object_attach_property(&aconnector->base.base,
6341 dm->ddev->mode_config.scaling_mode_property,
6342 DRM_MODE_SCALE_NONE);
6343
6344 drm_object_attach_property(&aconnector->base.base,
6345 adev->mode_info.underscan_property,
6346 UNDERSCAN_OFF);
6347 drm_object_attach_property(&aconnector->base.base,
6348 adev->mode_info.underscan_hborder_property,
6349 0);
6350 drm_object_attach_property(&aconnector->base.base,
6351 adev->mode_info.underscan_vborder_property,
6352 0);
1825fd34 6353
8c61b31e
JFZ
6354 if (!aconnector->mst_port)
6355 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 6356
4a8ca46b
RL
6357 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6358 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6359 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 6360
c1ee92f9 6361 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 6362 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
6363 drm_object_attach_property(&aconnector->base.base,
6364 adev->mode_info.abm_level_property, 0);
6365 }
bb47de73
NK
6366
6367 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
6368 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6369 connector_type == DRM_MODE_CONNECTOR_eDP) {
88694af9
NK
6370 drm_object_attach_property(
6371 &aconnector->base.base,
6372 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6373
8c61b31e
JFZ
6374 if (!aconnector->mst_port)
6375 drm_connector_attach_vrr_capable_property(&aconnector->base);
6376
0c8620d6 6377#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 6378 if (adev->dm.hdcp_workqueue)
53e108aa 6379 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 6380#endif
bb47de73 6381 }
e7b07cee
HW
6382}
6383
7578ecda
AD
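/*
 * i2c transfer over the connector's DDC line: translate the i2c_msg
 * array into a DC i2c_command and submit it through the DC core.
 */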
6384static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6385 struct i2c_msg *msgs, int num)
e7b07cee
HW
6386{
6387 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6388 struct ddc_service *ddc_service = i2c->ddc_service;
6389 struct i2c_command cmd;
6390 int i;
6391 int result = -EIO;
6392
b830ebc9 6393 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
6394
6395 if (!cmd.payloads)
6396 return result;
6397
6398 cmd.number_of_payloads = num;
6399 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6400 cmd.speed = 100;
6401
6402 for (i = 0; i < num; i++) {
6403 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6404 cmd.payloads[i].address = msgs[i].addr;
6405 cmd.payloads[i].length = msgs[i].len;
6406 cmd.payloads[i].data = msgs[i].buf;
6407 }
6408
c85e6e54
DF
6409 if (dc_submit_i2c(
6410 ddc_service->ctx->dc,
6411 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
6412 &cmd))
6413 result = num;
6414
6415 kfree(cmd.payloads);
6416 return result;
6417}
6418
7578ecda 6419static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
6420{
6421 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6422}
6423
6424static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6425 .master_xfer = amdgpu_dm_i2c_xfer,
6426 .functionality = amdgpu_dm_i2c_func,
6427};
6428
3ee6b26b
AD
6429static struct amdgpu_i2c_adapter *
6430create_i2c(struct ddc_service *ddc_service,
6431 int link_index,
6432 int *res)
e7b07cee
HW
6433{
6434 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6435 struct amdgpu_i2c_adapter *i2c;
6436
b830ebc9 6437 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
6438 if (!i2c)
6439 return NULL;
e7b07cee
HW
6440 i2c->base.owner = THIS_MODULE;
6441 i2c->base.class = I2C_CLASS_DDC;
6442 i2c->base.dev.parent = &adev->pdev->dev;
6443 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 6444 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
6445 i2c_set_adapdata(&i2c->base, i2c);
6446 i2c->ddc_service = ddc_service;
c85e6e54 6447 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
6448
6449 return i2c;
6450}
6451
89fc8d4e 6452
1f6010a9
DF
6453/*
6454 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
6455 * dc_link which will be represented by this aconnector.
6456 */
7578ecda
AD
6457static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6458 struct amdgpu_dm_connector *aconnector,
6459 uint32_t link_index,
6460 struct amdgpu_encoder *aencoder)
e7b07cee
HW
6461{
6462 int res = 0;
6463 int connector_type;
6464 struct dc *dc = dm->dc;
6465 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6466 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
6467
6468 link->priv = aconnector;
e7b07cee 6469
f1ad2f5e 6470 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
6471
6472 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
6473 if (!i2c) {
6474 DRM_ERROR("Failed to create i2c adapter data\n");
6475 return -ENOMEM;
6476 }
6477
e7b07cee
HW
6478 aconnector->i2c = i2c;
6479 res = i2c_add_adapter(&i2c->base);
6480
6481 if (res) {
6482 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6483 goto out_free;
6484 }
6485
6486 connector_type = to_drm_connector_type(link->connector_signal);
6487
17165de2 6488 res = drm_connector_init_with_ddc(
e7b07cee
HW
6489 dm->ddev,
6490 &aconnector->base,
6491 &amdgpu_dm_connector_funcs,
17165de2
AP
6492 connector_type,
6493 &i2c->base);
e7b07cee
HW
6494
6495 if (res) {
6496 DRM_ERROR("connector_init failed\n");
6497 aconnector->connector_id = -1;
6498 goto out_free;
6499 }
6500
6501 drm_connector_helper_add(
6502 &aconnector->base,
6503 &amdgpu_dm_connector_helper_funcs);
6504
6505 amdgpu_dm_connector_init_helper(
6506 dm,
6507 aconnector,
6508 connector_type,
6509 link,
6510 link_index);
6511
cde4c44d 6512 drm_connector_attach_encoder(
e7b07cee
HW
6513 &aconnector->base, &aencoder->base);
6514
e7b07cee
HW
6515 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6516 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 6517 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 6518
e7b07cee
HW
6519out_free:
6520 if (res) {
6521 kfree(i2c);
6522 aconnector->i2c = NULL;
6523 }
6524 return res;
6525}
6526
6527int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6528{
6529 switch (adev->mode_info.num_crtc) {
6530 case 1:
6531 return 0x1;
6532 case 2:
6533 return 0x3;
6534 case 3:
6535 return 0x7;
6536 case 4:
6537 return 0xf;
6538 case 5:
6539 return 0x1f;
6540 case 6:
6541 default:
6542 return 0x3f;
6543 }
6544}
6545
7578ecda
AD
6546static int amdgpu_dm_encoder_init(struct drm_device *dev,
6547 struct amdgpu_encoder *aencoder,
6548 uint32_t link_index)
e7b07cee
HW
6549{
6550 struct amdgpu_device *adev = dev->dev_private;
6551
6552 int res = drm_encoder_init(dev,
6553 &aencoder->base,
6554 &amdgpu_dm_encoder_funcs,
6555 DRM_MODE_ENCODER_TMDS,
6556 NULL);
6557
6558 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6559
6560 if (!res)
6561 aencoder->encoder_id = link_index;
6562 else
6563 aencoder->encoder_id = -1;
6564
6565 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6566
6567 return res;
6568}
6569
3ee6b26b
AD
6570static void manage_dm_interrupts(struct amdgpu_device *adev,
6571 struct amdgpu_crtc *acrtc,
6572 bool enable)
e7b07cee
HW
6573{
6574 /*
8fe684e9
NK
6575 * We have no guarantee that the frontend index maps to the same
6576 * backend index - some even map to more than one.
6577 *
6578 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
6579 */
6580 int irq_type =
734dd01d 6581 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
6582 adev,
6583 acrtc->crtc_id);
6584
6585 if (enable) {
6586 drm_crtc_vblank_on(&acrtc->base);
6587 amdgpu_irq_get(
6588 adev,
6589 &adev->pageflip_irq,
6590 irq_type);
6591 } else {
6592
6593 amdgpu_irq_put(
6594 adev,
6595 &adev->pageflip_irq,
6596 irq_type);
6597 drm_crtc_vblank_off(&acrtc->base);
6598 }
6599}
6600
8fe684e9
NK
6601static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6602 struct amdgpu_crtc *acrtc)
6603{
6604 int irq_type =
6605 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6606
6607 /**
6608 * This reads the current state for the IRQ and force reapplies
6609 * the setting to hardware.
6610 */
6611 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6612}
6613
3ee6b26b
AD
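/*
 * Return true when the scaling mode or the underscan settings changed in
 * a way that requires the stream's scaling parameters to be reprogrammed.
 */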
6614static bool
6615is_scaling_state_different(const struct dm_connector_state *dm_state,
6616 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
6617{
6618 if (dm_state->scaling != old_dm_state->scaling)
6619 return true;
6620 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6621 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6622 return true;
6623 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6624 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6625 return true;
b830ebc9
HW
6626 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6627 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6628 return true;
e7b07cee
HW
6629 return false;
6630}
6631
0c8620d6
BL
6632#ifdef CONFIG_DRM_AMD_DC_HDCP
6633static bool is_content_protection_different(struct drm_connector_state *state,
6634 const struct drm_connector_state *old_state,
6635 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6636{
6637 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6638
53e108aa
BL
6639 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6640 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6641 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6642 return true;
6643 }
6644
0c8620d6
BL
6645 /* CP is being re-enabled, ignore this */
6646 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6647 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6648 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6649 return false;
6650 }
6651
6652 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6653 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6654 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6655 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6656
6657 /* Check if something is connected or enabled; otherwise we would start HDCP
6658 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
6659 */
6660 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6661 aconnector->dc_sink != NULL)
6662 return true;
6663
6664 if (old_state->content_protection == state->content_protection)
6665 return false;
6666
6667 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6668 return true;
6669
6670 return false;
6671}
6672
0c8620d6 6673#endif
3ee6b26b
AD
6674static void remove_stream(struct amdgpu_device *adev,
6675 struct amdgpu_crtc *acrtc,
6676 struct dc_stream_state *stream)
e7b07cee
HW
6677{
6678 /* this is the update mode case */
e7b07cee
HW
6679
6680 acrtc->otg_inst = -1;
6681 acrtc->enabled = false;
6682}
6683
7578ecda
AD
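/*
 * Compute the DC cursor position for the CRTC: reject cursors larger
 * than the hardware supports, hide cursors that are entirely off screen
 * and clamp partially visible ones by moving the hotspot instead.
 */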
6684static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6685 struct dc_cursor_position *position)
2a8f6ccb 6686{
f4c2cc43 6687 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
6688 int x, y;
6689 int xorigin = 0, yorigin = 0;
6690
e371e19c
NK
6691 position->enable = false;
6692 position->x = 0;
6693 position->y = 0;
6694
6695 if (!crtc || !plane->state->fb)
2a8f6ccb 6696 return 0;
2a8f6ccb
HW
6697
6698 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6699 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6700 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6701 __func__,
6702 plane->state->crtc_w,
6703 plane->state->crtc_h);
6704 return -EINVAL;
6705 }
6706
6707 x = plane->state->crtc_x;
6708 y = plane->state->crtc_y;
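
	/*
	 * Negative crtc_x/crtc_y mean the cursor hangs off the top-left
	 * edge of the screen. If it is entirely off-screen, leave it
	 * disabled; if it only partially overhangs, DC cannot program a
	 * negative position, so the overhang is folded into the hotspot
	 * instead: e.g. crtc_x = -10 becomes x = 0 with x_hotspot = 10,
	 * clamped to max_cursor_width - 1.
	 */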
	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
			 acrtc->crtc_id);
}

static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = new_crtc_state->vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static void pre_update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}

static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}

static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
		drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
		to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint64_t tiling_flags;
	bool tmz_surface = false;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

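	/*
	 * Note: with four MAX_SURFACES-sized update arrays, this bundle is
	 * far too large for the kernel stack, hence the heap allocation.
	 */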
	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		/*
		 * We cannot reserve buffers here, which means the normal flag
		 * access functions don't work. Paper over this with READ_ONCE,
		 * but maybe the flags are invariant enough that not even that
		 * would be needed.
		 */
		tiling_flags = READ_ONCE(abo->tiling_flags);
		tmz_surface = READ_ONCE(abo->flags) & AMDGPU_GEM_CREATE_ENCRYPTED;

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state, tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			tmz_surface,
			false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;
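		/*
		 * wait_for_vblank is a bool, so it contributes 0 or 1 here:
		 * with async flips in the state it is false and the flip
		 * targets the last flip's own vblank; otherwise the next one.
		 */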

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/**
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		/**
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(
				(struct amdgpu_device *)dev->dev_private,
				acrtc_attach);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}

static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	/*
	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
	 * a modeset, being disabled, or have no active planes.
	 *
	 * It's done in atomic commit rather than commit tail for now since
	 * some of these interrupt handlers access the current CRTC state and
	 * potentially the stream pointer itself.
	 *
	 * Since the atomic state is swapped within atomic commit and not within
	 * commit tail, this would lead to the new state (that hasn't been
	 * committed yet) being accessed from within the handlers.
	 *
	 * TODO: Fix this so we can do this in commit tail and not have to block
	 * in atomic check.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
			manage_dm_interrupts(adev, acrtc, false);
	}
	/*
	 * Add check here for SoC's that support hardware cursor plane, to
	 * unset legacy_cursor_update
	 */

	return drm_atomic_helper_commit(dev, state, nonblock);

	/* TODO: Handle EINTR, reenable IRQ */
}

/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failures here imply a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * This can happen because of issues with
				 * userspace notification delivery: userspace
				 * tries to set a mode on a display that is in
				 * fact disconnected (dc_sink is NULL on the
				 * aconnector), and we expect a mode reset to
				 * come soon.
				 *
				 * This can also happen when an unplug is done
				 * during the resume sequence.
				 *
				 * In either case we want to pretend we still
				 * have a sink, to keep the pipe running so that
				 * hw state stays consistent with the sw state.
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream) {
				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);

				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			}
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
				dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);

			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

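		/*
		 * The CRTC is being disabled while content protection was
		 * enabled: tear HDCP down for the link, and fall back to
		 * DESIRED so that protection is re-enabled on the next
		 * modeset.
		 */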
		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
													   : false);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
						       dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* Update freesync active state. */
		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/**
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. It was intentionally deferred until after the front end
	 * state was modified to wait until the OTG was on and so the IRQ
	 * handlers didn't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, true);
#ifdef CONFIG_DEBUG_FS
			/**
			 * Frontend may have changed so reapply the CRC capture
			 * settings for the stream.
			 */
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
				amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state,
					dm_new_crtc_state->crc_src);
			}
#endif
		}
	}

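	/*
	 * Async (immediate) flips must not be throttled to the next vblank;
	 * amdgpu_dm_commit_planes() folds wait_for_vblank into its target
	 * vblank computation, and we skip the flip-done wait below.
	 */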
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}

7788
7789static int dm_force_atomic_commit(struct drm_connector *connector)
7790{
7791 int ret = 0;
7792 struct drm_device *ddev = connector->dev;
7793 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7794 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7795 struct drm_plane *plane = disconnected_acrtc->base.primary;
7796 struct drm_connector_state *conn_state;
7797 struct drm_crtc_state *crtc_state;
7798 struct drm_plane_state *plane_state;
7799
7800 if (!state)
7801 return -ENOMEM;
7802
7803 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7804
7805 /* Construct an atomic state to restore previous display setting */
7806
7807 /*
7808 * Attach connectors to drm_atomic_state
7809 */
7810 conn_state = drm_atomic_get_connector_state(state, connector);
7811
7812 ret = PTR_ERR_OR_ZERO(conn_state);
7813 if (ret)
7814 goto err;
7815
7816 /* Attach crtc to drm_atomic_state*/
7817 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7818
7819 ret = PTR_ERR_OR_ZERO(crtc_state);
7820 if (ret)
7821 goto err;
7822
7823 /* force a restore */
7824 crtc_state->mode_changed = true;
7825
7826 /* Attach plane to drm_atomic_state */
7827 plane_state = drm_atomic_get_plane_state(state, plane);
7828
7829 ret = PTR_ERR_OR_ZERO(plane_state);
7830 if (ret)
7831 goto err;
7832
7833
7834 /* Call commit internally with the state we just constructed */
7835 ret = drm_atomic_commit(state);
7836 if (!ret)
7837 return 0;
7838
7839err:
7840 DRM_ERROR("Restoring old state failed with %i\n", ret);
7841 drm_atomic_state_put(state);
7842
7843 return ret;
7844}
7845
/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we can not rely on a usermode call
	 * to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grab all modesetting locks to serialize against any blocking commits,
 * and wait for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases it, the extra locks we are taking here will
	 * get released too.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}

static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
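		/*
		 * The connector's vfreq limits are in Hz, while the freesync
		 * module expects micro-Hz (uhz), hence the 1000000 scale.
		 */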
		config.min_refresh_in_uhz =
				aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
				aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_params, 0,
	       sizeof(new_crtc_state->vrr_params));
	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}

static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error: the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors not found in existing crtc_state in the chained mode
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}

static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* TODO: Remove this once we can handle fast format changes. */
		if (old_other_state->fb && new_other_state->fb &&
		    old_other_state->fb->format != new_other_state->fb->format)
			return true;
	}

	return false;
}

8272
9e869063
LL
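/**
 * dm_update_plane_state() - update the DC plane state for one DRM plane
 * @dc: the DC core
 * @state: the DRM atomic state
 * @plane: the DRM plane being updated
 * @old_plane_state: the old DRM state of @plane
 * @new_plane_state: the new DRM state of @plane
 * @enable: false to remove changed/removed planes, true to add new/modified
 *          planes
 * @lock_and_validation_needed: set to true when the change requires full
 *                              validation under the global lock
 *
 * Removes the plane's dc_plane_state from the DC context attached to the DM
 * atomic state, or creates, fills, and adds a new one, depending on @enable.
 *
 * Return: 0 on success, or a negative error code on failure.
 */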
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* TODO: Implement a better atomic check for the cursor plane. */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
			drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
					 new_plane_state->crtc_w, new_plane_state->crtc_h);
			return -EINVAL;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			ret = -EINVAL;
			return ret;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			new_plane_crtc->dev->dev_private,
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}

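/**
 * dm_determine_update_type_for_commit() - classify how invasive a commit is
 * @dm: the display manager
 * @state: the DRM atomic state to classify
 * @out_type: the resulting surface update type
 *
 * Build a dc_surface_update/dc_stream_update bundle for each CRTC in @state
 * and ask DC, via dc_check_update_surfaces_for_stream(), whether the commit
 * can be done as a FAST update or needs a MED/FULL update. Anything above
 * UPDATE_TYPE_FAST makes amdgpu_dm_atomic_check() take the global lock and
 * run full DC validation.
 *
 * Return: 0 on success, or a negative error code on failure.
 */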
static int
dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
				    struct drm_atomic_state *state,
				    enum surface_update_type *out_type)
{
	struct dc *dc = dm->dc;
	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
	int i, j, num_plane, ret = 0;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
	struct drm_crtc *new_plane_crtc;
	struct drm_plane *plane;

	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
	struct dc_stream_status *status = NULL;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	struct surface_info_bundle {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		DRM_ERROR("Failed to allocate update bundle\n");
		/* Set type to FULL to avoid crashing in DC */
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {

		memset(bundle, 0, sizeof(struct surface_info_bundle));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
			uint64_t tiling_flags;
			bool tmz_surface = false;

			new_plane_crtc = new_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			bundle->surface_updates[num_plane].surface =
				new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
				bundle->stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				bundle->surface_updates[num_plane].gamma =
					new_dm_plane_state->dc_state->gamma_correction;
				bundle->surface_updates[num_plane].in_transfer_func =
					new_dm_plane_state->dc_state->in_transfer_func;
				bundle->surface_updates[num_plane].gamut_remap_matrix =
					&new_dm_plane_state->dc_state->gamut_remap_matrix;
				bundle->stream_update.gamut_remap =
					&new_dm_crtc_state->stream->gamut_remap_matrix;
				bundle->stream_update.output_csc_transform =
					&new_dm_crtc_state->stream->csc_color_matrix;
				bundle->stream_update.out_transfer_func =
					new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   scaling_info);
			if (ret)
				goto cleanup;

			bundle->surface_updates[num_plane].scaling_info = scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
				if (ret)
					goto cleanup;

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					plane_info,
					&flip_addr->address, tmz_surface,
					false);
				if (ret)
					goto cleanup;

				bundle->surface_updates[num_plane].plane_info = plane_info;
				bundle->surface_updates[num_plane].flip_addr = flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		bundle->stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(
				dc, bundle->surface_updates, num_plane,
				&bundle->stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(bundle);

	*out_type = update_type;
	return ret;
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
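/*
 * Find the MST connector driving @crtc, if any, and add every CRTC that
 * shares its MST topology to @state, so that DSC bandwidth can be
 * re-validated across the whole link when one of them undergoes a modeset.
 */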
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state; otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For the full update case, which removes/adds/updates streams on
 * one CRTC while flipping on another CRTC, acquiring the global lock
 * guarantees that any such full update commit will wait for completion of any
 * outstanding flip using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, or a negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
	enum dc_status status;
	int ret, i;

	/*
	 * This bool will be set to true for any modeset/reset
	 * or plane update which implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into the context w/o causing a full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need to
	 * set the global lock. Leaving it in to check if we broke any corner
	 * cases:
	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;
		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/* If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}

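/*
 * Check the sink's DPCD to see whether it can ignore the MSA timing
 * parameters (DP_MSA_TIMING_PAR_IGNORED), which is a prerequisite for
 * variable refresh (FreeSync) over DP/eDP.
 */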
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
		dm_helpers_dp_read_dpcd(
				NULL,
				amdgpu_dm_connector->dc_link,
				DP_DOWN_STREAM_PORT_COUNT,
				&dpcd_data,
				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

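/**
 * amdgpu_dm_update_freesync_caps() - update a connector's FreeSync state
 * @connector: the DRM connector
 * @edid: the parsed EDID of the attached sink, or NULL on disconnect
 *
 * Parse the EDID monitor range descriptor (for DP/eDP sinks that can ignore
 * the MSA timing parameters) to derive the minimum and maximum vertical
 * refresh rates, then update the dm connector state and the connector's
 * "vrr_capable" property accordingly.
 */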
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-NULL, restrict FreeSync to DP and eDP sinks.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if the monitor has continuous frequency mode.
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

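/*
 * amdgpu_dm_set_psr_caps() - read the eDP sink's PSR DPCD capabilities and
 * cache the supported PSR version in the link's psr_settings
 * @link: link to the eDP sink
 */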
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
					dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating the
	 * interrupt to enter PSR, with a fail-safe default of 2 frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up.
	 * Calculate the number of frames such that at least 30 ms of time
	 * has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}

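/*
 * amdgpu_dm_trigger_timing_sync() - apply the force_timing_sync setting
 * @dev: the DRM device
 *
 * Propagate adev->dm.force_timing_sync to every stream in the current DC
 * state and re-trigger timing synchronization across the active CRTCs.
 */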
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}