drm/amd/display: Reject overlay plane configurations in multi-display scenarios
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

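/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * In VRR mode, core vblank handling is deferred to this handler, which
 * runs after the end of the front-porch, where vblank timestamping gives
 * valid results. Pre-DCE12 ASICs additionally do their BTR processing here.
 */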
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * now that it is done after the front-porch. This will also
		 * deliver page-flip completion events that have been queued
		 * to us if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc_state->stream,
					&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc_state->stream,
					&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};

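/*
 * Register DM with the audio component framework: the HD-audio driver
 * binds against the ops above to query ELDs, and each audio pin below
 * mirrors one of DC's audio instances.
 */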
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

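/*
 * Bring up the DMUB microcontroller: copy the firmware, VBIOS and scratch
 * regions into the framebuffer memory reserved by dm_dmub_sw_init(),
 * program the hardware, wait for the firmware to auto-load, and create the
 * DC-side DMUB server. DMCU/ABM are initialized afterwards where the ASIC
 * still carries them.
 */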
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

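/*
 * Request the DMCU eRAM/interrupt-vector firmware for ASICs that need it
 * loaded through PSP. ASICs without a usable DMCU (or whose ABM lives on
 * DMUB instead) simply return early with no firmware loaded.
 */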
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

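/*
 * Software-side DMUB setup: pick and validate the firmware for the ASIC,
 * create the DMUB service, and carve its inst/bss/vbios/mailbox regions
 * out of a single VRAM allocation (moving this into GART is a TODO below).
 */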
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	if (!adev->dm.fw_dmcu)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%
	 * (0xFFFF x 0.01 = 0x28F).
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where ABM is implemented on DMCUB,
	 * the dmcu object will be null.
	 * ABM 2.4 and up are implemented on DMCUB.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}

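/*
 * Suspend or resume the MST topology managers of all MST branch links
 * across S3. If a topology fails to resume, MST is torn down on that
 * connector and a hotplug event is sent so userspace can re-probe.
 */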
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

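/*
 * Enable or disable pageflip and vblank interrupts for every stream in
 * @state that still has planes; used to quiesce the hardware around a GPU
 * reset while the DC state is cached and later replayed.
 */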
static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

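/*
 * Commit a copy of the current DC state with every stream (and its
 * planes) removed, blanking all displays without disturbing the state
 * cached in dm_suspend() for replay after the reset.
 */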
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (adev->in_gpu_reset) {
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

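/*
 * Emulate a connector detection without querying the hardware: mark the
 * link as disconnected, create a sink matching the connector signal and
 * read the EDID locally, for callers that cannot perform a full
 * dc_link_detect.
 */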
fbbdadf2
BL
1706static void emulated_link_detect(struct dc_link *link)
1707{
1708 struct dc_sink_init_data sink_init_data = { 0 };
1709 struct display_sink_capability sink_caps = { 0 };
1710 enum dc_edid_status edid_status;
1711 struct dc_context *dc_ctx = link->ctx;
1712 struct dc_sink *sink = NULL;
1713 struct dc_sink *prev_sink = NULL;
1714
1715 link->type = dc_connection_none;
1716 prev_sink = link->local_sink;
1717
1718 if (prev_sink != NULL)
1719 dc_sink_retain(prev_sink);
1720
1721 switch (link->connector_signal) {
1722 case SIGNAL_TYPE_HDMI_TYPE_A: {
1723 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1724 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1725 break;
1726 }
1727
1728 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1729 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1730 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1731 break;
1732 }
1733
1734 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1735 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1736 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1737 break;
1738 }
1739
1740 case SIGNAL_TYPE_LVDS: {
1741 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1742 sink_caps.signal = SIGNAL_TYPE_LVDS;
1743 break;
1744 }
1745
1746 case SIGNAL_TYPE_EDP: {
1747 sink_caps.transaction_type =
1748 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1749 sink_caps.signal = SIGNAL_TYPE_EDP;
1750 break;
1751 }
1752
1753 case SIGNAL_TYPE_DISPLAY_PORT: {
1754 sink_caps.transaction_type =
1755 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1756 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1757 break;
1758 }
1759
1760 default:
1761 DC_ERROR("Invalid connector type! signal:%d\n",
1762 link->connector_signal);
1763 return;
1764 }
1765
1766 sink_init_data.link = link;
1767 sink_init_data.sink_signal = sink_caps.signal;
1768
1769 sink = dc_sink_create(&sink_init_data);
1770 if (!sink) {
1771 DC_ERROR("Failed to create sink!\n");
1772 return;
1773 }
1774
1775 /* dc_sink_create returns a new reference */
1776 link->local_sink = sink;
1777
1778 edid_status = dm_helpers_read_local_edid(
1779 link->ctx,
1780 link,
1781 sink);
1782
1783 if (edid_status != EDID_OK)
1784 DC_ERROR("Failed to read EDID");
1785
1786}
1787
1788static void dm_gpureset_commit_state(struct dc_state *dc_state,
1789 struct amdgpu_display_manager *dm)
1790{
1791 struct {
1792 struct dc_surface_update surface_updates[MAX_SURFACES];
1793 struct dc_plane_info plane_infos[MAX_SURFACES];
1794 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1795 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1796 struct dc_stream_update stream_update;
1797 } * bundle;
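 /*
 * Note (annotation): with MAX_SURFACES-sized arrays of updates, infos
 * and flip addresses, the bundle runs to several KiB, too large for the
 * kernel stack, hence the kzalloc() below.
 */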
1798 int k, m;
1799
1800 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1801
1802 if (!bundle) {
1803 dm_error("Failed to allocate update bundle\n");
1804 goto cleanup;
1805 }
1806
1807 for (k = 0; k < dc_state->stream_count; k++) {
1808 bundle->stream_update.stream = dc_state->streams[k];
1809
1810 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1811 bundle->surface_updates[m].surface =
1812 dc_state->stream_status->plane_states[m];
1813 bundle->surface_updates[m].surface->force_full_update =
1814 true;
1815 }
1816 dc_commit_updates_for_stream(
1817 dm->dc, bundle->surface_updates,
1818 dc_state->stream_status->plane_count,
1819 dc_state->streams[k], &bundle->stream_update, dc_state);
1820 }
1821
1822cleanup:
1823 kfree(bundle);
1824
1825 return;
1826}
1827
4562236b
HW
1828static int dm_resume(void *handle)
1829{
1830 struct amdgpu_device *adev = handle;
1831 struct drm_device *ddev = adev->ddev;
1832 struct amdgpu_display_manager *dm = &adev->dm;
1833 struct amdgpu_dm_connector *aconnector;
1834 struct drm_connector *connector;
1835 struct drm_connector_list_iter iter;
1836 struct drm_crtc *crtc;
1837 struct drm_crtc_state *new_crtc_state;
1838 struct dm_crtc_state *dm_new_crtc_state;
1839 struct drm_plane *plane;
1840 struct drm_plane_state *new_plane_state;
1841 struct dm_plane_state *dm_new_plane_state;
1842 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1843 enum dc_connection_type new_connection_type = dc_connection_none;
1844 struct dc_state *dc_state;
1845 int i, r, j;
1846
1847 if (adev->in_gpu_reset) {
1848 dc_state = dm->cached_dc_state;
1849
1850 r = dm_dmub_hw_init(adev);
1851 if (r)
1852 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1853
1854 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1855 dc_resume(dm->dc);
1856
1857 amdgpu_dm_irq_resume_early(adev);
1858
1859 for (i = 0; i < dc_state->stream_count; i++) {
1860 dc_state->streams[i]->mode_changed = true;
1861 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1862 dc_state->stream_status->plane_states[j]->update_flags.raw
1863 = 0xffffffff;
1864 }
1865 }
1866
1867 WARN_ON(!dc_commit_state(dm->dc, dc_state));
1868
1869 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1870
1871 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1872
1873 dc_release_state(dm->cached_dc_state);
1874 dm->cached_dc_state = NULL;
1875
1876 amdgpu_dm_irq_resume_late(adev);
1877
1878 mutex_unlock(&dm->dc_lock);
1879
1880 return 0;
1881 }
1882 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1883 dc_release_state(dm_state->context);
1884 dm_state->context = dc_create_state(dm->dc);
1885 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1886 dc_resource_state_construct(dm->dc, dm_state->context);
1887
1888 /* Before powering on DC we need to re-initialize DMUB. */
1889 r = dm_dmub_hw_init(adev);
1890 if (r)
1891 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1892
1893 /* power on hardware */
1894 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1895
1896 /* program HPD filter */
1897 dc_resume(dm->dc);
1898
1899 /*
1900 * early enable HPD Rx IRQ, should be done before set mode as short
1901 * pulse interrupts are used for MST
1902 */
1903 amdgpu_dm_irq_resume_early(adev);
1904
1905 /* On resume we need to rewrite the MSTM control bits to enable MST */
1906 s3_handle_mst(ddev, false);
1907
1908 /* Do detection */
1909 drm_connector_list_iter_begin(ddev, &iter);
1910 drm_for_each_connector_iter(connector, &iter) {
1911 aconnector = to_amdgpu_dm_connector(connector);
1912
1913 /*
1914 * this is the case when traversing through already created
1915 * MST connectors, should be skipped
1916 */
1917 if (aconnector->mst_port)
1918 continue;
1919
1920 mutex_lock(&aconnector->hpd_lock);
1921 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1922 DRM_ERROR("KMS: Failed to detect connector\n");
1923
1924 if (aconnector->base.force && new_connection_type == dc_connection_none)
1925 emulated_link_detect(aconnector->dc_link);
1926 else
1927 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1928
1929 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1930 aconnector->fake_enable = false;
1931
1932 if (aconnector->dc_sink)
1933 dc_sink_release(aconnector->dc_sink);
1934 aconnector->dc_sink = NULL;
1935 amdgpu_dm_update_connector_after_detect(aconnector);
1936 mutex_unlock(&aconnector->hpd_lock);
1937 }
1938 drm_connector_list_iter_end(&iter);
1939
1940 /* Force mode set in atomic commit */
1941 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1942 new_crtc_state->active_changed = true;
1943
1944 /*
1945 * atomic_check is expected to create the dc states. We need to release
1946 * them here, since they were duplicated as part of the suspend
1947 * procedure.
1948 */
1949 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1950 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1951 if (dm_new_crtc_state->stream) {
1952 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1953 dc_stream_release(dm_new_crtc_state->stream);
1954 dm_new_crtc_state->stream = NULL;
1955 }
1956 }
1957
1958 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1959 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1960 if (dm_new_plane_state->dc_state) {
1961 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1962 dc_plane_state_release(dm_new_plane_state->dc_state);
1963 dm_new_plane_state->dc_state = NULL;
1964 }
1965 }
1966
1967 drm_atomic_helper_resume(ddev, dm->cached_state);
1968
1969 dm->cached_state = NULL;
1970
1971 amdgpu_dm_irq_resume_late(adev);
1972
1973 amdgpu_dm_smu_write_watermarks_table(adev);
1974
1975 return 0;
1976}
1977
1978/**
1979 * DOC: DM Lifecycle
1980 *
1981 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1982 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1983 * the base driver's device list to be initialized and torn down accordingly.
1984 *
1985 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1986 */
1987
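/*
 * Illustrative sketch (an assumption, not code from this file): the base
 * driver would pick up dm_ip_block during per-ASIC init roughly as
 *
 *	r = amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *	if (r)
 *		return r;
 *
 * with amdgpu_device_ip_block_add() being the generic IP-block
 * registration helper; the exact call site lives in the ASIC setup code.
 */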
1988static const struct amd_ip_funcs amdgpu_dm_funcs = {
1989 .name = "dm",
1990 .early_init = dm_early_init,
1991 .late_init = dm_late_init,
1992 .sw_init = dm_sw_init,
1993 .sw_fini = dm_sw_fini,
1994 .hw_init = dm_hw_init,
1995 .hw_fini = dm_hw_fini,
1996 .suspend = dm_suspend,
1997 .resume = dm_resume,
1998 .is_idle = dm_is_idle,
1999 .wait_for_idle = dm_wait_for_idle,
2000 .check_soft_reset = dm_check_soft_reset,
2001 .soft_reset = dm_soft_reset,
2002 .set_clockgating_state = dm_set_clockgating_state,
2003 .set_powergating_state = dm_set_powergating_state,
2004};
2005
2006const struct amdgpu_ip_block_version dm_ip_block =
2007{
2008 .type = AMD_IP_BLOCK_TYPE_DCE,
2009 .major = 1,
2010 .minor = 0,
2011 .rev = 0,
2012 .funcs = &amdgpu_dm_funcs,
2013};
2014
2015
2016/**
2017 * DOC: atomic
2018 *
2019 * *WIP*
2020 */
2021
2022static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2023 .fb_create = amdgpu_display_user_framebuffer_create,
2024 .output_poll_changed = drm_fb_helper_output_poll_changed,
2025 .atomic_check = amdgpu_dm_atomic_check,
2026 .atomic_commit = amdgpu_dm_atomic_commit,
2027};
2028
2029static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2030 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2031};
2032
2033static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2034{
2035 u32 max_cll, min_cll, max, min, q, r;
2036 struct amdgpu_dm_backlight_caps *caps;
2037 struct amdgpu_display_manager *dm;
2038 struct drm_connector *conn_base;
2039 struct amdgpu_device *adev;
2040 struct dc_link *link = NULL;
2041 static const u8 pre_computed_values[] = {
2042 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2043 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2044
2045 if (!aconnector || !aconnector->dc_link)
2046 return;
2047
2048 link = aconnector->dc_link;
2049 if (link->connector_signal != SIGNAL_TYPE_EDP)
2050 return;
2051
2052 conn_base = &aconnector->base;
2053 adev = conn_base->dev->dev_private;
2054 dm = &adev->dm;
2055 caps = &dm->backlight_caps;
2056 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2057 caps->aux_support = false;
2058 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2059 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2060
2061 if (caps->ext_caps->bits.oled == 1 ||
2062 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2063 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2064 caps->aux_support = true;
2065
2066 /* From the specification (CTA-861-G), for calculating the maximum
2067 * luminance we need to use:
2068 * Luminance = 50*2**(CV/32)
2069 * Where CV is a one-byte value.
2070 * For calculating this expression we may need floating-point precision;
2071 * to avoid this complexity level, we take advantage that CV is divided
2072 * by a constant. From Euclid's division algorithm, we know that CV
2073 * can be written as: CV = 32*q + r. Next, we replace CV in the
2074 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2075 * need to pre-compute the value of r/32. For pre-computing the values
2076 * we just used the following Ruby line:
2077 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2078 * The results of the above expressions can be verified at
2079 * pre_computed_values.
2080 */
2081 q = max_cll >> 5;
2082 r = max_cll % 32;
2083 max = (1 << q) * pre_computed_values[r];
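 /*
 * Worked example (illustrative): max_cll = 70 gives q = 70 >> 5 = 2
 * and r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6]
 * = 4 * 57 = 228, close to the exact 50*2**(70/32) ~= 227.8.
 */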
2084
2085 // min luminance: maxLum * (CV/255)^2 / 100
2086 q = DIV_ROUND_CLOSEST(min_cll, 255);
2087 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2088
2089 caps->aux_max_input_signal = max;
2090 caps->aux_min_input_signal = min;
2091}
2092
2093void amdgpu_dm_update_connector_after_detect(
2094 struct amdgpu_dm_connector *aconnector)
2095{
2096 struct drm_connector *connector = &aconnector->base;
2097 struct drm_device *dev = connector->dev;
2098 struct dc_sink *sink;
2099
2100 /* MST handled by drm_mst framework */
2101 if (aconnector->mst_mgr.mst_state == true)
2102 return;
2103
2104
2105 sink = aconnector->dc_link->local_sink;
2106 if (sink)
2107 dc_sink_retain(sink);
2108
2109 /*
2110 * Edid mgmt connector gets first update only in mode_valid hook and then
2111 * the connector sink is set to either a fake or a physical sink depending on link status.
2112 * Skip if already done during boot.
2113 */
2114 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2115 && aconnector->dc_em_sink) {
2116
2117 /*
2118 * For S3 resume with headless use em_sink to fake the stream
2119 * because on resume connector->sink is set to NULL
2120 */
2121 mutex_lock(&dev->mode_config.mutex);
2122
2123 if (sink) {
2124 if (aconnector->dc_sink) {
2125 amdgpu_dm_update_freesync_caps(connector, NULL);
2126 /*
2127 * retain and release below are used to
2128 * bump up refcount for sink because the link doesn't point
2129 * to it anymore after disconnect, so on next crtc to connector
2130 * reshuffle by UMD we will get into unwanted dc_sink release
2131 */
2132 dc_sink_release(aconnector->dc_sink);
2133 }
2134 aconnector->dc_sink = sink;
2135 dc_sink_retain(aconnector->dc_sink);
2136 amdgpu_dm_update_freesync_caps(connector,
2137 aconnector->edid);
2138 } else {
2139 amdgpu_dm_update_freesync_caps(connector, NULL);
2140 if (!aconnector->dc_sink) {
2141 aconnector->dc_sink = aconnector->dc_em_sink;
2142 dc_sink_retain(aconnector->dc_sink);
2143 }
2144 }
2145
2146 mutex_unlock(&dev->mode_config.mutex);
2147
2148 if (sink)
2149 dc_sink_release(sink);
2150 return;
2151 }
2152
2153 /*
2154 * TODO: temporary guard to look for proper fix
2155 * if this sink is MST sink, we should not do anything
2156 */
2157 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2158 dc_sink_release(sink);
4562236b 2159 return;
2160 }
2161
2162 if (aconnector->dc_sink == sink) {
2163 /*
2164 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2165 * Do nothing!!
2166 */
2167 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2168 aconnector->connector_id);
2169 if (sink)
2170 dc_sink_release(sink);
2171 return;
2172 }
2173
2174 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2175 aconnector->connector_id, aconnector->dc_sink, sink);
2176
2177 mutex_lock(&dev->mode_config.mutex);
2178
2179 /*
2180 * 1. Update status of the drm connector
2181 * 2. Send an event and let userspace tell us what to do
2182 */
2183 if (sink) {
2184 /*
2185 * TODO: check if we still need the S3 mode update workaround.
2186 * If yes, put it here.
2187 */
2188 if (aconnector->dc_sink)
2189 amdgpu_dm_update_freesync_caps(connector, NULL);
2190
2191 aconnector->dc_sink = sink;
2192 dc_sink_retain(aconnector->dc_sink);
2193 if (sink->dc_edid.length == 0) {
2194 aconnector->edid = NULL;
2195 if (aconnector->dc_link->aux_mode) {
2196 drm_dp_cec_unset_edid(
2197 &aconnector->dm_dp_aux.aux);
2198 }
2199 } else {
2200 aconnector->edid =
2201 (struct edid *)sink->dc_edid.raw_edid;
2202
2203 drm_connector_update_edid_property(connector,
2204 aconnector->edid);
2205 drm_add_edid_modes(connector, aconnector->edid);
2206
2207 if (aconnector->dc_link->aux_mode)
2208 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2209 aconnector->edid);
2210 }
2211
2212 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2213 update_connector_ext_caps(aconnector);
2214 } else {
2215 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2216 amdgpu_dm_update_freesync_caps(connector, NULL);
2217 drm_connector_update_edid_property(connector, NULL);
2218 aconnector->num_modes = 0;
2219 dc_sink_release(aconnector->dc_sink);
2220 aconnector->dc_sink = NULL;
2221 aconnector->edid = NULL;
2222#ifdef CONFIG_DRM_AMD_DC_HDCP
2223 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2224 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2225 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2226#endif
2227 }
2228
2229 mutex_unlock(&dev->mode_config.mutex);
2230
2231 if (sink)
2232 dc_sink_release(sink);
2233}
2234
2235static void handle_hpd_irq(void *param)
2236{
2237 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2238 struct drm_connector *connector = &aconnector->base;
2239 struct drm_device *dev = connector->dev;
2240 enum dc_connection_type new_connection_type = dc_connection_none;
2241#ifdef CONFIG_DRM_AMD_DC_HDCP
2242 struct amdgpu_device *adev = dev->dev_private;
2243#endif
2244
2245 /*
2246 * In case of failure or MST no need to update connector status or notify the OS
2247 * since (for MST case) MST does this in its own context.
2248 */
2249 mutex_lock(&aconnector->hpd_lock);
2250
2251#ifdef CONFIG_DRM_AMD_DC_HDCP
2252 if (adev->dm.hdcp_workqueue)
2253 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2254#endif
2255 if (aconnector->fake_enable)
2256 aconnector->fake_enable = false;
2257
2258 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2259 DRM_ERROR("KMS: Failed to detect connector\n");
2260
2261 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2262 emulated_link_detect(aconnector->dc_link);
2263
2264
2265 drm_modeset_lock_all(dev);
2266 dm_restore_drm_connector_state(dev, connector);
2267 drm_modeset_unlock_all(dev);
2268
2269 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2270 drm_kms_helper_hotplug_event(dev);
2271
2272 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2273 amdgpu_dm_update_connector_after_detect(aconnector);
2274
2275
2276 drm_modeset_lock_all(dev);
2277 dm_restore_drm_connector_state(dev, connector);
2278 drm_modeset_unlock_all(dev);
2279
2280 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2281 drm_kms_helper_hotplug_event(dev);
2282 }
2283 mutex_unlock(&aconnector->hpd_lock);
2284
2285}
2286
2287static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2288{
2289 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2290 uint8_t dret;
2291 bool new_irq_handled = false;
2292 int dpcd_addr;
2293 int dpcd_bytes_to_read;
2294
2295 const int max_process_count = 30;
2296 int process_count = 0;
2297
2298 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2299
2300 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2301 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2302 /* DPCD 0x200 - 0x201 for downstream IRQ */
2303 dpcd_addr = DP_SINK_COUNT;
2304 } else {
2305 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2306 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2307 dpcd_addr = DP_SINK_COUNT_ESI;
2308 }
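 /*
 * Note (annotation): DPCD 1.2+ sinks expose the Event Status Indicator
 * (ESI) block, which mirrors the legacy 0x200 IRQ registers and is what
 * MST sideband signalling relies on.
 */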
2309
2310 dret = drm_dp_dpcd_read(
2311 &aconnector->dm_dp_aux.aux,
2312 dpcd_addr,
2313 esi,
2314 dpcd_bytes_to_read);
2315
2316 while (dret == dpcd_bytes_to_read &&
2317 process_count < max_process_count) {
2318 uint8_t retry;
2319 dret = 0;
2320
2321 process_count++;
2322
2323 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2324 /* handle HPD short pulse irq */
2325 if (aconnector->mst_mgr.mst_state)
2326 drm_dp_mst_hpd_irq(
2327 &aconnector->mst_mgr,
2328 esi,
2329 &new_irq_handled);
2330
2331 if (new_irq_handled) {
2332 /* ACK at DPCD to notify down stream */
2333 const int ack_dpcd_bytes_to_write =
2334 dpcd_bytes_to_read - 1;
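 /*
 * Note (annotation): the leading SINK_COUNT/SINK_COUNT_ESI byte
 * is read-only, so the ACK below starts at dpcd_addr + 1 with
 * esi[1].
 */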
2335
2336 for (retry = 0; retry < 3; retry++) {
2337 uint8_t wret;
2338
2339 wret = drm_dp_dpcd_write(
2340 &aconnector->dm_dp_aux.aux,
2341 dpcd_addr + 1,
2342 &esi[1],
2343 ack_dpcd_bytes_to_write);
2344 if (wret == ack_dpcd_bytes_to_write)
2345 break;
2346 }
2347
2348 /* check if there is a new irq to be handled */
2349 dret = drm_dp_dpcd_read(
2350 &aconnector->dm_dp_aux.aux,
2351 dpcd_addr,
2352 esi,
2353 dpcd_bytes_to_read);
2354
2355 new_irq_handled = false;
2356 } else {
2357 break;
2358 }
2359 }
2360
2361 if (process_count == max_process_count)
2362 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2363}
2364
2365static void handle_hpd_rx_irq(void *param)
2366{
2367 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2368 struct drm_connector *connector = &aconnector->base;
2369 struct drm_device *dev = connector->dev;
2370 struct dc_link *dc_link = aconnector->dc_link;
2371 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2372 enum dc_connection_type new_connection_type = dc_connection_none;
2373#ifdef CONFIG_DRM_AMD_DC_HDCP
2374 union hpd_irq_data hpd_irq_data;
2375 struct amdgpu_device *adev = dev->dev_private;
2376
2377 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2378#endif
2379
2380 /*
2381 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
2382 * conflict; after the i2c helper is implemented, this mutex should be
2383 * retired.
2384 */
2385 if (dc_link->type != dc_connection_mst_branch)
2386 mutex_lock(&aconnector->hpd_lock);
2387
2388
2389#ifdef CONFIG_DRM_AMD_DC_HDCP
2390 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2391#else
2392 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2393#endif
2394 !is_mst_root_connector) {
2395 /* Downstream Port status changed. */
2396 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2397 DRM_ERROR("KMS: Failed to detect connector\n");
2398
2399 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2400 emulated_link_detect(dc_link);
2401
2402 if (aconnector->fake_enable)
2403 aconnector->fake_enable = false;
2404
2405 amdgpu_dm_update_connector_after_detect(aconnector);
2406
2407
2408 drm_modeset_lock_all(dev);
2409 dm_restore_drm_connector_state(dev, connector);
2410 drm_modeset_unlock_all(dev);
2411
2412 drm_kms_helper_hotplug_event(dev);
2413 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2414
2415 if (aconnector->fake_enable)
2416 aconnector->fake_enable = false;
2417
2418 amdgpu_dm_update_connector_after_detect(aconnector);
2419
2420
2421 drm_modeset_lock_all(dev);
2422 dm_restore_drm_connector_state(dev, connector);
2423 drm_modeset_unlock_all(dev);
2424
2425 drm_kms_helper_hotplug_event(dev);
2426 }
2427 }
2428#ifdef CONFIG_DRM_AMD_DC_HDCP
2429 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2430 if (adev->dm.hdcp_workqueue)
2431 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2432 }
2433#endif
2434 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2435 (dc_link->type == dc_connection_mst_branch))
2436 dm_handle_hpd_rx_irq(aconnector);
2437
2438 if (dc_link->type != dc_connection_mst_branch) {
2439 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2440 mutex_unlock(&aconnector->hpd_lock);
2441 }
2442}
2443
2444static void register_hpd_handlers(struct amdgpu_device *adev)
2445{
2446 struct drm_device *dev = adev->ddev;
2447 struct drm_connector *connector;
2448 struct amdgpu_dm_connector *aconnector;
2449 const struct dc_link *dc_link;
2450 struct dc_interrupt_params int_params = {0};
2451
2452 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2453 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2454
2455 list_for_each_entry(connector,
2456 &dev->mode_config.connector_list, head) {
2457
2458 aconnector = to_amdgpu_dm_connector(connector);
2459 dc_link = aconnector->dc_link;
2460
2461 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2462 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2463 int_params.irq_source = dc_link->irq_source_hpd;
2464
2465 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2466 handle_hpd_irq,
2467 (void *) aconnector);
2468 }
2469
2470 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2471
2472 /* Also register for DP short pulse (hpd_rx). */
2473 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2474 int_params.irq_source = dc_link->irq_source_hpd_rx;
2475
2476 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2477 handle_hpd_rx_irq,
2478 (void *) aconnector);
2479 }
2480 }
2481}
2482
2483#if defined(CONFIG_DRM_AMD_DC_SI)
2484/* Register IRQ sources and initialize IRQ callbacks */
2485static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2486{
2487 struct dc *dc = adev->dm.dc;
2488 struct common_irq_params *c_irq_params;
2489 struct dc_interrupt_params int_params = {0};
2490 int r;
2491 int i;
2492 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2493
2494 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2495 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2496
2497 /*
2498 * Actions of amdgpu_irq_add_id():
2499 * 1. Register a set() function with base driver.
2500 * Base driver will call set() function to enable/disable an
2501 * interrupt in DC hardware.
2502 * 2. Register amdgpu_dm_irq_handler().
2503 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2504 * coming from DC hardware.
2505 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2506 * for acknowledging and handling. */
2507
2508 /* Use VBLANK interrupt */
2509 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2510 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2511 if (r) {
2512 DRM_ERROR("Failed to add crtc irq id!\n");
2513 return r;
2514 }
2515
2516 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2517 int_params.irq_source =
2518 dc_interrupt_to_irq_source(dc, i + 1, 0);
2519
2520 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2521
2522 c_irq_params->adev = adev;
2523 c_irq_params->irq_src = int_params.irq_source;
2524
2525 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2526 dm_crtc_high_irq, c_irq_params);
2527 }
2528
2529 /* Use GRPH_PFLIP interrupt */
2530 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2531 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2532 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2533 if (r) {
2534 DRM_ERROR("Failed to add page flip irq id!\n");
2535 return r;
2536 }
2537
2538 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2539 int_params.irq_source =
2540 dc_interrupt_to_irq_source(dc, i, 0);
2541
2542 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2543
2544 c_irq_params->adev = adev;
2545 c_irq_params->irq_src = int_params.irq_source;
2546
2547 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2548 dm_pflip_high_irq, c_irq_params);
2549
2550 }
2551
2552 /* HPD */
2553 r = amdgpu_irq_add_id(adev, client_id,
2554 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2555 if (r) {
2556 DRM_ERROR("Failed to add hpd irq id!\n");
2557 return r;
2558 }
2559
2560 register_hpd_handlers(adev);
2561
2562 return 0;
2563}
2564#endif
2565
2566/* Register IRQ sources and initialize IRQ callbacks */
2567static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2568{
2569 struct dc *dc = adev->dm.dc;
2570 struct common_irq_params *c_irq_params;
2571 struct dc_interrupt_params int_params = {0};
2572 int r;
2573 int i;
2574 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2575
2576 if (adev->asic_type >= CHIP_VEGA10)
2577 client_id = SOC15_IH_CLIENTID_DCE;
2578
2579 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2580 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2581
2582 /*
2583 * Actions of amdgpu_irq_add_id():
2584 * 1. Register a set() function with base driver.
2585 * Base driver will call set() function to enable/disable an
2586 * interrupt in DC hardware.
2587 * 2. Register amdgpu_dm_irq_handler().
2588 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2589 * coming from DC hardware.
2590 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2591 * for acknowledging and handling. */
2592
2593 /* Use VBLANK interrupt */
2594 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2595 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2596 if (r) {
2597 DRM_ERROR("Failed to add crtc irq id!\n");
2598 return r;
2599 }
2600
2601 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2602 int_params.irq_source =
2603 dc_interrupt_to_irq_source(dc, i, 0);
2604
2605 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2606
2607 c_irq_params->adev = adev;
2608 c_irq_params->irq_src = int_params.irq_source;
2609
2610 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2611 dm_crtc_high_irq, c_irq_params);
2612 }
2613
2614 /* Use VUPDATE interrupt */
2615 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2616 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2617 if (r) {
2618 DRM_ERROR("Failed to add vupdate irq id!\n");
2619 return r;
2620 }
2621
2622 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2623 int_params.irq_source =
2624 dc_interrupt_to_irq_source(dc, i, 0);
2625
2626 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2627
2628 c_irq_params->adev = adev;
2629 c_irq_params->irq_src = int_params.irq_source;
2630
2631 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2632 dm_vupdate_high_irq, c_irq_params);
2633 }
2634
2635 /* Use GRPH_PFLIP interrupt */
2636 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2637 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2638 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2639 if (r) {
2640 DRM_ERROR("Failed to add page flip irq id!\n");
2641 return r;
2642 }
2643
2644 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2645 int_params.irq_source =
2646 dc_interrupt_to_irq_source(dc, i, 0);
2647
2648 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2649
2650 c_irq_params->adev = adev;
2651 c_irq_params->irq_src = int_params.irq_source;
2652
2653 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2654 dm_pflip_high_irq, c_irq_params);
2655
2656 }
2657
2658 /* HPD */
2659 r = amdgpu_irq_add_id(adev, client_id,
2660 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2661 if (r) {
2662 DRM_ERROR("Failed to add hpd irq id!\n");
2663 return r;
2664 }
2665
2666 register_hpd_handlers(adev);
2667
2668 return 0;
2669}
2670
2671#if defined(CONFIG_DRM_AMD_DC_DCN)
2672/* Register IRQ sources and initialize IRQ callbacks */
2673static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2674{
2675 struct dc *dc = adev->dm.dc;
2676 struct common_irq_params *c_irq_params;
2677 struct dc_interrupt_params int_params = {0};
2678 int r;
2679 int i;
2680
2681 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2682 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2683
2684 /*
2685 * Actions of amdgpu_irq_add_id():
2686 * 1. Register a set() function with base driver.
2687 * Base driver will call set() function to enable/disable an
2688 * interrupt in DC hardware.
2689 * 2. Register amdgpu_dm_irq_handler().
2690 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2691 * coming from DC hardware.
2692 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2693 * for acknowledging and handling.
2694 */
2695
2696 /* Use VSTARTUP interrupt */
2697 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2698 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2699 i++) {
2700 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2701
2702 if (r) {
2703 DRM_ERROR("Failed to add crtc irq id!\n");
2704 return r;
2705 }
2706
2707 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2708 int_params.irq_source =
2709 dc_interrupt_to_irq_source(dc, i, 0);
2710
2711 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2712
2713 c_irq_params->adev = adev;
2714 c_irq_params->irq_src = int_params.irq_source;
2715
2716 amdgpu_dm_irq_register_interrupt(
2717 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2718 }
2719
2720 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2721 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2722 * to trigger at end of each vblank, regardless of state of the lock,
2723 * matching DCE behaviour.
2724 */
2725 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2726 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2727 i++) {
2728 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2729
2730 if (r) {
2731 DRM_ERROR("Failed to add vupdate irq id!\n");
2732 return r;
2733 }
2734
2735 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2736 int_params.irq_source =
2737 dc_interrupt_to_irq_source(dc, i, 0);
2738
2739 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2740
2741 c_irq_params->adev = adev;
2742 c_irq_params->irq_src = int_params.irq_source;
2743
2744 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2745 dm_vupdate_high_irq, c_irq_params);
2746 }
2747
ff5ef992
AD
2748 /* Use GRPH_PFLIP interrupt */
2749 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2750 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2751 i++) {
2752 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2753 if (r) {
2754 DRM_ERROR("Failed to add page flip irq id!\n");
2755 return r;
2756 }
2757
2758 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2759 int_params.irq_source =
2760 dc_interrupt_to_irq_source(dc, i, 0);
2761
2762 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2763
2764 c_irq_params->adev = adev;
2765 c_irq_params->irq_src = int_params.irq_source;
2766
2767 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2768 dm_pflip_high_irq, c_irq_params);
2769
2770 }
2771
2772 /* HPD */
2773 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2774 &adev->hpd_irq);
2775 if (r) {
2776 DRM_ERROR("Failed to add hpd irq id!\n");
2777 return r;
2778 }
2779
2780 register_hpd_handlers(adev);
2781
2782 return 0;
2783}
2784#endif
2785
2786/*
2787 * Acquires the lock for the atomic state object and returns
2788 * the new atomic state.
2789 *
2790 * This should only be called during atomic check.
2791 */
2792static int dm_atomic_get_state(struct drm_atomic_state *state,
2793 struct dm_atomic_state **dm_state)
2794{
2795 struct drm_device *dev = state->dev;
2796 struct amdgpu_device *adev = dev->dev_private;
2797 struct amdgpu_display_manager *dm = &adev->dm;
2798 struct drm_private_state *priv_state;
2799
2800 if (*dm_state)
2801 return 0;
2802
2803 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2804 if (IS_ERR(priv_state))
2805 return PTR_ERR(priv_state);
2806
2807 *dm_state = to_dm_atomic_state(priv_state);
2808
2809 return 0;
2810}
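/*
 * Typical usage during atomic check (illustrative sketch):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * after which dm_state->context is safe to inspect and modify.
 */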
2811
2812static struct dm_atomic_state *
2813dm_atomic_get_new_state(struct drm_atomic_state *state)
2814{
2815 struct drm_device *dev = state->dev;
2816 struct amdgpu_device *adev = dev->dev_private;
2817 struct amdgpu_display_manager *dm = &adev->dm;
2818 struct drm_private_obj *obj;
2819 struct drm_private_state *new_obj_state;
2820 int i;
2821
2822 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2823 if (obj->funcs == dm->atomic_obj.funcs)
2824 return to_dm_atomic_state(new_obj_state);
2825 }
2826
2827 return NULL;
2828}
2829
2830static struct drm_private_state *
2831dm_atomic_duplicate_state(struct drm_private_obj *obj)
2832{
2833 struct dm_atomic_state *old_state, *new_state;
2834
2835 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2836 if (!new_state)
2837 return NULL;
2838
2839 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2840
2841 old_state = to_dm_atomic_state(obj->state);
2842
2843 if (old_state && old_state->context)
2844 new_state->context = dc_copy_state(old_state->context);
2845
2846 if (!new_state->context) {
2847 kfree(new_state);
2848 return NULL;
2849 }
2850
2851 return &new_state->base;
2852}
2853
2854static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2855 struct drm_private_state *state)
2856{
2857 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2858
2859 if (dm_state && dm_state->context)
2860 dc_release_state(dm_state->context);
2861
2862 kfree(dm_state);
2863}
2864
2865static struct drm_private_state_funcs dm_atomic_state_funcs = {
2866 .atomic_duplicate_state = dm_atomic_duplicate_state,
2867 .atomic_destroy_state = dm_atomic_destroy_state,
2868};
2869
2870static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2871{
2872 struct dm_atomic_state *state;
2873 int r;
2874
2875 adev->mode_info.mode_config_initialized = true;
2876
2877 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2878 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2879
2880 adev->ddev->mode_config.max_width = 16384;
2881 adev->ddev->mode_config.max_height = 16384;
2882
2883 adev->ddev->mode_config.preferred_depth = 24;
2884 adev->ddev->mode_config.prefer_shadow = 1;
2885 /* indicates support for immediate flip */
2886 adev->ddev->mode_config.async_page_flip = true;
2887
2888 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2889
2890 state = kzalloc(sizeof(*state), GFP_KERNEL);
2891 if (!state)
2892 return -ENOMEM;
2893
2894 state->context = dc_create_state(adev->dm.dc);
2895 if (!state->context) {
2896 kfree(state);
2897 return -ENOMEM;
2898 }
2899
2900 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2901
2902 drm_atomic_private_obj_init(adev->ddev,
2903 &adev->dm.atomic_obj,
2904 &state->base,
2905 &dm_atomic_state_funcs);
2906
2907 r = amdgpu_display_modeset_create_props(adev);
2908 if (r)
2909 return r;
2910
2911 r = amdgpu_dm_audio_init(adev);
2912 if (r)
2913 return r;
2914
2915 return 0;
2916}
2917
2918#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2919#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2920#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2921
2922#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2923 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2924
2925static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2926{
2927#if defined(CONFIG_ACPI)
2928 struct amdgpu_dm_backlight_caps caps;
2929
2930 if (dm->backlight_caps.caps_valid)
2931 return;
2932
2933 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2934 if (caps.caps_valid) {
2935 dm->backlight_caps.caps_valid = true;
2936 if (caps.aux_support)
2937 return;
206bbafe
DF
2938 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2939 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2940 } else {
2941 dm->backlight_caps.min_input_signal =
2942 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2943 dm->backlight_caps.max_input_signal =
2944 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2945 }
2946#else
2947 if (dm->backlight_caps.aux_support)
2948 return;
2949
2950 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2951 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2952#endif
2953}
2954
2955static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2956{
2957 bool rc;
2958
2959 if (!link)
2960 return 1;
2961
2962 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2963 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2964
2965 return rc ? 0 : 1;
2966}
2967
2968static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
2969 unsigned *min, unsigned *max)
2970{
2971 if (!caps)
2972 return 0;
2973
2974 if (caps->aux_support) {
2975 // Firmware limits are in nits, DC API wants millinits.
2976 *max = 1000 * caps->aux_max_input_signal;
2977 *min = 1000 * caps->aux_min_input_signal;
2978 } else {
2979 // Firmware limits are 8-bit, PWM control is 16-bit.
2980 *max = 0x101 * caps->max_input_signal;
2981 *min = 0x101 * caps->min_input_signal;
2982 }
2983 return 1;
2984}
2985
2986static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
2987 uint32_t brightness)
2988{
2989 unsigned min, max;
2990
2991 if (!get_brightness_range(caps, &min, &max))
2992 return brightness;
2993
2994 // Rescale 0..255 to min..max
2995 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
2996 AMDGPU_MAX_BL_LEVEL);
2997}
2998
2999static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3000 uint32_t brightness)
3001{
3002 unsigned min, max;
3003
3004 if (!get_brightness_range(caps, &min, &max))
3005 return brightness;
3006
3007 if (brightness < min)
3008 return 0;
3009 // Rescale min..max to 0..255
3010 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3011 max - min);
3012}
3013
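/*
 * Worked example (assuming the default PWM caps of 12/255): min = 0x101 *
 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user level of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34433, and
 * convert_brightness_to_user() maps 34433 back to 128.
 */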
3014static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3015{
3016 struct amdgpu_display_manager *dm = bl_get_data(bd);
3017 struct amdgpu_dm_backlight_caps caps;
3018 struct dc_link *link = NULL;
3019 u32 brightness;
3020 bool rc;
3021
3022 amdgpu_dm_update_backlight_caps(dm);
3023 caps = dm->backlight_caps;
3024
3025 link = (struct dc_link *)dm->backlight_link;
3026
3027 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3028 // Change brightness based on AUX property
3029 if (caps.aux_support)
3030 return set_backlight_via_aux(link, brightness);
3031
3032 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3033
3034 return rc ? 0 : 1;
3035}
3036
3037static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3038{
3039 struct amdgpu_display_manager *dm = bl_get_data(bd);
3040 int ret = dc_link_get_backlight_level(dm->backlight_link);
3041
3042 if (ret == DC_ERROR_UNEXPECTED)
3043 return bd->props.brightness;
3044 return convert_brightness_to_user(&dm->backlight_caps, ret);
3045}
3046
3047static const struct backlight_ops amdgpu_dm_backlight_ops = {
3048 .options = BL_CORE_SUSPENDRESUME,
3049 .get_brightness = amdgpu_dm_backlight_get_brightness,
3050 .update_status = amdgpu_dm_backlight_update_status,
3051};
3052
3053static void
3054amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3055{
3056 char bl_name[16];
3057 struct backlight_properties props = { 0 };
3058
3059 amdgpu_dm_update_backlight_caps(dm);
3060
3061 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3062 props.brightness = AMDGPU_MAX_BL_LEVEL;
3063 props.type = BACKLIGHT_RAW;
3064
3065 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3066 dm->adev->ddev->primary->index);
3067
3068 dm->backlight_dev = backlight_device_register(bl_name,
3069 dm->adev->ddev->dev,
3070 dm,
3071 &amdgpu_dm_backlight_ops,
3072 &props);
3073
3074 if (IS_ERR(dm->backlight_dev))
3075 DRM_ERROR("DM: Backlight registration failed!\n");
3076 else
3077 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3078}
3079
3080#endif
3081
3082static int initialize_plane(struct amdgpu_display_manager *dm,
3083 struct amdgpu_mode_info *mode_info, int plane_id,
3084 enum drm_plane_type plane_type,
3085 const struct dc_plane_cap *plane_cap)
3086{
3087 struct drm_plane *plane;
3088 unsigned long possible_crtcs;
3089 int ret = 0;
3090
3091 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3092 if (!plane) {
3093 DRM_ERROR("KMS: Failed to allocate plane\n");
3094 return -ENOMEM;
3095 }
3096 plane->type = plane_type;
3097
3098 /*
3099 * HACK: IGT tests expect that the primary plane for a CRTC
3100 * can only have one possible CRTC. Only expose support for
3101 * any CRTC if they're not going to be used as a primary plane
3102 * for a CRTC - like overlay or underlay planes.
3103 */
3104 possible_crtcs = 1 << plane_id;
3105 if (plane_id >= dm->dc->caps.max_streams)
3106 possible_crtcs = 0xff;
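 /*
 * Example (annotation): with dc->caps.max_streams == 4, primary
 * plane 1 gets possible_crtcs == 0x2 (CRTC 1 only), while overlay
 * plane 5 gets 0xff (any CRTC).
 */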
3107
3108 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3109
3110 if (ret) {
3111 DRM_ERROR("KMS: Failed to initialize plane\n");
3112 kfree(plane);
3113 return ret;
3114 }
3115
3116 if (mode_info)
3117 mode_info->planes[plane_id] = plane;
3118
3119 return ret;
3120}
3121
3122
3123static void register_backlight_device(struct amdgpu_display_manager *dm,
3124 struct dc_link *link)
3125{
3126#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3127 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3128
3129 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3130 link->type != dc_connection_none) {
3131 /*
3132 * Even if registration fails, we should continue with
3133 * DM initialization because not having a backlight control
3134 * is better than a black screen.
3133 * DM initialization because not having a backlight control
3134 * is better then a black screen.
3135 */
3136 amdgpu_dm_register_backlight_device(dm);
3137
3138 if (dm->backlight_dev)
3139 dm->backlight_link = link;
3140 }
3141#endif
3142}
3143
3144
3145/*
3146 * In this architecture, the association
3147 * connector -> encoder -> crtc
3148 * is not really required. The crtc and connector will hold the
3149 * display_index as an abstraction to use with the DAL component.
3150 *
3151 * Returns 0 on success
3152 */
3153static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3154{
3155 struct amdgpu_display_manager *dm = &adev->dm;
3156 int32_t i;
3157 struct amdgpu_dm_connector *aconnector = NULL;
3158 struct amdgpu_encoder *aencoder = NULL;
3159 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3160 uint32_t link_cnt;
3161 int32_t primary_planes;
3162 enum dc_connection_type new_connection_type = dc_connection_none;
3163 const struct dc_plane_cap *plane;
3164
3165 link_cnt = dm->dc->caps.max_links;
3166 if (amdgpu_dm_mode_config_init(dm->adev)) {
3167 DRM_ERROR("DM: Failed to initialize mode config\n");
3168 return -EINVAL;
3169 }
3170
3171 /* There is one primary plane per CRTC */
3172 primary_planes = dm->dc->caps.max_streams;
3173 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3174
3175 /*
3176 * Initialize primary planes, implicit planes for legacy IOCTLS.
3177 * Order is reversed to match iteration order in atomic check.
3178 */
3179 for (i = (primary_planes - 1); i >= 0; i--) {
3180 plane = &dm->dc->caps.planes[i];
3181
3182 if (initialize_plane(dm, mode_info, i,
3183 DRM_PLANE_TYPE_PRIMARY, plane)) {
3184 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3185 goto fail;
3186 }
3187 }
3188
3189 /*
3190 * Initialize overlay planes, index starting after primary planes.
3191 * These planes have a higher DRM index than the primary planes since
3192 * they should be considered as having a higher z-order.
3193 * Order is reversed to match iteration order in atomic check.
3194 *
3195 * Only support DCN for now, and only expose one so we don't encourage
3196 * userspace to use up all the pipes.
0d579c7e 3197 */
3198 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3199 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3200
3201 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3202 continue;
3203
3204 if (!plane->blends_with_above || !plane->blends_with_below)
3205 continue;
3206
3207 if (!plane->pixel_format_support.argb8888)
3208 continue;
3209
3210 if (initialize_plane(dm, NULL, primary_planes + i,
3211 DRM_PLANE_TYPE_OVERLAY, plane)) {
3212 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3213 goto fail;
3214 }
3215
3216 /* Only create one overlay plane. */
3217 break;
3218 }
3219
3220 for (i = 0; i < dm->dc->caps.max_streams; i++)
3221 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3222 DRM_ERROR("KMS: Failed to initialize crtc\n");
3223 goto fail;
3224 }
3225
3226 dm->display_indexes_num = dm->dc->caps.max_streams;
3227
3228 /* loops over all connectors on the board */
3229 for (i = 0; i < link_cnt; i++) {
3230 struct dc_link *link = NULL;
3231
3232 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3233 DRM_ERROR(
3234 "KMS: Cannot support more than %d display indexes\n",
3235 AMDGPU_DM_MAX_DISPLAY_INDEX);
3236 continue;
3237 }
3238
3239 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3240 if (!aconnector)
3241 goto fail;
3242
3243 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3244 if (!aencoder)
3245 goto fail;
3246
3247 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3248 DRM_ERROR("KMS: Failed to initialize encoder\n");
3249 goto fail;
3250 }
3251
3252 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3253 DRM_ERROR("KMS: Failed to initialize connector\n");
3254 goto fail;
3255 }
3256
3257 link = dc_get_link_at_index(dm->dc, i);
3258
3259 if (!dc_link_detect_sink(link, &new_connection_type))
3260 DRM_ERROR("KMS: Failed to detect connector\n");
3261
3262 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3263 emulated_link_detect(link);
3264 amdgpu_dm_update_connector_after_detect(aconnector);
3265
3266 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 3267 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 3268 register_backlight_device(dm, link);
397a9bc5
RL
3269 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3270 amdgpu_dm_set_psr_caps(link);
3271 }
3272
3273
3274 }
3275
3276 /* Software is initialized. Now we can register interrupt handlers. */
3277 switch (adev->asic_type) {
3278#if defined(CONFIG_DRM_AMD_DC_SI)
3279 case CHIP_TAHITI:
3280 case CHIP_PITCAIRN:
3281 case CHIP_VERDE:
3282 case CHIP_OLAND:
3283 if (dce60_register_irq_handlers(dm->adev)) {
3284 DRM_ERROR("DM: Failed to initialize IRQ\n");
3285 goto fail;
3286 }
3287 break;
3288#endif
3289 case CHIP_BONAIRE:
3290 case CHIP_HAWAII:
3291 case CHIP_KAVERI:
3292 case CHIP_KABINI:
3293 case CHIP_MULLINS:
3294 case CHIP_TONGA:
3295 case CHIP_FIJI:
3296 case CHIP_CARRIZO:
3297 case CHIP_STONEY:
3298 case CHIP_POLARIS11:
3299 case CHIP_POLARIS10:
3300 case CHIP_POLARIS12:
3301 case CHIP_VEGAM:
3302 case CHIP_VEGA10:
3303 case CHIP_VEGA12:
3304 case CHIP_VEGA20:
3305 if (dce110_register_irq_handlers(dm->adev)) {
3306 DRM_ERROR("DM: Failed to initialize IRQ\n");
3307 goto fail;
3308 }
3309 break;
3310#if defined(CONFIG_DRM_AMD_DC_DCN)
3311 case CHIP_RAVEN:
3312 case CHIP_NAVI12:
3313 case CHIP_NAVI10:
3314 case CHIP_NAVI14:
3315 case CHIP_RENOIR:
3316#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3317 case CHIP_SIENNA_CICHLID:
3318 case CHIP_NAVY_FLOUNDER:
3319#endif
3320 if (dcn10_register_irq_handlers(dm->adev)) {
3321 DRM_ERROR("DM: Failed to initialize IRQ\n");
3322 goto fail;
3323 }
3324 break;
3325#endif
3326 default:
3327 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3328 goto fail;
3329 }
3330
3331 /* No userspace support. */
3332 dm->dc->debug.disable_tri_buf = true;
3333
3334 return 0;
3335fail:
3336 kfree(aencoder);
3337 kfree(aconnector);
3338
3339 return -EINVAL;
3340}
3341
3342static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3343{
3344 drm_mode_config_cleanup(dm->ddev);
3345 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
3346 return;
3347}
3348
3349/******************************************************************************
3350 * amdgpu_display_funcs functions
3351 *****************************************************************************/
3352
3353/*
3354 * dm_bandwidth_update - program display watermarks
3355 *
3356 * @adev: amdgpu_device pointer
3357 *
3358 * Calculate and program the display watermarks and line buffer allocation.
3359 */
3360static void dm_bandwidth_update(struct amdgpu_device *adev)
3361{
3362 /* TODO: implement later */
3363}
3364
3365static const struct amdgpu_display_funcs dm_display_funcs = {
3366 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3367 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3368 .backlight_set_level = NULL, /* never called for DC */
3369 .backlight_get_level = NULL, /* never called for DC */
3370 .hpd_sense = NULL,/* called unconditionally */
3371 .hpd_set_polarity = NULL, /* called unconditionally */
3372 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3373 .page_flip_get_scanoutpos =
3374 dm_crtc_get_scanoutpos,/* called unconditionally */
3375 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3376 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3377};
3378
3379#if defined(CONFIG_DEBUG_KERNEL_DC)
3380
3ee6b26b
AD
3381static ssize_t s3_debug_store(struct device *device,
3382 struct device_attribute *attr,
3383 const char *buf,
3384 size_t count)
4562236b
HW
3385{
3386 int ret;
3387 int s3_state;
ef1de361 3388 struct drm_device *drm_dev = dev_get_drvdata(device);
4562236b
HW
3389 struct amdgpu_device *adev = drm_dev->dev_private;
3390
3391 ret = kstrtoint(buf, 0, &s3_state);
3392
3393 if (ret == 0) {
3394 if (s3_state) {
3395 dm_resume(adev);
4562236b
HW
3396 drm_kms_helper_hotplug_event(adev->ddev);
3397 } else
3398 dm_suspend(adev);
3399 }
3400
3401 return ret == 0 ? count : 0;
3402}
3403
3404DEVICE_ATTR_WO(s3_debug);
3405
3406#endif
3407
3408static int dm_early_init(void *handle)
3409{
3410 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3411
4562236b 3412 switch (adev->asic_type) {
55e56389
MR
3413#if defined(CONFIG_DRM_AMD_DC_SI)
3414 case CHIP_TAHITI:
3415 case CHIP_PITCAIRN:
3416 case CHIP_VERDE:
3417 adev->mode_info.num_crtc = 6;
3418 adev->mode_info.num_hpd = 6;
3419 adev->mode_info.num_dig = 6;
3420 break;
3421 case CHIP_OLAND:
3422 adev->mode_info.num_crtc = 2;
3423 adev->mode_info.num_hpd = 2;
3424 adev->mode_info.num_dig = 2;
3425 break;
3426#endif
4562236b
HW
3427 case CHIP_BONAIRE:
3428 case CHIP_HAWAII:
3429 adev->mode_info.num_crtc = 6;
3430 adev->mode_info.num_hpd = 6;
3431 adev->mode_info.num_dig = 6;
4562236b 3432 break;
cd4b356f
AD
3433 case CHIP_KAVERI:
3434 adev->mode_info.num_crtc = 4;
3435 adev->mode_info.num_hpd = 6;
3436 adev->mode_info.num_dig = 7;
cd4b356f
AD
3437 break;
3438 case CHIP_KABINI:
3439 case CHIP_MULLINS:
3440 adev->mode_info.num_crtc = 2;
3441 adev->mode_info.num_hpd = 6;
3442 adev->mode_info.num_dig = 6;
cd4b356f 3443 break;
4562236b
HW
3444 case CHIP_FIJI:
3445 case CHIP_TONGA:
3446 adev->mode_info.num_crtc = 6;
3447 adev->mode_info.num_hpd = 6;
3448 adev->mode_info.num_dig = 7;
4562236b
HW
3449 break;
3450 case CHIP_CARRIZO:
3451 adev->mode_info.num_crtc = 3;
3452 adev->mode_info.num_hpd = 6;
3453 adev->mode_info.num_dig = 9;
4562236b
HW
3454 break;
3455 case CHIP_STONEY:
3456 adev->mode_info.num_crtc = 2;
3457 adev->mode_info.num_hpd = 6;
3458 adev->mode_info.num_dig = 9;
4562236b
HW
3459 break;
3460 case CHIP_POLARIS11:
b264d345 3461 case CHIP_POLARIS12:
4562236b
HW
3462 adev->mode_info.num_crtc = 5;
3463 adev->mode_info.num_hpd = 5;
3464 adev->mode_info.num_dig = 5;
4562236b
HW
3465 break;
3466 case CHIP_POLARIS10:
7737de91 3467 case CHIP_VEGAM:
4562236b
HW
3468 adev->mode_info.num_crtc = 6;
3469 adev->mode_info.num_hpd = 6;
3470 adev->mode_info.num_dig = 6;
4562236b 3471 break;
2c8ad2d5 3472 case CHIP_VEGA10:
2325ff30 3473 case CHIP_VEGA12:
1fe6bf2f 3474 case CHIP_VEGA20:
2c8ad2d5
AD
3475 adev->mode_info.num_crtc = 6;
3476 adev->mode_info.num_hpd = 6;
3477 adev->mode_info.num_dig = 6;
3478 break;
b86a1aa3 3479#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3480 case CHIP_RAVEN:
3481 adev->mode_info.num_crtc = 4;
3482 adev->mode_info.num_hpd = 4;
3483 adev->mode_info.num_dig = 4;
ff5ef992 3484 break;
476e955d 3485#endif
476e955d 3486 case CHIP_NAVI10:
fbd2afe5 3487 case CHIP_NAVI12:
79037324
BL
3488#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3489 case CHIP_SIENNA_CICHLID:
a6c5308f 3490 case CHIP_NAVY_FLOUNDER:
79037324 3491#endif
476e955d
HW
3492 adev->mode_info.num_crtc = 6;
3493 adev->mode_info.num_hpd = 6;
3494 adev->mode_info.num_dig = 6;
3495 break;
fce651e3
BL
3496 case CHIP_NAVI14:
3497 adev->mode_info.num_crtc = 5;
3498 adev->mode_info.num_hpd = 5;
3499 adev->mode_info.num_dig = 5;
3500 break;
30221ad8
BL
3501 case CHIP_RENOIR:
3502 adev->mode_info.num_crtc = 4;
3503 adev->mode_info.num_hpd = 4;
3504 adev->mode_info.num_dig = 4;
3505 break;
4562236b 3506 default:
e63f8673 3507 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4562236b
HW
3508 return -EINVAL;
3509 }
3510
c8dd5715
MD
3511 amdgpu_dm_set_irq_funcs(adev);
3512
39cc5be2
AD
3513 if (adev->mode_info.funcs == NULL)
3514 adev->mode_info.funcs = &dm_display_funcs;
3515
1f6010a9
DF
3516 /*
3517 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 3518 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
3519 * amdgpu_device_init()
3520 */
4562236b
HW
3521#if defined(CONFIG_DEBUG_KERNEL_DC)
3522 device_create_file(
3523 adev->ddev->dev,
3524 &dev_attr_s3_debug);
3525#endif
3526
3527 return 0;
3528}
3529
9b690ef3 3530static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
3531 struct dc_stream_state *new_stream,
3532 struct dc_stream_state *old_stream)
9b690ef3 3533{
2afda735 3534 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3535}
3536
3537static bool modereset_required(struct drm_crtc_state *crtc_state)
3538{
2afda735 3539 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3540}
3541
7578ecda 3542static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
3543{
3544 drm_encoder_cleanup(encoder);
3545 kfree(encoder);
3546}
3547
3548static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3549 .destroy = amdgpu_dm_encoder_destroy,
3550};
3551
e7b07cee 3552
695af5f9
NK
3553static int fill_dc_scaling_info(const struct drm_plane_state *state,
3554 struct dc_scaling_info *scaling_info)
e7b07cee 3555{
6491f0c0 3556 int scale_w, scale_h;
e7b07cee 3557
695af5f9 3558 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 3559
695af5f9
NK
3560 /* Source coords are 16.16 fixed point; we ignore the fractional part for now. */
3561 scaling_info->src_rect.x = state->src_x >> 16;
3562 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 3563
695af5f9
NK
3564 scaling_info->src_rect.width = state->src_w >> 16;
3565 if (scaling_info->src_rect.width == 0)
3566 return -EINVAL;
3567
3568 scaling_info->src_rect.height = state->src_h >> 16;
3569 if (scaling_info->src_rect.height == 0)
3570 return -EINVAL;
3571
3572 scaling_info->dst_rect.x = state->crtc_x;
3573 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
3574
3575 if (state->crtc_w == 0)
695af5f9 3576 return -EINVAL;
e7b07cee 3577
695af5f9 3578 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
3579
3580 if (state->crtc_h == 0)
695af5f9 3581 return -EINVAL;
e7b07cee 3582
695af5f9 3583 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 3584
695af5f9
NK
3585 /* DRM doesn't specify clipping on destination output. */
3586 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 3587
6491f0c0
NK
3588 /* TODO: Validate scaling per-format with DC plane caps */
3589 scale_w = scaling_info->dst_rect.width * 1000 /
3590 scaling_info->src_rect.width;
e7b07cee 3591
6491f0c0
NK
3592 if (scale_w < 250 || scale_w > 16000)
3593 return -EINVAL;
3594
3595 scale_h = scaling_info->dst_rect.height * 1000 /
3596 scaling_info->src_rect.height;
3597
3598 if (scale_h < 250 || scale_h > 16000)
3599 return -EINVAL;
3600
695af5f9
NK
3601 /*
3602 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3603 * assume reasonable defaults based on the format.
3604 */
e7b07cee 3605
695af5f9 3606 return 0;
4562236b 3607}
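/*
 * Illustrative sketch, not driver code: how the 16.16 fixed-point source
 * coordinates and the scale-factor bounds in fill_dc_scaling_info() work
 * out. The helper name and the numbers below are hypothetical.
 */
static inline bool example_scale_factor_in_range(uint32_t src_w_16_16,
						 uint32_t dst_w)
{
	/* Drop the 16-bit fractional part, as fill_dc_scaling_info() does. */
	uint32_t src_w = src_w_16_16 >> 16;
	uint32_t scale_w;

	if (src_w == 0)
		return false;

	/* Scale factor in 1/1000 units: src 960 -> dst 1920 gives 2000. */
	scale_w = dst_w * 1000 / src_w;

	/* DC accepts roughly 0.25x (250) through 16x (16000) per axis. */
	return scale_w >= 250 && scale_w <= 16000;
}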
695af5f9 3608
3ee6b26b 3609static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
5888f07a 3610 uint64_t *tiling_flags, bool *tmz_surface)
e7b07cee 3611{
707477b0
NK
3612 struct amdgpu_bo *rbo;
3613 int r;
3614
3615 if (!amdgpu_fb) {
3616 *tiling_flags = 0;
3617 *tmz_surface = false;
3618 return 0;
3619 }
3620
3621 rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3622 r = amdgpu_bo_reserve(rbo, false);
b830ebc9 3623
e7b07cee 3624 if (unlikely(r)) {
1f6010a9 3625 /* Don't show error message when returning -ERESTARTSYS */
9bbc3031
JZ
3626 if (r != -ERESTARTSYS)
3627 DRM_ERROR("Unable to reserve buffer: %d\n", r);
e7b07cee
HW
3628 return r;
3629 }
3630
e7b07cee
HW
3631 if (tiling_flags)
3632 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3633
5888f07a
HW
3634 if (tmz_surface)
3635 *tmz_surface = amdgpu_bo_encrypted(rbo);
3636
e7b07cee
HW
3637 amdgpu_bo_unreserve(rbo);
3638
3639 return r;
3640}
3641
7df7e505
NK
3642static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3643{
3644 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3645
3646 return offset ? (address + offset * 256) : 0;
3647}
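/*
 * Worked example for get_dcc_address(), with hypothetical numbers: for a
 * surface at 0x100000 whose tiling flags carry DCC_OFFSET_256B == 0x40,
 * the DCC metadata sits at 0x100000 + 0x40 * 256 = 0x104000. An offset of
 * 0 means the buffer has no DCC metadata, hence the 0 return.
 */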
3648
695af5f9
NK
3649static int
3650fill_plane_dcc_attributes(struct amdgpu_device *adev,
3651 const struct amdgpu_framebuffer *afb,
3652 const enum surface_pixel_format format,
3653 const enum dc_rotation_angle rotation,
12e2b2d4 3654 const struct plane_size *plane_size,
695af5f9
NK
3655 const union dc_tiling_info *tiling_info,
3656 const uint64_t info,
3657 struct dc_plane_dcc_param *dcc,
87b7ebc2
RS
3658 struct dc_plane_address *address,
3659 bool force_disable_dcc)
7df7e505
NK
3660{
3661 struct dc *dc = adev->dm.dc;
8daa1218
NC
3662 struct dc_dcc_surface_param input;
3663 struct dc_surface_dcc_cap output;
7df7e505
NK
3664 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3665 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3666 uint64_t dcc_address;
3667
8daa1218
NC
3668 memset(&input, 0, sizeof(input));
3669 memset(&output, 0, sizeof(output));
3670
87b7ebc2
RS
3671 if (force_disable_dcc)
3672 return 0;
3673
7df7e505 3674 if (!offset)
09e5665a
NK
3675 return 0;
3676
695af5f9 3677 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
09e5665a 3678 return 0;
7df7e505
NK
3679
3680 if (!dc->cap_funcs.get_dcc_compression_cap)
09e5665a 3681 return -EINVAL;
7df7e505 3682
695af5f9 3683 input.format = format;
12e2b2d4
DL
3684 input.surface_size.width = plane_size->surface_size.width;
3685 input.surface_size.height = plane_size->surface_size.height;
695af5f9 3686 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 3687
695af5f9 3688 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 3689 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 3690 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
3691 input.scan = SCAN_DIRECTION_VERTICAL;
3692
3693 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 3694 return -EINVAL;
7df7e505
NK
3695
3696 if (!output.capable)
09e5665a 3697 return -EINVAL;
7df7e505
NK
3698
3699 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
09e5665a 3700 return -EINVAL;
7df7e505 3701
09e5665a 3702 dcc->enable = 1;
12e2b2d4 3703 dcc->meta_pitch =
7df7e505 3704 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
12e2b2d4 3705 dcc->independent_64b_blks = i64b;
7df7e505
NK
3706
3707 dcc_address = get_dcc_address(afb->address, info);
09e5665a
NK
3708 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3709 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
7df7e505 3710
09e5665a
NK
3711 return 0;
3712}
3713
3714static int
320932bf 3715fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 3716 const struct amdgpu_framebuffer *afb,
695af5f9
NK
3717 const enum surface_pixel_format format,
3718 const enum dc_rotation_angle rotation,
3719 const uint64_t tiling_flags,
09e5665a 3720 union dc_tiling_info *tiling_info,
12e2b2d4 3721 struct plane_size *plane_size,
09e5665a 3722 struct dc_plane_dcc_param *dcc,
87b7ebc2 3723 struct dc_plane_address *address,
5888f07a 3724 bool tmz_surface,
87b7ebc2 3725 bool force_disable_dcc)
09e5665a 3726{
320932bf 3727 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
3728 int ret;
3729
3730 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 3731 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 3732 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
3733 memset(address, 0, sizeof(*address));
3734
5888f07a
HW
3735 address->tmz_surface = tmz_surface;
3736
695af5f9 3737 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
12e2b2d4
DL
3738 plane_size->surface_size.x = 0;
3739 plane_size->surface_size.y = 0;
3740 plane_size->surface_size.width = fb->width;
3741 plane_size->surface_size.height = fb->height;
3742 plane_size->surface_pitch =
320932bf
NK
3743 fb->pitches[0] / fb->format->cpp[0];
3744
e0634e8d
NK
3745 address->type = PLN_ADDR_TYPE_GRAPHICS;
3746 address->grph.addr.low_part = lower_32_bits(afb->address);
3747 address->grph.addr.high_part = upper_32_bits(afb->address);
1894478a 3748 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
1791e54f 3749 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 3750
12e2b2d4
DL
3751 plane_size->surface_size.x = 0;
3752 plane_size->surface_size.y = 0;
3753 plane_size->surface_size.width = fb->width;
3754 plane_size->surface_size.height = fb->height;
3755 plane_size->surface_pitch =
320932bf
NK
3756 fb->pitches[0] / fb->format->cpp[0];
3757
12e2b2d4
DL
3758 plane_size->chroma_size.x = 0;
3759 plane_size->chroma_size.y = 0;
320932bf 3760 /* TODO: set these based on surface format */
12e2b2d4
DL
3761 plane_size->chroma_size.width = fb->width / 2;
3762 plane_size->chroma_size.height = fb->height / 2;
320932bf 3763
12e2b2d4 3764 plane_size->chroma_pitch =
320932bf
NK
3765 fb->pitches[1] / fb->format->cpp[1];
3766
e0634e8d
NK
3767 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3768 address->video_progressive.luma_addr.low_part =
3769 lower_32_bits(afb->address);
3770 address->video_progressive.luma_addr.high_part =
3771 upper_32_bits(afb->address);
3772 address->video_progressive.chroma_addr.low_part =
3773 lower_32_bits(chroma_addr);
3774 address->video_progressive.chroma_addr.high_part =
3775 upper_32_bits(chroma_addr);
3776 }
09e5665a
NK
3777
3778 /* Fill GFX8 params */
3779 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3780 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3781
3782 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3783 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3784 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3785 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3786 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3787
3788 /* XXX fix me for VI */
3789 tiling_info->gfx8.num_banks = num_banks;
3790 tiling_info->gfx8.array_mode =
3791 DC_ARRAY_2D_TILED_THIN1;
3792 tiling_info->gfx8.tile_split = tile_split;
3793 tiling_info->gfx8.bank_width = bankw;
3794 tiling_info->gfx8.bank_height = bankh;
3795 tiling_info->gfx8.tile_aspect = mtaspect;
3796 tiling_info->gfx8.tile_mode =
3797 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3798 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3799 == DC_ARRAY_1D_TILED_THIN1) {
3800 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3801 }
3802
3803 tiling_info->gfx8.pipe_config =
3804 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3805
3806 if (adev->asic_type == CHIP_VEGA10 ||
3807 adev->asic_type == CHIP_VEGA12 ||
3808 adev->asic_type == CHIP_VEGA20 ||
476e955d 3809 adev->asic_type == CHIP_NAVI10 ||
fce651e3 3810 adev->asic_type == CHIP_NAVI14 ||
fbd2afe5 3811 adev->asic_type == CHIP_NAVI12 ||
79037324
BL
3812#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3813 adev->asic_type == CHIP_SIENNA_CICHLID ||
a6c5308f 3814 adev->asic_type == CHIP_NAVY_FLOUNDER ||
79037324 3815#endif
30221ad8 3816 adev->asic_type == CHIP_RENOIR ||
09e5665a
NK
3817 adev->asic_type == CHIP_RAVEN) {
3818 /* Fill GFX9 params */
3819 tiling_info->gfx9.num_pipes =
3820 adev->gfx.config.gb_addr_config_fields.num_pipes;
3821 tiling_info->gfx9.num_banks =
3822 adev->gfx.config.gb_addr_config_fields.num_banks;
3823 tiling_info->gfx9.pipe_interleave =
3824 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3825 tiling_info->gfx9.num_shader_engines =
3826 adev->gfx.config.gb_addr_config_fields.num_se;
3827 tiling_info->gfx9.max_compressed_frags =
3828 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3829 tiling_info->gfx9.num_rb_per_se =
3830 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3831 tiling_info->gfx9.swizzle =
3832 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3833 tiling_info->gfx9.shaderEnable = 1;
3834
79037324 3835#ifdef CONFIG_DRM_AMD_DC_DCN3_0
a6c5308f
BL
3836 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3837 adev->asic_type == CHIP_NAVY_FLOUNDER)
79037324 3838 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
79037324 3839#endif
695af5f9
NK
3840 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3841 plane_size, tiling_info,
87b7ebc2
RS
3842 tiling_flags, dcc, address,
3843 force_disable_dcc);
09e5665a
NK
3844 if (ret)
3845 return ret;
3846 }
3847
3848 return 0;
7df7e505
NK
3849}
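/*
 * Worked example for the video (NV12-style) path above, with hypothetical
 * numbers: a 1920x1080 NV12 surface has pitches[0] = 1920 bytes and
 * cpp[0] = 1, so surface_pitch = 1920 pixels; the interleaved CbCr plane
 * has pitches[1] = 1920 bytes and cpp[1] = 2, so chroma_pitch = 960,
 * matching the 960x540 chroma_size implied by 4:2:0 subsampling.
 */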
3850
d74004b6 3851static void
695af5f9 3852fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
3853 bool *per_pixel_alpha, bool *global_alpha,
3854 int *global_alpha_value)
3855{
3856 *per_pixel_alpha = false;
3857 *global_alpha = false;
3858 *global_alpha_value = 0xff;
3859
3860 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3861 return;
3862
3863 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3864 static const uint32_t alpha_formats[] = {
3865 DRM_FORMAT_ARGB8888,
3866 DRM_FORMAT_RGBA8888,
3867 DRM_FORMAT_ABGR8888,
3868 };
3869 uint32_t format = plane_state->fb->format->format;
3870 unsigned int i;
3871
3872 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3873 if (format == alpha_formats[i]) {
3874 *per_pixel_alpha = true;
3875 break;
3876 }
3877 }
3878 }
3879
3880 if (plane_state->alpha < 0xffff) {
3881 *global_alpha = true;
3882 *global_alpha_value = plane_state->alpha >> 8;
3883 }
3884}
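/*
 * Illustrative sketch, not driver code: DRM stores plane alpha as 16 bits
 * (0x0000..0xffff) while DC takes an 8-bit global alpha, hence the >> 8 in
 * fill_blending_from_plane_state(). The helper name is hypothetical.
 */
static inline int example_drm_alpha_to_dc(uint16_t drm_alpha)
{
	/* 0xffff (opaque) -> 0xff, 0x7fff (~50%) -> 0x7f */
	return drm_alpha >> 8;
}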
3885
004fefa3
NK
3886static int
3887fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 3888 const enum surface_pixel_format format,
004fefa3
NK
3889 enum dc_color_space *color_space)
3890{
3891 bool full_range;
3892
3893 *color_space = COLOR_SPACE_SRGB;
3894
3895 /* DRM color properties only affect non-RGB formats. */
695af5f9 3896 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
3897 return 0;
3898
3899 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3900
3901 switch (plane_state->color_encoding) {
3902 case DRM_COLOR_YCBCR_BT601:
3903 if (full_range)
3904 *color_space = COLOR_SPACE_YCBCR601;
3905 else
3906 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3907 break;
3908
3909 case DRM_COLOR_YCBCR_BT709:
3910 if (full_range)
3911 *color_space = COLOR_SPACE_YCBCR709;
3912 else
3913 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3914 break;
3915
3916 case DRM_COLOR_YCBCR_BT2020:
3917 if (full_range)
3918 *color_space = COLOR_SPACE_2020_YCBCR;
3919 else
3920 return -EINVAL;
3921 break;
3922
3923 default:
3924 return -EINVAL;
3925 }
3926
3927 return 0;
3928}
3929
695af5f9
NK
3930static int
3931fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3932 const struct drm_plane_state *plane_state,
3933 const uint64_t tiling_flags,
3934 struct dc_plane_info *plane_info,
87b7ebc2 3935 struct dc_plane_address *address,
5888f07a 3936 bool tmz_surface,
87b7ebc2 3937 bool force_disable_dcc)
695af5f9
NK
3938{
3939 const struct drm_framebuffer *fb = plane_state->fb;
3940 const struct amdgpu_framebuffer *afb =
3941 to_amdgpu_framebuffer(plane_state->fb);
3942 struct drm_format_name_buf format_name;
3943 int ret;
3944
3945 memset(plane_info, 0, sizeof(*plane_info));
3946
3947 switch (fb->format->format) {
3948 case DRM_FORMAT_C8:
3949 plane_info->format =
3950 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3951 break;
3952 case DRM_FORMAT_RGB565:
3953 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3954 break;
3955 case DRM_FORMAT_XRGB8888:
3956 case DRM_FORMAT_ARGB8888:
3957 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3958 break;
3959 case DRM_FORMAT_XRGB2101010:
3960 case DRM_FORMAT_ARGB2101010:
3961 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3962 break;
3963 case DRM_FORMAT_XBGR2101010:
3964 case DRM_FORMAT_ABGR2101010:
3965 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3966 break;
3967 case DRM_FORMAT_XBGR8888:
3968 case DRM_FORMAT_ABGR8888:
3969 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3970 break;
3971 case DRM_FORMAT_NV21:
3972 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3973 break;
3974 case DRM_FORMAT_NV12:
3975 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3976 break;
cbec6477
SW
3977 case DRM_FORMAT_P010:
3978 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3979 break;
492548dc
SW
3980 case DRM_FORMAT_XRGB16161616F:
3981 case DRM_FORMAT_ARGB16161616F:
3982 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3983 break;
2a5195dc
MK
3984 case DRM_FORMAT_XBGR16161616F:
3985 case DRM_FORMAT_ABGR16161616F:
3986 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3987 break;
695af5f9
NK
3988 default:
3989 DRM_ERROR(
3990 "Unsupported screen format %s\n",
3991 drm_get_format_name(fb->format->format, &format_name));
3992 return -EINVAL;
3993 }
3994
3995 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3996 case DRM_MODE_ROTATE_0:
3997 plane_info->rotation = ROTATION_ANGLE_0;
3998 break;
3999 case DRM_MODE_ROTATE_90:
4000 plane_info->rotation = ROTATION_ANGLE_90;
4001 break;
4002 case DRM_MODE_ROTATE_180:
4003 plane_info->rotation = ROTATION_ANGLE_180;
4004 break;
4005 case DRM_MODE_ROTATE_270:
4006 plane_info->rotation = ROTATION_ANGLE_270;
4007 break;
4008 default:
4009 plane_info->rotation = ROTATION_ANGLE_0;
4010 break;
4011 }
4012
4013 plane_info->visible = true;
4014 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4015
6d83a32d
MS
4016 plane_info->layer_index = 0;
4017
695af5f9
NK
4018 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4019 &plane_info->color_space);
4020 if (ret)
4021 return ret;
4022
4023 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4024 plane_info->rotation, tiling_flags,
4025 &plane_info->tiling_info,
4026 &plane_info->plane_size,
5888f07a 4027 &plane_info->dcc, address, tmz_surface,
87b7ebc2 4028 force_disable_dcc);
695af5f9
NK
4029 if (ret)
4030 return ret;
4031
4032 fill_blending_from_plane_state(
4033 plane_state, &plane_info->per_pixel_alpha,
4034 &plane_info->global_alpha, &plane_info->global_alpha_value);
4035
4036 return 0;
4037}
4038
4039static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4040 struct dc_plane_state *dc_plane_state,
4041 struct drm_plane_state *plane_state,
4042 struct drm_crtc_state *crtc_state)
e7b07cee 4043{
cf020d49 4044 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
707477b0 4045 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
695af5f9
NK
4046 struct dc_scaling_info scaling_info;
4047 struct dc_plane_info plane_info;
695af5f9 4048 int ret;
87b7ebc2 4049 bool force_disable_dcc = false;
e7b07cee 4050
695af5f9
NK
4051 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4052 if (ret)
4053 return ret;
e7b07cee 4054
695af5f9
NK
4055 dc_plane_state->src_rect = scaling_info.src_rect;
4056 dc_plane_state->dst_rect = scaling_info.dst_rect;
4057 dc_plane_state->clip_rect = scaling_info.clip_rect;
4058 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 4059
87b7ebc2 4060 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0
NK
4061 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4062 dm_plane_state->tiling_flags,
695af5f9 4063 &plane_info,
87b7ebc2 4064 &dc_plane_state->address,
707477b0 4065 dm_plane_state->tmz_surface,
87b7ebc2 4066 force_disable_dcc);
004fefa3
NK
4067 if (ret)
4068 return ret;
4069
695af5f9
NK
4070 dc_plane_state->format = plane_info.format;
4071 dc_plane_state->color_space = plane_info.color_space;
4073 dc_plane_state->plane_size = plane_info.plane_size;
4074 dc_plane_state->rotation = plane_info.rotation;
4075 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4076 dc_plane_state->stereo_format = plane_info.stereo_format;
4077 dc_plane_state->tiling_info = plane_info.tiling_info;
4078 dc_plane_state->visible = plane_info.visible;
4079 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4080 dc_plane_state->global_alpha = plane_info.global_alpha;
4081 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4082 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 4083 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
695af5f9 4084
e277adc5
LSL
4085 /*
4086 * Always set input transfer function, since plane state is refreshed
4087 * every time.
4088 */
cf020d49
NK
4089 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4090 if (ret)
4091 return ret;
e7b07cee 4092
cf020d49 4093 return 0;
e7b07cee
HW
4094}
4095
3ee6b26b
AD
4096static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4097 const struct dm_connector_state *dm_state,
4098 struct dc_stream_state *stream)
e7b07cee
HW
4099{
4100 enum amdgpu_rmx_type rmx_type;
4101
4102 struct rect src = { 0 }; /* viewport in composition space */
4103 struct rect dst = { 0 }; /* stream addressable area */
4104
4105 /* no mode. nothing to be done */
4106 if (!mode)
4107 return;
4108
4109 /* Full screen scaling by default */
4110 src.width = mode->hdisplay;
4111 src.height = mode->vdisplay;
4112 dst.width = stream->timing.h_addressable;
4113 dst.height = stream->timing.v_addressable;
4114
f4791779
HW
4115 if (dm_state) {
4116 rmx_type = dm_state->scaling;
4117 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4118 if (src.width * dst.height <
4119 src.height * dst.width) {
4120 /* height needs less upscaling/more downscaling */
4121 dst.width = src.width *
4122 dst.height / src.height;
4123 } else {
4124 /* width needs less upscaling/more downscaling */
4125 dst.height = src.height *
4126 dst.width / src.width;
4127 }
4128 } else if (rmx_type == RMX_CENTER) {
4129 dst = src;
e7b07cee 4130 }
e7b07cee 4131
f4791779
HW
4132 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4133 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 4134
f4791779
HW
4135 if (dm_state->underscan_enable) {
4136 dst.x += dm_state->underscan_hborder / 2;
4137 dst.y += dm_state->underscan_vborder / 2;
4138 dst.width -= dm_state->underscan_hborder;
4139 dst.height -= dm_state->underscan_vborder;
4140 }
e7b07cee
HW
4141 }
4142
4143 stream->src = src;
4144 stream->dst = dst;
4145
f1ad2f5e 4146 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
e7b07cee
HW
4147 dst.x, dst.y, dst.width, dst.height);
4148
4149}
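/*
 * Worked example for the RMX_ASPECT branch above, with hypothetical
 * numbers: a 1280x1024 (5:4) mode on a 1920x1080 (16:9) stream gives
 * src.width * dst.height = 1382400 < src.height * dst.width = 1966080,
 * so height is the binding dimension: dst.width = 1280 * 1080 / 1024 =
 * 1350 and dst.x = (1920 - 1350) / 2 = 285, centering the image with
 * pillarbox bars.
 */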
4150
3ee6b26b 4151static enum dc_color_depth
42ba01fc 4152convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 4153 bool is_y420, int requested_bpc)
e7b07cee 4154{
1bc22f20 4155 uint8_t bpc;
01c22997 4156
1bc22f20
SW
4157 if (is_y420) {
4158 bpc = 8;
4159
4160 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4161 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4162 bpc = 16;
4163 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4164 bpc = 12;
4165 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4166 bpc = 10;
4167 } else {
4168 bpc = (uint8_t)connector->display_info.bpc;
4169 /* Assume 8 bpc by default if no bpc is specified. */
4170 bpc = bpc ? bpc : 8;
4171 }
e7b07cee 4172
cbd14ae7 4173 if (requested_bpc > 0) {
01c22997
NK
4174 /*
4175 * Cap display bpc based on the user requested value.
4176 *
4177 * The value for state->max_bpc may not be correctly updated
4178 * depending on when the connector gets added to the state
4179 * or if this was called outside of atomic check, so it
4180 * can't be used directly.
4181 */
cbd14ae7 4182 bpc = min_t(u8, bpc, requested_bpc);
01c22997 4183
1825fd34
NK
4184 /* Round down to the nearest even number. */
4185 bpc = bpc - (bpc & 1);
4186 }
07e3a1cf 4187
e7b07cee
HW
4188 switch (bpc) {
4189 case 0:
1f6010a9
DF
4190 /*
4191 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
4192 * EDID revisions before 1.4
4193 * TODO: Fix edid parsing
4194 */
4195 return COLOR_DEPTH_888;
4196 case 6:
4197 return COLOR_DEPTH_666;
4198 case 8:
4199 return COLOR_DEPTH_888;
4200 case 10:
4201 return COLOR_DEPTH_101010;
4202 case 12:
4203 return COLOR_DEPTH_121212;
4204 case 14:
4205 return COLOR_DEPTH_141414;
4206 case 16:
4207 return COLOR_DEPTH_161616;
4208 default:
4209 return COLOR_DEPTH_UNDEFINED;
4210 }
4211}
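/*
 * Illustrative sketch, not driver code: the bpc capping above in
 * isolation. A 12-bpc sink with a userspace-requested max of 11 yields
 * min(12, 11) = 11, and rounding down to even gives 10, i.e.
 * COLOR_DEPTH_101010. The helper name is hypothetical.
 */
static inline uint8_t example_cap_bpc(uint8_t sink_bpc, int requested_bpc)
{
	uint8_t bpc = sink_bpc ? sink_bpc : 8;	/* assume 8 if EDID is silent */

	if (requested_bpc > 0) {
		bpc = min_t(u8, bpc, requested_bpc);
		bpc = bpc - (bpc & 1);		/* round down to even */
	}
	return bpc;
}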
4212
3ee6b26b
AD
4213static enum dc_aspect_ratio
4214get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 4215{
e11d4147
LSL
4216 /* 1-1 mapping, since both enums follow the HDMI spec. */
4217 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
4218}
4219
3ee6b26b
AD
4220static enum dc_color_space
4221get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
4222{
4223 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4224
4225 switch (dc_crtc_timing->pixel_encoding) {
4226 case PIXEL_ENCODING_YCBCR422:
4227 case PIXEL_ENCODING_YCBCR444:
4228 case PIXEL_ENCODING_YCBCR420:
4229 {
4230 /*
4231 * 27.03 MHz (270300 in 100 Hz units) is the separation point
4232 * between HDTV and SDTV; per the HDMI spec we use YCbCr709 and
4233 * YCbCr601 respectively.
4234 */
380604e2 4235 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
4236 if (dc_crtc_timing->flags.Y_ONLY)
4237 color_space =
4238 COLOR_SPACE_YCBCR709_LIMITED;
4239 else
4240 color_space = COLOR_SPACE_YCBCR709;
4241 } else {
4242 if (dc_crtc_timing->flags.Y_ONLY)
4243 color_space =
4244 COLOR_SPACE_YCBCR601_LIMITED;
4245 else
4246 color_space = COLOR_SPACE_YCBCR601;
4247 }
4248
4249 }
4250 break;
4251 case PIXEL_ENCODING_RGB:
4252 color_space = COLOR_SPACE_SRGB;
4253 break;
4254
4255 default:
4256 WARN_ON(1);
4257 break;
4258 }
4259
4260 return color_space;
4261}
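/*
 * Worked example for the threshold above: pix_clk_100hz is in 100 Hz
 * units, so 270300 corresponds to 27.03 MHz. A 480p stream at 27 MHz
 * (270000) falls below it and gets YCbCr601; 720p at 74.25 MHz (742500)
 * is above it and gets YCbCr709.
 */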
4262
ea117312
TA
4263static bool adjust_colour_depth_from_display_info(
4264 struct dc_crtc_timing *timing_out,
4265 const struct drm_display_info *info)
400443e8 4266{
ea117312 4267 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 4268 int normalized_clk;
400443e8 4269 do {
380604e2 4270 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
4271 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4272 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4273 normalized_clk /= 2;
4274 /* Adjust pixel clock per the HDMI spec, based on colour depth */
ea117312
TA
4275 switch (depth) {
4276 case COLOR_DEPTH_888:
4277 break;
400443e8
ML
4278 case COLOR_DEPTH_101010:
4279 normalized_clk = (normalized_clk * 30) / 24;
4280 break;
4281 case COLOR_DEPTH_121212:
4282 normalized_clk = (normalized_clk * 36) / 24;
4283 break;
4284 case COLOR_DEPTH_161616:
4285 normalized_clk = (normalized_clk * 48) / 24;
4286 break;
4287 default:
ea117312
TA
4288 /* The above depths are the only ones valid for HDMI. */
4289 return false;
400443e8 4290 }
ea117312
TA
4291 if (normalized_clk <= info->max_tmds_clock) {
4292 timing_out->display_color_depth = depth;
4293 return true;
4294 }
4295 } while (--depth > COLOR_DEPTH_666);
4296 return false;
400443e8 4297}
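/*
 * Worked example for the loop above, with hypothetical numbers: 4K@60
 * YCbCr 4:2:0 has pix_clk_100hz = 5940000, so normalized_clk starts at
 * 594000 / 2 = 297000 kHz. Against a 300000 kHz max_tmds_clock, 12 bpc
 * needs 297000 * 36 / 24 = 445500 and 10 bpc needs 371250, both too
 * high; 8 bpc fits at 297000, so the loop settles on COLOR_DEPTH_888.
 */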
e7b07cee 4298
42ba01fc
NK
4299static void fill_stream_properties_from_drm_display_mode(
4300 struct dc_stream_state *stream,
4301 const struct drm_display_mode *mode_in,
4302 const struct drm_connector *connector,
4303 const struct drm_connector_state *connector_state,
cbd14ae7
SW
4304 const struct dc_stream_state *old_stream,
4305 int requested_bpc)
e7b07cee
HW
4306{
4307 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 4308 const struct drm_display_info *info = &connector->display_info;
d4252eee 4309 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
4310 struct hdmi_vendor_infoframe hv_frame;
4311 struct hdmi_avi_infoframe avi_frame;
e7b07cee 4312
acf83f86
WL
4313 memset(&hv_frame, 0, sizeof(hv_frame));
4314 memset(&avi_frame, 0, sizeof(avi_frame));
4315
e7b07cee
HW
4316 timing_out->h_border_left = 0;
4317 timing_out->h_border_right = 0;
4318 timing_out->v_border_top = 0;
4319 timing_out->v_border_bottom = 0;
4320 /* TODO: un-hardcode */
fe61a2f1 4321 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 4322 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 4323 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
4324 else if (drm_mode_is_420_also(info, mode_in)
4325 && aconnector->force_yuv420_output)
4326 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 4327 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 4328 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
4329 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4330 else
4331 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4332
4333 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4334 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
4335 connector,
4336 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4337 requested_bpc);
e7b07cee
HW
4338 timing_out->scan_type = SCANNING_TYPE_NODATA;
4339 timing_out->hdmi_vic = 0;
b333730d
BL
4340
4341 if (old_stream) {
4342 timing_out->vic = old_stream->timing.vic;
4343 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4344 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4345 } else {
4346 timing_out->vic = drm_match_cea_mode(mode_in);
4347 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4348 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4349 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4350 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4351 }
e7b07cee 4352
1cb1d477
WL
4353 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4354 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4355 timing_out->vic = avi_frame.video_code;
4356 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4357 timing_out->hdmi_vic = hv_frame.vic;
4358 }
4359
e7b07cee
HW
4360 timing_out->h_addressable = mode_in->crtc_hdisplay;
4361 timing_out->h_total = mode_in->crtc_htotal;
4362 timing_out->h_sync_width =
4363 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4364 timing_out->h_front_porch =
4365 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4366 timing_out->v_total = mode_in->crtc_vtotal;
4367 timing_out->v_addressable = mode_in->crtc_vdisplay;
4368 timing_out->v_front_porch =
4369 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4370 timing_out->v_sync_width =
4371 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 4372 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 4373 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
4374
4375 stream->output_color_space = get_output_color_space(timing_out);
4376
e43a432c
AK
4377 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4378 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
4379 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4380 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4381 drm_mode_is_420_also(info, mode_in) &&
4382 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4383 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4384 adjust_colour_depth_from_display_info(timing_out, info);
4385 }
4386 }
e7b07cee
HW
4387}
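/*
 * Worked example of the timing conversion above, using the standard CEA
 * 1080p60 mode (VIC 16): crtc_hdisplay = 1920, crtc_hsync_start = 2008,
 * crtc_hsync_end = 2052, crtc_htotal = 2200 yield h_front_porch =
 * 2008 - 1920 = 88 and h_sync_width = 2052 - 2008 = 44, and a 148500 kHz
 * crtc_clock becomes pix_clk_100hz = 148500 * 10 = 1485000.
 */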
4388
3ee6b26b
AD
4389static void fill_audio_info(struct audio_info *audio_info,
4390 const struct drm_connector *drm_connector,
4391 const struct dc_sink *dc_sink)
e7b07cee
HW
4392{
4393 int i = 0;
4394 int cea_revision = 0;
4395 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4396
4397 audio_info->manufacture_id = edid_caps->manufacturer_id;
4398 audio_info->product_id = edid_caps->product_id;
4399
4400 cea_revision = drm_connector->display_info.cea_rev;
4401
090afc1e 4402 strscpy(audio_info->display_name,
d2b2562c 4403 edid_caps->display_name,
090afc1e 4404 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 4405
b830ebc9 4406 if (cea_revision >= 3) {
e7b07cee
HW
4407 audio_info->mode_count = edid_caps->audio_mode_count;
4408
4409 for (i = 0; i < audio_info->mode_count; ++i) {
4410 audio_info->modes[i].format_code =
4411 (enum audio_format_code)
4412 (edid_caps->audio_modes[i].format_code);
4413 audio_info->modes[i].channel_count =
4414 edid_caps->audio_modes[i].channel_count;
4415 audio_info->modes[i].sample_rates.all =
4416 edid_caps->audio_modes[i].sample_rate;
4417 audio_info->modes[i].sample_size =
4418 edid_caps->audio_modes[i].sample_size;
4419 }
4420 }
4421
4422 audio_info->flags.all = edid_caps->speaker_flags;
4423
4424 /* TODO: We only check for the progressive mode, check for interlace mode too */
b830ebc9 4425 if (drm_connector->latency_present[0]) {
e7b07cee
HW
4426 audio_info->video_latency = drm_connector->video_latency[0];
4427 audio_info->audio_latency = drm_connector->audio_latency[0];
4428 }
4429
4430 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4431
4432}
4433
3ee6b26b
AD
4434static void
4435copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4436 struct drm_display_mode *dst_mode)
e7b07cee
HW
4437{
4438 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4439 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4440 dst_mode->crtc_clock = src_mode->crtc_clock;
4441 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4442 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 4443 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
4444 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4445 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4446 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4447 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4448 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4449 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4450 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4451 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4452}
4453
3ee6b26b
AD
4454static void
4455decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4456 const struct drm_display_mode *native_mode,
4457 bool scale_enabled)
e7b07cee
HW
4458{
4459 if (scale_enabled) {
4460 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4461 } else if (native_mode->clock == drm_mode->clock &&
4462 native_mode->htotal == drm_mode->htotal &&
4463 native_mode->vtotal == drm_mode->vtotal) {
4464 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4465 } else {
4466 /* no scaling and no amdgpu-inserted mode: nothing to patch */
4467 }
4468}
4469
aed15309
ML
4470static struct dc_sink *
4471create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 4472{
2e0ac3d6 4473 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 4474 struct dc_sink *sink = NULL;
2e0ac3d6
HW
4475 sink_init_data.link = aconnector->dc_link;
4476 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4477
4478 sink = dc_sink_create(&sink_init_data);
423788c7 4479 if (!sink) {
2e0ac3d6 4480 DRM_ERROR("Failed to create sink!\n");
aed15309 4481 return NULL;
423788c7 4482 }
2e0ac3d6 4483 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 4484
aed15309 4485 return sink;
2e0ac3d6
HW
4486}
4487
fa2123db
ML
4488static void set_multisync_trigger_params(
4489 struct dc_stream_state *stream)
4490{
4491 if (stream->triggered_crtc_reset.enabled) {
4492 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4493 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4494 }
4495}
4496
4497static void set_master_stream(struct dc_stream_state *stream_set[],
4498 int stream_count)
4499{
4500 int j, highest_rfr = 0, master_stream = 0;
4501
4502 for (j = 0; j < stream_count; j++) {
4503 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4504 int refresh_rate = 0;
4505
380604e2 4506 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
4507 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4508 if (refresh_rate > highest_rfr) {
4509 highest_rfr = refresh_rate;
4510 master_stream = j;
4511 }
4512 }
4513 }
4514 for (j = 0; j < stream_count; j++) {
03736f4c 4515 if (stream_set[j])
fa2123db
ML
4516 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4517 }
4518}
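/*
 * Illustrative sketch, not driver code: the refresh-rate figure that
 * set_master_stream() compares, in isolation. For the 1080p60 timing,
 * (1485000 * 100) / (2200 * 1125) = 60, so that stream would be chosen
 * as master over a 30 Hz candidate. The helper name is hypothetical.
 */
static inline int example_stream_refresh_rate(int pix_clk_100hz,
					      int h_total, int v_total)
{
	return (pix_clk_100hz * 100) / (h_total * v_total);
}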
4519
4520static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4521{
4522 int i = 0;
4523
4524 if (context->stream_count < 2)
4525 return;
4526 for (i = 0; i < context->stream_count ; i++) {
4527 if (!context->streams[i])
4528 continue;
1f6010a9
DF
4529 /*
4530 * TODO: add a function to read AMD VSDB bits and set
fa2123db 4531 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 4532 * For now it's set to false
fa2123db
ML
4533 */
4534 set_multisync_trigger_params(context->streams[i]);
4535 }
4536 set_master_stream(context->streams, context->stream_count);
4537}
4538
3ee6b26b
AD
4539static struct dc_stream_state *
4540create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4541 const struct drm_display_mode *drm_mode,
b333730d 4542 const struct dm_connector_state *dm_state,
cbd14ae7
SW
4543 const struct dc_stream_state *old_stream,
4544 int requested_bpc)
e7b07cee
HW
4545{
4546 struct drm_display_mode *preferred_mode = NULL;
391ef035 4547 struct drm_connector *drm_connector;
42ba01fc
NK
4548 const struct drm_connector_state *con_state =
4549 dm_state ? &dm_state->base : NULL;
0971c40e 4550 struct dc_stream_state *stream = NULL;
e7b07cee
HW
4551 struct drm_display_mode mode = *drm_mode;
4552 bool native_mode_found = false;
b333730d
BL
4553 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4554 int mode_refresh;
58124bf8 4555 int preferred_refresh = 0;
defeb878 4556#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 4557 struct dsc_dec_dpcd_caps dsc_caps;
df2f1015 4558#endif
df2f1015 4559 uint32_t link_bandwidth_kbps;
b333730d 4560
aed15309 4561 struct dc_sink *sink = NULL;
b830ebc9 4562 if (aconnector == NULL) {
e7b07cee 4563 DRM_ERROR("aconnector is NULL!\n");
64245fa7 4564 return stream;
e7b07cee
HW
4565 }
4566
e7b07cee 4567 drm_connector = &aconnector->base;
2e0ac3d6 4568
f4ac176e 4569 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
4570 sink = create_fake_sink(aconnector);
4571 if (!sink)
4572 return stream;
aed15309
ML
4573 } else {
4574 sink = aconnector->dc_sink;
dcd5fb82 4575 dc_sink_retain(sink);
f4ac176e 4576 }
2e0ac3d6 4577
aed15309 4578 stream = dc_create_stream_for_sink(sink);
4562236b 4579
b830ebc9 4580 if (stream == NULL) {
e7b07cee 4581 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 4582 goto finish;
e7b07cee
HW
4583 }
4584
ceb3dbb4
JL
4585 stream->dm_stream_context = aconnector;
4586
4a36fcba
WL
4587 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4588 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4589
e7b07cee
HW
4590 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4591 /* Search for preferred mode */
4592 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4593 native_mode_found = true;
4594 break;
4595 }
4596 }
4597 if (!native_mode_found)
4598 preferred_mode = list_first_entry_or_null(
4599 &aconnector->base.modes,
4600 struct drm_display_mode,
4601 head);
4602
b333730d
BL
4603 mode_refresh = drm_mode_vrefresh(&mode);
4604
b830ebc9 4605 if (preferred_mode == NULL) {
1f6010a9
DF
4606 /*
4607 * This may not be an error; the use case is when we have no
e7b07cee
HW
4608 * usermode calls to reset and set the mode upon hotplug. In this
4609 * case, we call set mode ourselves to restore the previous mode,
4610 * and the mode list may not have been filled in in time.
4611 */
f1ad2f5e 4612 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee
HW
4613 } else {
4614 decide_crtc_timing_for_drm_display_mode(
4615 &mode, preferred_mode,
f4791779 4616 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 4617 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
4618 }
4619
f783577c
JFZ
4620 if (!dm_state)
4621 drm_mode_set_crtcinfo(&mode, 0);
4622
b333730d
BL
4623 /*
4624 * If scaling is enabled and refresh rate didn't change
4625 * we copy the vic and polarities of the old timings
4626 */
4627 if (!scale || mode_refresh != preferred_refresh)
4628 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 4629 &mode, &aconnector->base, con_state, NULL, requested_bpc);
b333730d
BL
4630 else
4631 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 4632 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
b333730d 4633
df2f1015
DF
4634 stream->timing.flags.DSC = 0;
4635
4636 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 4637#if defined(CONFIG_DRM_AMD_DC_DCN)
2af0f378
NC
4638 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4639 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6d824ed5 4640 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
df2f1015 4641 &dsc_caps);
defeb878 4642#endif
df2f1015
DF
4643 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4644 dc_link_get_link_cap(aconnector->dc_link));
4645
defeb878 4646#if defined(CONFIG_DRM_AMD_DC_DCN)
097e6d98 4647 if (dsc_caps.is_dsc_supported) {
bcc6aa61
EB
4648 /* Set DSC policy according to dsc_clock_en */
4649 dc_dsc_policy_set_enable_dsc_when_not_needed(aconnector->dsc_settings.dsc_clock_en);
4650
0417df16 4651 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 4652 &dsc_caps,
0417df16 4653 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
df2f1015
DF
4654 link_bandwidth_kbps,
4655 &stream->timing,
4656 &stream->timing.dsc_cfg))
4657 stream->timing.flags.DSC = 1;
27e84dd7 4658 /* Overwrite the stream flag if DSC is enabled through debugfs */
097e6d98
EB
4659 if (aconnector->dsc_settings.dsc_clock_en)
4660 stream->timing.flags.DSC = 1;
734e4c97 4661
27e84dd7
EB
4662 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_width)
4663 stream->timing.dsc_cfg.num_slices_h = DIV_ROUND_UP(stream->timing.h_addressable,
734e4c97
EB
4664 aconnector->dsc_settings.dsc_slice_width);
4665
4666 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_height)
4667 stream->timing.dsc_cfg.num_slices_v = DIV_ROUND_UP(stream->timing.v_addressable,
4668 aconnector->dsc_settings.dsc_slice_height);
5268bf13
EB
4669
4670 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4671 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
097e6d98 4672 }
39a4eb85 4673#endif
df2f1015 4674 }
39a4eb85 4675
e7b07cee
HW
4676 update_stream_scaling_settings(&mode, dm_state, stream);
4677
4678 fill_audio_info(
4679 &stream->audio_info,
4680 drm_connector,
aed15309 4681 sink);
e7b07cee 4682
ceb3dbb4 4683 update_stream_signal(stream, sink);
9182b4cb 4684
d832fc3b
WL
4685 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4686 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
8a488f5d
RL
4687 if (stream->link->psr_settings.psr_feature_enabled) {
4688 /*
4689 * Should decide whether the stream supports VSC SDP colorimetry
4690 * before building the VSC info packet.
4691 */
4692 stream->use_vsc_sdp_for_colorimetry = false;
4693 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4694 stream->use_vsc_sdp_for_colorimetry =
4695 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4696 } else {
4697 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4698 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 4699 }
8a488f5d 4700 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309 4701 }
aed15309 4702finish:
dcd5fb82 4703 dc_sink_release(sink);
9e3efe3e 4704
e7b07cee
HW
4705 return stream;
4706}
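/*
 * Worked example for the debugfs DSC overrides in create_stream_for_sink(),
 * with hypothetical numbers: a 3840-pixel-wide timing with
 * dsc_slice_width = 1920 gives num_slices_h = DIV_ROUND_UP(3840, 1920) = 2,
 * and a 2160-line timing with dsc_slice_height = 1080 gives
 * num_slices_v = 2.
 */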
4707
7578ecda 4708static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
4709{
4710 drm_crtc_cleanup(crtc);
4711 kfree(crtc);
4712}
4713
4714static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 4715 struct drm_crtc_state *state)
e7b07cee
HW
4716{
4717 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4718
4719 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4720 if (cur->stream)
4721 dc_stream_release(cur->stream);
4722
4723
4724 __drm_atomic_helper_crtc_destroy_state(state);
4725
4726
4727 kfree(state);
4728}
4729
4730static void dm_crtc_reset_state(struct drm_crtc *crtc)
4731{
4732 struct dm_crtc_state *state;
4733
4734 if (crtc->state)
4735 dm_crtc_destroy_state(crtc, crtc->state);
4736
4737 state = kzalloc(sizeof(*state), GFP_KERNEL);
4738 if (WARN_ON(!state))
4739 return;
4740
4741 crtc->state = &state->base;
4742 crtc->state->crtc = crtc;
4743
4744}
4745
4746static struct drm_crtc_state *
4747dm_crtc_duplicate_state(struct drm_crtc *crtc)
4748{
4749 struct dm_crtc_state *state, *cur;
4750
4751 cur = to_dm_crtc_state(crtc->state);
4752
4753 if (WARN_ON(!crtc->state))
4754 return NULL;
4755
2004f45e 4756 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
4757 if (!state)
4758 return NULL;
e7b07cee
HW
4759
4760 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4761
4762 if (cur->stream) {
4763 state->stream = cur->stream;
4764 dc_stream_retain(state->stream);
4765 }
4766
d6ef9b41 4767 state->active_planes = cur->active_planes;
180db303 4768 state->vrr_params = cur->vrr_params;
98e6436d 4769 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 4770 state->abm_level = cur->abm_level;
bb47de73
NK
4771 state->vrr_supported = cur->vrr_supported;
4772 state->freesync_config = cur->freesync_config;
14b25846 4773 state->crc_src = cur->crc_src;
cf020d49
NK
4774 state->cm_has_degamma = cur->cm_has_degamma;
4775 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
98e6436d 4776
e7b07cee
HW
4777 /* TODO: Duplicate dc_stream once the stream object is flattened */
4778
4779 return &state->base;
4780}
4781
d2574c33
MK
4782static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4783{
4784 enum dc_irq_source irq_source;
4785 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4786 struct amdgpu_device *adev = crtc->dev->dev_private;
4787 int rc;
4788
4789 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4790
4791 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4792
4793 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4794 acrtc->crtc_id, enable ? "en" : "dis", rc);
4795 return rc;
4796}
589d2739
HW
4797
4798static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4799{
4800 enum dc_irq_source irq_source;
4801 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4802 struct amdgpu_device *adev = crtc->dev->dev_private;
d2574c33
MK
4803 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4804 int rc = 0;
4805
4806 if (enable) {
4807 /* vblank irq on -> Only need vupdate irq in vrr mode */
4808 if (amdgpu_dm_vrr_active(acrtc_state))
4809 rc = dm_set_vupdate_irq(crtc, true);
4810 } else {
4811 /* vblank irq off -> vupdate irq off */
4812 rc = dm_set_vupdate_irq(crtc, false);
4813 }
4814
4815 if (rc)
4816 return rc;
589d2739
HW
4817
4818 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
a0e30392 4819 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
589d2739
HW
4820}
4821
4822static int dm_enable_vblank(struct drm_crtc *crtc)
4823{
4824 return dm_set_vblank(crtc, true);
4825}
4826
4827static void dm_disable_vblank(struct drm_crtc *crtc)
4828{
4829 dm_set_vblank(crtc, false);
4830}
4831
e7b07cee
HW
4832/* Only the options currently available for the driver are implemented */
4833static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4834 .reset = dm_crtc_reset_state,
4835 .destroy = amdgpu_dm_crtc_destroy,
4836 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4837 .set_config = drm_atomic_helper_set_config,
4838 .page_flip = drm_atomic_helper_page_flip,
4839 .atomic_duplicate_state = dm_crtc_duplicate_state,
4840 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 4841 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 4842 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 4843 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 4844 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
4845 .enable_vblank = dm_enable_vblank,
4846 .disable_vblank = dm_disable_vblank,
e3eff4b5 4847 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
e7b07cee
HW
4848};
4849
4850static enum drm_connector_status
4851amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4852{
4853 bool connected;
c84dec2f 4854 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 4855
1f6010a9
DF
4856 /*
4857 * Notes:
e7b07cee
HW
4858 * 1. This interface is NOT called in context of HPD irq.
4859 * 2. This interface *is called* in the context of a user-mode ioctl, which
1f6010a9
DF
4860 * makes it a bad place for *any* MST-related activity.
4861 */
e7b07cee 4862
8580d60b
HW
4863 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4864 !aconnector->fake_enable)
e7b07cee
HW
4865 connected = (aconnector->dc_sink != NULL);
4866 else
4867 connected = (aconnector->base.force == DRM_FORCE_ON);
4868
4869 return (connected ? connector_status_connected :
4870 connector_status_disconnected);
4871}
4872
3ee6b26b
AD
4873int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4874 struct drm_connector_state *connector_state,
4875 struct drm_property *property,
4876 uint64_t val)
e7b07cee
HW
4877{
4878 struct drm_device *dev = connector->dev;
4879 struct amdgpu_device *adev = dev->dev_private;
4880 struct dm_connector_state *dm_old_state =
4881 to_dm_connector_state(connector->state);
4882 struct dm_connector_state *dm_new_state =
4883 to_dm_connector_state(connector_state);
4884
4885 int ret = -EINVAL;
4886
4887 if (property == dev->mode_config.scaling_mode_property) {
4888 enum amdgpu_rmx_type rmx_type;
4889
4890 switch (val) {
4891 case DRM_MODE_SCALE_CENTER:
4892 rmx_type = RMX_CENTER;
4893 break;
4894 case DRM_MODE_SCALE_ASPECT:
4895 rmx_type = RMX_ASPECT;
4896 break;
4897 case DRM_MODE_SCALE_FULLSCREEN:
4898 rmx_type = RMX_FULL;
4899 break;
4900 case DRM_MODE_SCALE_NONE:
4901 default:
4902 rmx_type = RMX_OFF;
4903 break;
4904 }
4905
4906 if (dm_old_state->scaling == rmx_type)
4907 return 0;
4908
4909 dm_new_state->scaling = rmx_type;
4910 ret = 0;
4911 } else if (property == adev->mode_info.underscan_hborder_property) {
4912 dm_new_state->underscan_hborder = val;
4913 ret = 0;
4914 } else if (property == adev->mode_info.underscan_vborder_property) {
4915 dm_new_state->underscan_vborder = val;
4916 ret = 0;
4917 } else if (property == adev->mode_info.underscan_property) {
4918 dm_new_state->underscan_enable = val;
4919 ret = 0;
c1ee92f9
DF
4920 } else if (property == adev->mode_info.abm_level_property) {
4921 dm_new_state->abm_level = val;
4922 ret = 0;
e7b07cee
HW
4923 }
4924
4925 return ret;
4926}
4927
3ee6b26b
AD
4928int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4929 const struct drm_connector_state *state,
4930 struct drm_property *property,
4931 uint64_t *val)
e7b07cee
HW
4932{
4933 struct drm_device *dev = connector->dev;
4934 struct amdgpu_device *adev = dev->dev_private;
4935 struct dm_connector_state *dm_state =
4936 to_dm_connector_state(state);
4937 int ret = -EINVAL;
4938
4939 if (property == dev->mode_config.scaling_mode_property) {
4940 switch (dm_state->scaling) {
4941 case RMX_CENTER:
4942 *val = DRM_MODE_SCALE_CENTER;
4943 break;
4944 case RMX_ASPECT:
4945 *val = DRM_MODE_SCALE_ASPECT;
4946 break;
4947 case RMX_FULL:
4948 *val = DRM_MODE_SCALE_FULLSCREEN;
4949 break;
4950 case RMX_OFF:
4951 default:
4952 *val = DRM_MODE_SCALE_NONE;
4953 break;
4954 }
4955 ret = 0;
4956 } else if (property == adev->mode_info.underscan_hborder_property) {
4957 *val = dm_state->underscan_hborder;
4958 ret = 0;
4959 } else if (property == adev->mode_info.underscan_vborder_property) {
4960 *val = dm_state->underscan_vborder;
4961 ret = 0;
4962 } else if (property == adev->mode_info.underscan_property) {
4963 *val = dm_state->underscan_enable;
4964 ret = 0;
c1ee92f9
DF
4965 } else if (property == adev->mode_info.abm_level_property) {
4966 *val = dm_state->abm_level;
4967 ret = 0;
e7b07cee 4968 }
c1ee92f9 4969
e7b07cee
HW
4970 return ret;
4971}
4972
526c654a
ED
4973static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4974{
4975 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4976
4977 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4978}
4979
7578ecda 4980static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 4981{
c84dec2f 4982 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
4983 const struct dc_link *link = aconnector->dc_link;
4984 struct amdgpu_device *adev = connector->dev->dev_private;
4985 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 4986
e7b07cee
HW
4987#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4988 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4989
89fc8d4e 4990 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
4991 link->type != dc_connection_none &&
4992 dm->backlight_dev) {
4993 backlight_device_unregister(dm->backlight_dev);
4994 dm->backlight_dev = NULL;
e7b07cee
HW
4995 }
4996#endif
dcd5fb82
MF
4997
4998 if (aconnector->dc_em_sink)
4999 dc_sink_release(aconnector->dc_em_sink);
5000 aconnector->dc_em_sink = NULL;
5001 if (aconnector->dc_sink)
5002 dc_sink_release(aconnector->dc_sink);
5003 aconnector->dc_sink = NULL;
5004
e86e8947 5005 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
5006 drm_connector_unregister(connector);
5007 drm_connector_cleanup(connector);
526c654a
ED
5008 if (aconnector->i2c) {
5009 i2c_del_adapter(&aconnector->i2c->base);
5010 kfree(aconnector->i2c);
5011 }
7daec99f 5012 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 5013
e7b07cee
HW
5014 kfree(connector);
5015}
5016
5017void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5018{
5019 struct dm_connector_state *state =
5020 to_dm_connector_state(connector->state);
5021
df099b9b
LSL
5022 if (connector->state)
5023 __drm_atomic_helper_connector_destroy_state(connector->state);
5024
e7b07cee
HW
5025 kfree(state);
5026
5027 state = kzalloc(sizeof(*state), GFP_KERNEL);
5028
5029 if (state) {
5030 state->scaling = RMX_OFF;
5031 state->underscan_enable = false;
5032 state->underscan_hborder = 0;
5033 state->underscan_vborder = 0;
01933ba4 5034 state->base.max_requested_bpc = 8;
3261e013
ML
5035 state->vcpi_slots = 0;
5036 state->pbn = 0;
c3e50f89
NK
5037 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5038 state->abm_level = amdgpu_dm_abm_level;
5039
df099b9b 5040 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
5041 }
5042}
5043
3ee6b26b
AD
5044struct drm_connector_state *
5045amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
5046{
5047 struct dm_connector_state *state =
5048 to_dm_connector_state(connector->state);
5049
5050 struct dm_connector_state *new_state =
5051 kmemdup(state, sizeof(*state), GFP_KERNEL);
5052
98e6436d
AK
5053 if (!new_state)
5054 return NULL;
e7b07cee 5055
98e6436d
AK
5056 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5057
5058 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 5059 new_state->abm_level = state->abm_level;
922454c2
NK
5060 new_state->scaling = state->scaling;
5061 new_state->underscan_enable = state->underscan_enable;
5062 new_state->underscan_hborder = state->underscan_hborder;
5063 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
5064 new_state->vcpi_slots = state->vcpi_slots;
5065 new_state->pbn = state->pbn;
98e6436d 5066 return &new_state->base;
e7b07cee
HW
5067}
5068
14f04fa4
AD
5069static int
5070amdgpu_dm_connector_late_register(struct drm_connector *connector)
5071{
5072 struct amdgpu_dm_connector *amdgpu_dm_connector =
5073 to_amdgpu_dm_connector(connector);
00a8037e 5074 int r;
14f04fa4 5075
00a8037e
AD
5076 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5077 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5078 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5079 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5080 if (r)
5081 return r;
5082 }
5083
5084#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
5085 connector_debugfs_init(amdgpu_dm_connector);
5086#endif
5087
5088 return 0;
5089}
5090
e7b07cee
HW
5091static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5092 .reset = amdgpu_dm_connector_funcs_reset,
5093 .detect = amdgpu_dm_connector_detect,
5094 .fill_modes = drm_helper_probe_single_connector_modes,
5095 .destroy = amdgpu_dm_connector_destroy,
5096 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5097 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5098 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 5099 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 5100 .late_register = amdgpu_dm_connector_late_register,
526c654a 5101 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
5102};
5103
e7b07cee
HW
5104static int get_modes(struct drm_connector *connector)
5105{
5106 return amdgpu_dm_connector_get_modes(connector);
5107}
5108
c84dec2f 5109static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5110{
5111 struct dc_sink_init_data init_params = {
5112 .link = aconnector->dc_link,
5113 .sink_signal = SIGNAL_TYPE_VIRTUAL
5114 };
70e8ffc5 5115 struct edid *edid;
e7b07cee 5116
a89ff457 5117 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
5118 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5119 aconnector->base.name);
5120
5121 aconnector->base.force = DRM_FORCE_OFF;
5122 aconnector->base.override_edid = false;
5123 return;
5124 }
5125
70e8ffc5
HW
5126 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5127
e7b07cee
HW
5128 aconnector->edid = edid;
5129
5130 aconnector->dc_em_sink = dc_link_add_remote_sink(
5131 aconnector->dc_link,
5132 (uint8_t *)edid,
5133 (edid->extensions + 1) * EDID_LENGTH,
5134 &init_params);
5135
dcd5fb82 5136 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
5137 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5138 aconnector->dc_link->local_sink :
5139 aconnector->dc_em_sink;
dcd5fb82
MF
5140 dc_sink_retain(aconnector->dc_sink);
5141 }
e7b07cee
HW
5142}
5143
c84dec2f 5144static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5145{
5146 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5147
1f6010a9
DF
5148 /*
5149 * In case of a headless boot with force-on for a DP managed connector,
e7b07cee
HW
5150 * those settings have to be != 0 to get an initial modeset.
5151 */
5152 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5153 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5154 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5155 }
5156
5157
5158 aconnector->base.override_edid = true;
5159 create_eml_sink(aconnector);
5160}
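/*
 * Worked example for the forced caps above: LANE_COUNT_FOUR at
 * LINK_RATE_HIGH2 means 4 lanes of HBR2 (5.4 Gb/s per lane). With 8b/10b
 * channel coding, only 80% of the line rate carries pixel data, which is
 * the budget the initial headless modeset gets to work with. Sketch only:
 */
static unsigned long long dp_payload_bps(unsigned int lanes,
					 unsigned long long lane_rate_bps)
{
	/* 4 * 5.4 Gb/s * 8/10 = 17.28 Gb/s of usable pixel bandwidth */
	return lanes * lane_rate_bps * 8 / 10;
}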
5161
cbd14ae7
SW
5162static struct dc_stream_state *
5163create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5164 const struct drm_display_mode *drm_mode,
5165 const struct dm_connector_state *dm_state,
5166 const struct dc_stream_state *old_stream)
5167{
5168 struct drm_connector *connector = &aconnector->base;
5169 struct amdgpu_device *adev = connector->dev->dev_private;
5170 struct dc_stream_state *stream;
4b7da34b
SW
5171 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5172 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
5173 enum dc_status dc_result = DC_OK;
5174
5175 do {
5176 stream = create_stream_for_sink(aconnector, drm_mode,
5177 dm_state, old_stream,
5178 requested_bpc);
5179 if (stream == NULL) {
5180 DRM_ERROR("Failed to create stream for sink!\n");
5181 break;
5182 }
5183
5184 dc_result = dc_validate_stream(adev->dm.dc, stream);
5185
5186 if (dc_result != DC_OK) {
74a16675 5187 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
5188 drm_mode->hdisplay,
5189 drm_mode->vdisplay,
5190 drm_mode->clock,
74a16675
RS
5191 dc_result,
5192 dc_status_to_str(dc_result));
cbd14ae7
SW
5193
5194 dc_stream_release(stream);
5195 stream = NULL;
5196 requested_bpc -= 2; /* lower bpc to retry validation */
5197 }
5198
5199 } while (stream == NULL && requested_bpc >= 6);
5200
5201 return stream;
5202}
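/*
 * Why the retry loop above steps requested_bpc down by 2: stream bandwidth
 * scales linearly with color depth. Illustrative numbers for a 4K60 mode
 * (pixel clock ~594 MHz) against the 17.28 Gb/s payload of a 4-lane HBR2
 * link -- 10 bpc RGB needs ~17.82 Gb/s and fails validation, while 8 bpc
 * needs ~14.26 Gb/s and passes. DC's real check also accounts for blanking
 * and link overhead, so treat this as a sketch only.
 */
static unsigned long long rgb_stream_bw_bps(unsigned long long pix_clk_hz,
					    unsigned int bpc)
{
	return pix_clk_hz * bpc * 3; /* three color components per pixel */
}
/*
 * rgb_stream_bw_bps(594000000ULL, 10) == 17820000000 -> exceeds HBR2 x4
 * rgb_stream_bw_bps(594000000ULL,  8) == 14256000000 -> fits
 */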
5203
ba9ca088 5204enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 5205 struct drm_display_mode *mode)
e7b07cee
HW
5206{
5207 int result = MODE_ERROR;
5208 struct dc_sink *dc_sink;
e7b07cee 5209 /* TODO: Unhardcode stream count */
0971c40e 5210 struct dc_stream_state *stream;
c84dec2f 5211 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
5212
5213 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5214 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5215 return result;
5216
1f6010a9
DF
5217 /*
5218 * Only run this the first time mode_valid is called to initialize
e7b07cee
HW
5219 * EDID mgmt
5220 */
5221 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5222 !aconnector->dc_em_sink)
5223 handle_edid_mgmt(aconnector);
5224
c84dec2f 5225 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 5226
b830ebc9 5227 if (dc_sink == NULL) {
e7b07cee
HW
5228 DRM_ERROR("dc_sink is NULL!\n");
5229 goto fail;
5230 }
5231
cbd14ae7
SW
5232 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5233 if (stream) {
5234 dc_stream_release(stream);
e7b07cee 5235 result = MODE_OK;
cbd14ae7 5236 }
e7b07cee
HW
5237
5238fail:
5239 /* TODO: error handling */
5240 return result;
5241}
5242
88694af9
NK
5243static int fill_hdr_info_packet(const struct drm_connector_state *state,
5244 struct dc_info_packet *out)
5245{
5246 struct hdmi_drm_infoframe frame;
5247 unsigned char buf[30]; /* 26 + 4 */
5248 ssize_t len;
5249 int ret, i;
5250
5251 memset(out, 0, sizeof(*out));
5252
5253 if (!state->hdr_output_metadata)
5254 return 0;
5255
5256 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5257 if (ret)
5258 return ret;
5259
5260 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5261 if (len < 0)
5262 return (int)len;
5263
5264 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5265 if (len != 30)
5266 return -EINVAL;
5267
5268 /* Prepare the infopacket for DC. */
5269 switch (state->connector->connector_type) {
5270 case DRM_MODE_CONNECTOR_HDMIA:
5271 out->hb0 = 0x87; /* type */
5272 out->hb1 = 0x01; /* version */
5273 out->hb2 = 0x1A; /* length */
5274 out->sb[0] = buf[3]; /* checksum */
5275 i = 1;
5276 break;
5277
5278 case DRM_MODE_CONNECTOR_DisplayPort:
5279 case DRM_MODE_CONNECTOR_eDP:
5280 out->hb0 = 0x00; /* sdp id, zero */
5281 out->hb1 = 0x87; /* type */
5282 out->hb2 = 0x1D; /* payload len - 1 */
5283 out->hb3 = (0x13 << 2); /* sdp version */
5284 out->sb[0] = 0x01; /* version */
5285 out->sb[1] = 0x1A; /* length */
5286 i = 2;
5287 break;
5288
5289 default:
5290 return -EINVAL;
5291 }
5292
5293 memcpy(&out->sb[i], &buf[4], 26);
5294 out->valid = true;
5295
5296 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5297 sizeof(out->sb), false);
5298
5299 return 0;
5300}
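/*
 * Byte-layout sketch for the packing above (struct is illustrative, not
 * driver code). hdmi_drm_infoframe_pack_only() emits 30 bytes: a 4-byte
 * header followed by the fixed 26-byte static metadata payload (EOTF,
 * descriptor id, display primaries, white point, mastering luminance,
 * MaxCLL, MaxFALL). The HDMI path reuses buf[3] as sb[0]; the DP path
 * wraps the same 26 bytes in an SDP header and carries no checksum.
 */
struct hdr_packed_frame {		/* mirrors buf[30] above */
	unsigned char type;		/* buf[0] = 0x87		  */
	unsigned char version;		/* buf[1] = 0x01		  */
	unsigned char length;		/* buf[2] = 0x1A (26)		  */
	unsigned char checksum;		/* buf[3], HDMI-only in sb[0]	  */
	unsigned char payload[26];	/* buf[4..29], memcpy'd to &sb[i] */
};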
5301
5302static bool
5303is_hdr_metadata_different(const struct drm_connector_state *old_state,
5304 const struct drm_connector_state *new_state)
5305{
5306 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5307 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5308
5309 if (old_blob != new_blob) {
5310 if (old_blob && new_blob &&
5311 old_blob->length == new_blob->length)
5312 return memcmp(old_blob->data, new_blob->data,
5313 old_blob->length);
5314
5315 return true;
5316 }
5317
5318 return false;
5319}
5320
5321static int
5322amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 5323 struct drm_atomic_state *state)
88694af9 5324{
51e857af
SP
5325 struct drm_connector_state *new_con_state =
5326 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
5327 struct drm_connector_state *old_con_state =
5328 drm_atomic_get_old_connector_state(state, conn);
5329 struct drm_crtc *crtc = new_con_state->crtc;
5330 struct drm_crtc_state *new_crtc_state;
5331 int ret;
5332
5333 if (!crtc)
5334 return 0;
5335
5336 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5337 struct dc_info_packet hdr_infopacket;
5338
5339 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5340 if (ret)
5341 return ret;
5342
5343 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5344 if (IS_ERR(new_crtc_state))
5345 return PTR_ERR(new_crtc_state);
5346
5347 /*
5348 * DC considers the stream backends changed if the
5349 * static metadata changes. Forcing the modeset also
5350 * gives a simple way for userspace to switch from
b232d4ed
NK
5351 * 8bpc to 10bpc when setting the metadata to enter
5352 * or exit HDR.
5353 *
5354 * Changing the static metadata after it's been
5355 * set is permissible, however. So only force a
5356 * modeset if we're entering or exiting HDR.
88694af9 5357 */
b232d4ed
NK
5358 new_crtc_state->mode_changed =
5359 !old_con_state->hdr_output_metadata ||
5360 !new_con_state->hdr_output_metadata;
88694af9
NK
5361 }
5362
5363 return 0;
5364}
5365
e7b07cee
HW
5366static const struct drm_connector_helper_funcs
5367amdgpu_dm_connector_helper_funcs = {
5368 /*
1f6010a9 5369 * If hotplugging a second, bigger display in FB console mode, the bigger
b830ebc9 5370 * resolution modes will be filtered out by drm_mode_validate_size(), and those
1f6010a9 5371 * modes are missing after the user starts lightdm. So we need to renew the
b830ebc9
HW
5372 * modes list in the get_modes callback, not just return the modes count
5373 */
e7b07cee
HW
5374 .get_modes = get_modes,
5375 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 5376 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
5377};
5378
5379static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5380{
5381}
5382
bc92c065
NK
5383static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5384{
5385 struct drm_device *dev = new_crtc_state->crtc->dev;
5386 struct drm_plane *plane;
5387
5388 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5389 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5390 return true;
5391 }
5392
5393 return false;
5394}
5395
d6ef9b41 5396static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
5397{
5398 struct drm_atomic_state *state = new_crtc_state->state;
5399 struct drm_plane *plane;
5400 int num_active = 0;
5401
5402 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5403 struct drm_plane_state *new_plane_state;
5404
5405 /* Cursor planes are "fake". */
5406 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5407 continue;
5408
5409 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5410
5411 if (!new_plane_state) {
5412 /*
5413 * The plane is enabled on the CRTC and hasn't changed
5414 * state. This means that it previously passed
5415 * validation and is therefore enabled.
5416 */
5417 num_active += 1;
5418 continue;
5419 }
5420
5421 /* We need a framebuffer to be considered enabled. */
5422 num_active += (new_plane_state->fb != NULL);
5423 }
5424
d6ef9b41
NK
5425 return num_active;
5426}
5427
8fe684e9
NK
5428static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5429 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
5430{
5431 struct dm_crtc_state *dm_new_crtc_state =
5432 to_dm_crtc_state(new_crtc_state);
5433
5434 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
5435
5436 if (!dm_new_crtc_state->stream)
5437 return;
5438
5439 dm_new_crtc_state->active_planes =
5440 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
5441}
5442
3ee6b26b
AD
5443static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5444 struct drm_crtc_state *state)
e7b07cee
HW
5445{
5446 struct amdgpu_device *adev = crtc->dev->dev_private;
5447 struct dc *dc = adev->dm.dc;
5448 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5449 int ret = -EINVAL;
5450
8fe684e9 5451 dm_update_crtc_active_planes(crtc, state);
d6ef9b41 5452
9b690ef3
BL
5453 if (unlikely(!dm_crtc_state->stream &&
5454 modeset_required(state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
5455 WARN_ON(1);
5456 return ret;
5457 }
5458
1f6010a9 5459 /* In some use cases, like reset, no stream is attached */
e7b07cee
HW
5460 if (!dm_crtc_state->stream)
5461 return 0;
5462
bc92c065
NK
5463 /*
5464 * We want at least one hardware plane enabled to use
5465 * the stream with a cursor enabled.
5466 */
c14a005c 5467 if (state->enable && state->active &&
bc92c065 5468 does_crtc_have_active_cursor(state) &&
d6ef9b41 5469 dm_crtc_state->active_planes == 0)
c14a005c
NK
5470 return -EINVAL;
5471
62c933f9 5472 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
5473 return 0;
5474
5475 return ret;
5476}
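/*
 * Illustration of the cursor check above (hypothetical helper, not driver
 * code): a commit that enables a CRTC with only the cursor plane active is
 * rejected, because DC cannot display a cursor without at least one real
 * hardware plane scanning out underneath it.
 */
static bool crtc_cursor_config_ok(bool enabled, bool has_cursor,
				  int active_planes)
{
	if (enabled && has_cursor && active_planes == 0)
		return false;	/* the -EINVAL case above */
	return true;
}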
5477
3ee6b26b
AD
5478static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5479 const struct drm_display_mode *mode,
5480 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
5481{
5482 return true;
5483}
5484
5485static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5486 .disable = dm_crtc_helper_disable,
5487 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
5488 .mode_fixup = dm_crtc_helper_mode_fixup,
5489 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
5490};
5491
5492static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5493{
5494
5495}
5496
3261e013
ML
5497 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5498{
5499 switch (display_color_depth) {
5500 case COLOR_DEPTH_666:
5501 return 6;
5502 case COLOR_DEPTH_888:
5503 return 8;
5504 case COLOR_DEPTH_101010:
5505 return 10;
5506 case COLOR_DEPTH_121212:
5507 return 12;
5508 case COLOR_DEPTH_141414:
5509 return 14;
5510 case COLOR_DEPTH_161616:
5511 return 16;
5512 default:
5513 break;
5514 }
5515 return 0;
5516}
5517
3ee6b26b
AD
5518static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5519 struct drm_crtc_state *crtc_state,
5520 struct drm_connector_state *conn_state)
e7b07cee 5521{
3261e013
ML
5522 struct drm_atomic_state *state = crtc_state->state;
5523 struct drm_connector *connector = conn_state->connector;
5524 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5525 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5526 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5527 struct drm_dp_mst_topology_mgr *mst_mgr;
5528 struct drm_dp_mst_port *mst_port;
5529 enum dc_color_depth color_depth;
5530 int clock, bpp = 0;
1bc22f20 5531 bool is_y420 = false;
3261e013
ML
5532
5533 if (!aconnector->port || !aconnector->dc_sink)
5534 return 0;
5535
5536 mst_port = aconnector->port;
5537 mst_mgr = &aconnector->mst_port->mst_mgr;
5538
5539 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5540 return 0;
5541
5542 if (!state->duplicated) {
cbd14ae7 5543 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
5544 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5545 aconnector->force_yuv420_output;
cbd14ae7
SW
5546 color_depth = convert_color_depth_from_display_info(connector,
5547 is_y420,
5548 max_bpc);
3261e013
ML
5549 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5550 clock = adjusted_mode->clock;
dc48529f 5551 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
5552 }
5553 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5554 mst_mgr,
5555 mst_port,
1c6c1cb5 5556 dm_new_connector_state->pbn,
03ca9600 5557 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
5558 if (dm_new_connector_state->vcpi_slots < 0) {
5559 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5560 return dm_new_connector_state->vcpi_slots;
5561 }
e7b07cee
HW
5562 return 0;
5563}
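/*
 * Worked example of the PBN value computed above. drm_dp_calc_pbn_mode()
 * follows the DP 1.2 MST formula: one PBN unit is 54/64 MB/s of payload
 * bandwidth, with a 0.6% margin folded in. For 1080p60 (clock = 148500 kHz)
 * at 8 bpc RGB (bpp = 24):
 *
 *   PBN = ceil(148500 * 24 * 64 * 1006 / (8 * 54 * 1000 * 1000)) = 532
 *
 * The open-coded equivalent below is a sketch for clarity only.
 */
static int pbn_for_mode(int clock_khz, int bpp)
{
	unsigned long long num =
		(unsigned long long)clock_khz * bpp * 64 * 1006;
	unsigned long long den = 8ULL * 54 * 1000 * 1000;

	return (int)((num + den - 1) / den);
}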
5564
5565const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5566 .disable = dm_encoder_helper_disable,
5567 .atomic_check = dm_encoder_helper_atomic_check
5568};
5569
d9fe1a4c 5570#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
5571static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5572 struct dc_state *dc_state)
5573{
5574 struct dc_stream_state *stream = NULL;
5575 struct drm_connector *connector;
5576 struct drm_connector_state *new_con_state, *old_con_state;
5577 struct amdgpu_dm_connector *aconnector;
5578 struct dm_connector_state *dm_conn_state;
5579 int i, j, clock, bpp;
5580 int vcpi, pbn_div, pbn = 0;
5581
5582 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5583
5584 aconnector = to_amdgpu_dm_connector(connector);
5585
5586 if (!aconnector->port)
5587 continue;
5588
5589 if (!new_con_state || !new_con_state->crtc)
5590 continue;
5591
5592 dm_conn_state = to_dm_connector_state(new_con_state);
5593
5594 for (j = 0; j < dc_state->stream_count; j++) {
5595 stream = dc_state->streams[j];
5596 if (!stream)
5597 continue;
5598
5599 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5600 break;
5601
5602 stream = NULL;
5603 }
5604
5605 if (!stream)
5606 continue;
5607
5608 if (stream->timing.flags.DSC != 1) {
5609 drm_dp_mst_atomic_enable_dsc(state,
5610 aconnector->port,
5611 dm_conn_state->pbn,
5612 0,
5613 false);
5614 continue;
5615 }
5616
5617 pbn_div = dm_mst_get_pbn_divider(stream->link);
5618 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5619 clock = stream->timing.pix_clk_100hz / 10;
5620 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5621 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5622 aconnector->port,
5623 pbn, pbn_div,
5624 true);
5625 if (vcpi < 0)
5626 return vcpi;
5627
5628 dm_conn_state->pbn = pbn;
5629 dm_conn_state->vcpi_slots = vcpi;
5630 }
5631 return 0;
5632}
d9fe1a4c 5633#endif
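/*
 * Unit note for the DSC path above: DC stores the DSC target rate in
 * 1/16-of-a-bit-per-pixel units, and passing dsc=true to
 * drm_dp_calc_pbn_mode() folds that /16 into the PBN formula. So a
 * compressed 4K60 stream (clock = 594000 kHz) at an effective 8 bpp is
 * requested as bits_per_pixel = 128:
 *
 *   PBN = ceil(594000 * (128/16) * 64 * 1006 / (8 * 54 * 1000 * 1000)) = 709
 *
 * versus roughly 2125 PBN for the same mode uncompressed at 8 bpc RGB.
 */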
29b9ba74 5634
e7b07cee
HW
5635static void dm_drm_plane_reset(struct drm_plane *plane)
5636{
5637 struct dm_plane_state *amdgpu_state = NULL;
5638
5639 if (plane->state)
5640 plane->funcs->atomic_destroy_state(plane, plane->state);
5641
5642 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 5643 WARN_ON(amdgpu_state == NULL);
1f6010a9 5644
7ddaef96
NK
5645 if (amdgpu_state)
5646 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
5647}
5648
5649static struct drm_plane_state *
5650dm_drm_plane_duplicate_state(struct drm_plane *plane)
5651{
5652 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5653
5654 old_dm_plane_state = to_dm_plane_state(plane->state);
5655 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5656 if (!dm_plane_state)
5657 return NULL;
5658
5659 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5660
3be5262e
HW
5661 if (old_dm_plane_state->dc_state) {
5662 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5663 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
5664 }
5665
707477b0
NK
5666 /* Framebuffer hasn't been updated yet, so retain old flags. */
5667 dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5668 dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5669
e7b07cee
HW
5670 return &dm_plane_state->base;
5671}
5672
dfd84d90 5673static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 5674 struct drm_plane_state *state)
e7b07cee
HW
5675{
5676 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5677
3be5262e
HW
5678 if (dm_plane_state->dc_state)
5679 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 5680
0627bbd3 5681 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
5682}
5683
5684static const struct drm_plane_funcs dm_plane_funcs = {
5685 .update_plane = drm_atomic_helper_update_plane,
5686 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 5687 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
5688 .reset = dm_drm_plane_reset,
5689 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5690 .atomic_destroy_state = dm_drm_plane_destroy_state,
5691};
5692
3ee6b26b
AD
5693static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5694 struct drm_plane_state *new_state)
e7b07cee
HW
5695{
5696 struct amdgpu_framebuffer *afb;
5697 struct drm_gem_object *obj;
5d43be0c 5698 struct amdgpu_device *adev;
e7b07cee 5699 struct amdgpu_bo *rbo;
e7b07cee 5700 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
5701 struct list_head list;
5702 struct ttm_validate_buffer tv;
5703 struct ww_acquire_ctx ticket;
5d43be0c
CK
5704 uint32_t domain;
5705 int r;
e7b07cee
HW
5706
5707 if (!new_state->fb) {
f1ad2f5e 5708 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
5709 return 0;
5710 }
5711
5712 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 5713 obj = new_state->fb->obj[0];
e7b07cee 5714 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 5715 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
5716 INIT_LIST_HEAD(&list);
5717
5718 tv.bo = &rbo->tbo;
5719 tv.num_shared = 1;
5720 list_add(&tv.head, &list);
5721
9165fb87 5722 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
5723 if (r) {
5724 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 5725 return r;
0f257b09 5726 }
e7b07cee 5727
5d43be0c 5728 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 5729 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
5730 else
5731 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 5732
7b7c6c81 5733 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 5734 if (unlikely(r != 0)) {
30b7c614
HW
5735 if (r != -ERESTARTSYS)
5736 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 5737 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
5738 return r;
5739 }
5740
bb812f1e
JZ
5741 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5742 if (unlikely(r != 0)) {
5743 amdgpu_bo_unpin(rbo);
0f257b09 5744 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5745 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
5746 return r;
5747 }
7df7e505 5748
0f257b09 5749 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5750
7b7c6c81 5751 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
5752
5753 amdgpu_bo_ref(rbo);
5754
cf322b49
NK
5755 /**
5756 * We don't do surface updates on planes that have been newly created,
5757 * but we also don't have the afb->address during atomic check.
5758 *
5759 * Fill in buffer attributes depending on the address here, but only on
5760 * newly created planes since they're not being used by DC yet and this
5761 * won't modify global state.
5762 */
5763 dm_plane_state_old = to_dm_plane_state(plane->state);
5764 dm_plane_state_new = to_dm_plane_state(new_state);
5765
3be5262e 5766 if (dm_plane_state_new->dc_state &&
cf322b49
NK
5767 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5768 struct dc_plane_state *plane_state =
5769 dm_plane_state_new->dc_state;
5770 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 5771
320932bf 5772 fill_plane_buffer_attributes(
695af5f9 5773 adev, afb, plane_state->format, plane_state->rotation,
cf322b49
NK
5774 dm_plane_state_new->tiling_flags,
5775 &plane_state->tiling_info, &plane_state->plane_size,
5776 &plane_state->dcc, &plane_state->address,
5777 dm_plane_state_new->tmz_surface, force_disable_dcc);
e7b07cee
HW
5778 }
5779
e7b07cee
HW
5780 return 0;
5781}
5782
3ee6b26b
AD
5783static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5784 struct drm_plane_state *old_state)
e7b07cee
HW
5785{
5786 struct amdgpu_bo *rbo;
e7b07cee
HW
5787 int r;
5788
5789 if (!old_state->fb)
5790 return;
5791
e68d14dd 5792 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
5793 r = amdgpu_bo_reserve(rbo, false);
5794 if (unlikely(r)) {
5795 DRM_ERROR("failed to reserve rbo before unpin\n");
5796 return;
b830ebc9
HW
5797 }
5798
5799 amdgpu_bo_unpin(rbo);
5800 amdgpu_bo_unreserve(rbo);
5801 amdgpu_bo_unref(&rbo);
e7b07cee
HW
5802}
5803
8c44515b
AP
5804static int dm_plane_helper_check_state(struct drm_plane_state *state,
5805 struct drm_crtc_state *new_crtc_state)
5806{
5807 int max_downscale = 0;
5808 int max_upscale = INT_MAX;
5809
5810 /* TODO: These should be checked against DC plane caps */
5811 return drm_atomic_helper_check_plane_state(
5812 state, new_crtc_state, max_downscale, max_upscale, true, true);
5813}
5814
7578ecda
AD
5815static int dm_plane_atomic_check(struct drm_plane *plane,
5816 struct drm_plane_state *state)
cbd19488
AG
5817{
5818 struct amdgpu_device *adev = plane->dev->dev_private;
5819 struct dc *dc = adev->dm.dc;
78171832 5820 struct dm_plane_state *dm_plane_state;
695af5f9 5821 struct dc_scaling_info scaling_info;
8c44515b 5822 struct drm_crtc_state *new_crtc_state;
695af5f9 5823 int ret;
78171832
NK
5824
5825 dm_plane_state = to_dm_plane_state(state);
cbd19488 5826
3be5262e 5827 if (!dm_plane_state->dc_state)
9a3329b1 5828 return 0;
cbd19488 5829
8c44515b
AP
5830 new_crtc_state =
5831 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5832 if (!new_crtc_state)
5833 return -EINVAL;
5834
5835 ret = dm_plane_helper_check_state(state, new_crtc_state);
5836 if (ret)
5837 return ret;
5838
695af5f9
NK
5839 ret = fill_dc_scaling_info(state, &scaling_info);
5840 if (ret)
5841 return ret;
a05bcff1 5842
62c933f9 5843 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
5844 return 0;
5845
5846 return -EINVAL;
5847}
5848
674e78ac
NK
5849static int dm_plane_atomic_async_check(struct drm_plane *plane,
5850 struct drm_plane_state *new_plane_state)
5851{
5852 /* Only support async updates on cursor planes. */
5853 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5854 return -EINVAL;
5855
5856 return 0;
5857}
5858
5859static void dm_plane_atomic_async_update(struct drm_plane *plane,
5860 struct drm_plane_state *new_state)
5861{
5862 struct drm_plane_state *old_state =
5863 drm_atomic_get_old_plane_state(new_state->state, plane);
5864
332af874 5865 swap(plane->state->fb, new_state->fb);
674e78ac
NK
5866
5867 plane->state->src_x = new_state->src_x;
5868 plane->state->src_y = new_state->src_y;
5869 plane->state->src_w = new_state->src_w;
5870 plane->state->src_h = new_state->src_h;
5871 plane->state->crtc_x = new_state->crtc_x;
5872 plane->state->crtc_y = new_state->crtc_y;
5873 plane->state->crtc_w = new_state->crtc_w;
5874 plane->state->crtc_h = new_state->crtc_h;
5875
5876 handle_cursor_update(plane, old_state);
5877}
5878
e7b07cee
HW
5879static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5880 .prepare_fb = dm_plane_helper_prepare_fb,
5881 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 5882 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
5883 .atomic_async_check = dm_plane_atomic_async_check,
5884 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
5885};
5886
5887/*
5888 * TODO: these are currently initialized to RGB formats only.
5889 * For future use cases we should either initialize them dynamically based on
5890 * plane capabilities, or initialize this array to all formats, so the internal drm
1f6010a9 5891 * check will succeed, and let DC implement the proper check
e7b07cee 5892 */
d90371b0 5893static const uint32_t rgb_formats[] = {
e7b07cee
HW
5894 DRM_FORMAT_XRGB8888,
5895 DRM_FORMAT_ARGB8888,
5896 DRM_FORMAT_RGBA8888,
5897 DRM_FORMAT_XRGB2101010,
5898 DRM_FORMAT_XBGR2101010,
5899 DRM_FORMAT_ARGB2101010,
5900 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
5901 DRM_FORMAT_XBGR8888,
5902 DRM_FORMAT_ABGR8888,
46dd9ff7 5903 DRM_FORMAT_RGB565,
e7b07cee
HW
5904};
5905
0d579c7e
NK
5906static const uint32_t overlay_formats[] = {
5907 DRM_FORMAT_XRGB8888,
5908 DRM_FORMAT_ARGB8888,
5909 DRM_FORMAT_RGBA8888,
5910 DRM_FORMAT_XBGR8888,
5911 DRM_FORMAT_ABGR8888,
7267a1a9 5912 DRM_FORMAT_RGB565
e7b07cee
HW
5913};
5914
5915static const u32 cursor_formats[] = {
5916 DRM_FORMAT_ARGB8888
5917};
5918
37c6a93b
NK
5919static int get_plane_formats(const struct drm_plane *plane,
5920 const struct dc_plane_cap *plane_cap,
5921 uint32_t *formats, int max_formats)
e7b07cee 5922{
37c6a93b
NK
5923 int i, num_formats = 0;
5924
5925 /*
5926 * TODO: Query support for each group of formats directly from
5927 * DC plane caps. This will require adding more formats to the
5928 * caps list.
5929 */
e7b07cee 5930
f180b4bc 5931 switch (plane->type) {
e7b07cee 5932 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
5933 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5934 if (num_formats >= max_formats)
5935 break;
5936
5937 formats[num_formats++] = rgb_formats[i];
5938 }
5939
ea36ad34 5940 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 5941 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
5942 if (plane_cap && plane_cap->pixel_format_support.p010)
5943 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
5944 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5945 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5946 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
5947 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5948 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 5949 }
e7b07cee 5950 break;
37c6a93b 5951
e7b07cee 5952 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
5953 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5954 if (num_formats >= max_formats)
5955 break;
5956
5957 formats[num_formats++] = overlay_formats[i];
5958 }
e7b07cee 5959 break;
37c6a93b 5960
e7b07cee 5961 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
5962 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5963 if (num_formats >= max_formats)
5964 break;
5965
5966 formats[num_formats++] = cursor_formats[i];
5967 }
e7b07cee
HW
5968 break;
5969 }
5970
37c6a93b
NK
5971 return num_formats;
5972}
5973
5974static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5975 struct drm_plane *plane,
5976 unsigned long possible_crtcs,
5977 const struct dc_plane_cap *plane_cap)
5978{
5979 uint32_t formats[32];
5980 int num_formats;
5981 int res = -EPERM;
ecc874a6 5982 unsigned int supported_rotations;
37c6a93b
NK
5983
5984 num_formats = get_plane_formats(plane, plane_cap, formats,
5985 ARRAY_SIZE(formats));
5986
5987 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5988 &dm_plane_funcs, formats, num_formats,
5989 NULL, plane->type, NULL);
5990 if (res)
5991 return res;
5992
cc1fec57
NK
5993 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5994 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
5995 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5996 BIT(DRM_MODE_BLEND_PREMULTI);
5997
5998 drm_plane_create_alpha_property(plane);
5999 drm_plane_create_blend_mode_property(plane, blend_caps);
6000 }
6001
fc8e5230 6002 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
6003 plane_cap &&
6004 (plane_cap->pixel_format_support.nv12 ||
6005 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
6006 /* This only affects YUV formats. */
6007 drm_plane_create_color_properties(
6008 plane,
6009 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
6010 BIT(DRM_COLOR_YCBCR_BT709) |
6011 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
6012 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6013 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6014 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6015 }
6016
ecc874a6
PLG
6017 supported_rotations =
6018 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6019 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6020
f784112f
MR
6021 if (dm->adev->asic_type >= CHIP_BONAIRE)
6022 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6023 supported_rotations);
ecc874a6 6024
f180b4bc 6025 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 6026
96719c54 6027 /* Create (reset) the plane state */
f180b4bc
HW
6028 if (plane->funcs->reset)
6029 plane->funcs->reset(plane);
96719c54 6030
37c6a93b 6031 return 0;
e7b07cee
HW
6032}
6033
7578ecda
AD
6034static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6035 struct drm_plane *plane,
6036 uint32_t crtc_index)
e7b07cee
HW
6037{
6038 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 6039 struct drm_plane *cursor_plane;
e7b07cee
HW
6040
6041 int res = -ENOMEM;
6042
6043 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6044 if (!cursor_plane)
6045 goto fail;
6046
f180b4bc 6047 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 6048 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
6049
6050 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6051 if (!acrtc)
6052 goto fail;
6053
6054 res = drm_crtc_init_with_planes(
6055 dm->ddev,
6056 &acrtc->base,
6057 plane,
f180b4bc 6058 cursor_plane,
e7b07cee
HW
6059 &amdgpu_dm_crtc_funcs, NULL);
6060
6061 if (res)
6062 goto fail;
6063
6064 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6065
96719c54
HW
6066 /* Create (reset) the plane state */
6067 if (acrtc->base.funcs->reset)
6068 acrtc->base.funcs->reset(&acrtc->base);
6069
e7b07cee
HW
6070 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6071 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6072
6073 acrtc->crtc_id = crtc_index;
6074 acrtc->base.enabled = false;
c37e2d29 6075 acrtc->otg_inst = -1;
e7b07cee
HW
6076
6077 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
6078 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6079 true, MAX_COLOR_LUT_ENTRIES);
086247a4 6080 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e7b07cee
HW
6081
6082 return 0;
6083
6084fail:
b830ebc9
HW
6085 kfree(acrtc);
6086 kfree(cursor_plane);
e7b07cee
HW
6087 return res;
6088}
6089
6090
6091static int to_drm_connector_type(enum signal_type st)
6092{
6093 switch (st) {
6094 case SIGNAL_TYPE_HDMI_TYPE_A:
6095 return DRM_MODE_CONNECTOR_HDMIA;
6096 case SIGNAL_TYPE_EDP:
6097 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
6098 case SIGNAL_TYPE_LVDS:
6099 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
6100 case SIGNAL_TYPE_RGB:
6101 return DRM_MODE_CONNECTOR_VGA;
6102 case SIGNAL_TYPE_DISPLAY_PORT:
6103 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6104 return DRM_MODE_CONNECTOR_DisplayPort;
6105 case SIGNAL_TYPE_DVI_DUAL_LINK:
6106 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6107 return DRM_MODE_CONNECTOR_DVID;
6108 case SIGNAL_TYPE_VIRTUAL:
6109 return DRM_MODE_CONNECTOR_VIRTUAL;
6110
6111 default:
6112 return DRM_MODE_CONNECTOR_Unknown;
6113 }
6114}
6115
2b4c1c05
DV
6116static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6117{
62afb4ad
JRS
6118 struct drm_encoder *encoder;
6119
6120 /* There is only one encoder per connector */
6121 drm_connector_for_each_possible_encoder(connector, encoder)
6122 return encoder;
6123
6124 return NULL;
2b4c1c05
DV
6125}
6126
e7b07cee
HW
6127static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6128{
e7b07cee
HW
6129 struct drm_encoder *encoder;
6130 struct amdgpu_encoder *amdgpu_encoder;
6131
2b4c1c05 6132 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
6133
6134 if (encoder == NULL)
6135 return;
6136
6137 amdgpu_encoder = to_amdgpu_encoder(encoder);
6138
6139 amdgpu_encoder->native_mode.clock = 0;
6140
6141 if (!list_empty(&connector->probed_modes)) {
6142 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 6143
e7b07cee 6144 list_for_each_entry(preferred_mode,
b830ebc9
HW
6145 &connector->probed_modes,
6146 head) {
6147 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6148 amdgpu_encoder->native_mode = *preferred_mode;
6149
e7b07cee
HW
6150 break;
6151 }
6152
6153 }
6154}
6155
3ee6b26b
AD
6156static struct drm_display_mode *
6157amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6158 char *name,
6159 int hdisplay, int vdisplay)
e7b07cee
HW
6160{
6161 struct drm_device *dev = encoder->dev;
6162 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6163 struct drm_display_mode *mode = NULL;
6164 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6165
6166 mode = drm_mode_duplicate(dev, native_mode);
6167
b830ebc9 6168 if (mode == NULL)
e7b07cee
HW
6169 return NULL;
6170
6171 mode->hdisplay = hdisplay;
6172 mode->vdisplay = vdisplay;
6173 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 6174 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
6175
6176 return mode;
6177
6178}
6179
6180static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 6181 struct drm_connector *connector)
e7b07cee
HW
6182{
6183 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6184 struct drm_display_mode *mode = NULL;
6185 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
6186 struct amdgpu_dm_connector *amdgpu_dm_connector =
6187 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6188 int i;
6189 int n;
6190 struct mode_size {
6191 char name[DRM_DISPLAY_MODE_LEN];
6192 int w;
6193 int h;
b830ebc9 6194 } common_modes[] = {
e7b07cee
HW
6195 { "640x480", 640, 480},
6196 { "800x600", 800, 600},
6197 { "1024x768", 1024, 768},
6198 { "1280x720", 1280, 720},
6199 { "1280x800", 1280, 800},
6200 {"1280x1024", 1280, 1024},
6201 { "1440x900", 1440, 900},
6202 {"1680x1050", 1680, 1050},
6203 {"1600x1200", 1600, 1200},
6204 {"1920x1080", 1920, 1080},
6205 {"1920x1200", 1920, 1200}
6206 };
6207
b830ebc9 6208 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
6209
6210 for (i = 0; i < n; i++) {
6211 struct drm_display_mode *curmode = NULL;
6212 bool mode_existed = false;
6213
6214 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
6215 common_modes[i].h > native_mode->vdisplay ||
6216 (common_modes[i].w == native_mode->hdisplay &&
6217 common_modes[i].h == native_mode->vdisplay))
6218 continue;
e7b07cee
HW
6219
6220 list_for_each_entry(curmode, &connector->probed_modes, head) {
6221 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 6222 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
6223 mode_existed = true;
6224 break;
6225 }
6226 }
6227
6228 if (mode_existed)
6229 continue;
6230
6231 mode = amdgpu_dm_create_common_mode(encoder,
6232 common_modes[i].name, common_modes[i].w,
6233 common_modes[i].h);
6234 drm_mode_probed_add(connector, mode);
c84dec2f 6235 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
6236 }
6237}
6238
3ee6b26b
AD
6239static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6240 struct edid *edid)
e7b07cee 6241{
c84dec2f
HW
6242 struct amdgpu_dm_connector *amdgpu_dm_connector =
6243 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6244
6245 if (edid) {
6246 /* empty probed_modes */
6247 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 6248 amdgpu_dm_connector->num_modes =
e7b07cee
HW
6249 drm_add_edid_modes(connector, edid);
6250
f1e5e913
YMM
6251 /* Sort the probed modes before calling
6252 * amdgpu_dm_get_native_mode(), since an EDID can have
6253 * more than one preferred mode. Modes that appear
6254 * later in the probed mode list could be of a higher
6255 * preferred resolution: for example, a 3840x2160
6256 * preferred timing in the base EDID and a 4096x2160
6257 * preferred resolution in a DID extension block later.
6258 */
6259 drm_mode_sort(&connector->probed_modes);
e7b07cee 6260 amdgpu_dm_get_native_mode(connector);
a8d8d3dc 6261 } else {
c84dec2f 6262 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 6263 }
e7b07cee
HW
6264}
6265
7578ecda 6266static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 6267{
c84dec2f
HW
6268 struct amdgpu_dm_connector *amdgpu_dm_connector =
6269 to_amdgpu_dm_connector(connector);
e7b07cee 6270 struct drm_encoder *encoder;
c84dec2f 6271 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 6272
2b4c1c05 6273 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 6274
85ee15d6 6275 if (!edid || !drm_edid_is_valid(edid)) {
1b369d3c
ML
6276 amdgpu_dm_connector->num_modes =
6277 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
6278 } else {
6279 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6280 amdgpu_dm_connector_add_common_modes(encoder, connector);
6281 }
3e332d3a 6282 amdgpu_dm_fbc_init(connector);
5099114b 6283
c84dec2f 6284 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
6285}
6286
3ee6b26b
AD
6287void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6288 struct amdgpu_dm_connector *aconnector,
6289 int connector_type,
6290 struct dc_link *link,
6291 int link_index)
e7b07cee
HW
6292{
6293 struct amdgpu_device *adev = dm->ddev->dev_private;
6294
f04bee34
NK
6295 /*
6296 * Some of the properties below require access to state, like bpc.
6297 * Allocate some default initial connector state with our reset helper.
6298 */
6299 if (aconnector->base.funcs->reset)
6300 aconnector->base.funcs->reset(&aconnector->base);
6301
e7b07cee
HW
6302 aconnector->connector_id = link_index;
6303 aconnector->dc_link = link;
6304 aconnector->base.interlace_allowed = false;
6305 aconnector->base.doublescan_allowed = false;
6306 aconnector->base.stereo_allowed = false;
6307 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6308 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 6309 aconnector->audio_inst = -1;
e7b07cee
HW
6310 mutex_init(&aconnector->hpd_lock);
6311
1f6010a9
DF
6312 /*
6313 * Configure HPD hot plug support. The connector->polled default value is 0,
b830ebc9
HW
6314 * which means HPD hot plug is not supported.
6315 */
e7b07cee
HW
6316 switch (connector_type) {
6317 case DRM_MODE_CONNECTOR_HDMIA:
6318 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 6319 aconnector->base.ycbcr_420_allowed =
9ea59d5a 6320 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
6321 break;
6322 case DRM_MODE_CONNECTOR_DisplayPort:
6323 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 6324 aconnector->base.ycbcr_420_allowed =
9ea59d5a 6325 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
6326 break;
6327 case DRM_MODE_CONNECTOR_DVID:
6328 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6329 break;
6330 default:
6331 break;
6332 }
6333
6334 drm_object_attach_property(&aconnector->base.base,
6335 dm->ddev->mode_config.scaling_mode_property,
6336 DRM_MODE_SCALE_NONE);
6337
6338 drm_object_attach_property(&aconnector->base.base,
6339 adev->mode_info.underscan_property,
6340 UNDERSCAN_OFF);
6341 drm_object_attach_property(&aconnector->base.base,
6342 adev->mode_info.underscan_hborder_property,
6343 0);
6344 drm_object_attach_property(&aconnector->base.base,
6345 adev->mode_info.underscan_vborder_property,
6346 0);
1825fd34 6347
8c61b31e
JFZ
6348 if (!aconnector->mst_port)
6349 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 6350
4a8ca46b
RL
6351 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6352 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6353 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 6354
c1ee92f9 6355 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 6356 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
6357 drm_object_attach_property(&aconnector->base.base,
6358 adev->mode_info.abm_level_property, 0);
6359 }
bb47de73
NK
6360
6361 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
6362 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6363 connector_type == DRM_MODE_CONNECTOR_eDP) {
88694af9
NK
6364 drm_object_attach_property(
6365 &aconnector->base.base,
6366 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6367
8c61b31e
JFZ
6368 if (!aconnector->mst_port)
6369 drm_connector_attach_vrr_capable_property(&aconnector->base);
6370
0c8620d6 6371#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 6372 if (adev->dm.hdcp_workqueue)
53e108aa 6373 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 6374#endif
bb47de73 6375 }
e7b07cee
HW
6376}
6377
7578ecda
AD
6378static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6379 struct i2c_msg *msgs, int num)
e7b07cee
HW
6380{
6381 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6382 struct ddc_service *ddc_service = i2c->ddc_service;
6383 struct i2c_command cmd;
6384 int i;
6385 int result = -EIO;
6386
b830ebc9 6387 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
6388
6389 if (!cmd.payloads)
6390 return result;
6391
6392 cmd.number_of_payloads = num;
6393 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6394 cmd.speed = 100;
6395
6396 for (i = 0; i < num; i++) {
6397 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6398 cmd.payloads[i].address = msgs[i].addr;
6399 cmd.payloads[i].length = msgs[i].len;
6400 cmd.payloads[i].data = msgs[i].buf;
6401 }
6402
c85e6e54
DF
6403 if (dc_submit_i2c(
6404 ddc_service->ctx->dc,
6405 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
6406 &cmd))
6407 result = num;
6408
6409 kfree(cmd.payloads);
6410 return result;
6411}
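/*
 * Usage sketch (hypothetical caller, kernel context assumed): the adapter
 * created by create_i2c() below behaves like any Linux I2C master, so a
 * standard two-message DDC transaction -- write the 0x00 offset, then read
 * 128 bytes from address 0x50 -- is what amdgpu_dm_i2c_xfer() translates
 * into dc i2c_payloads and hands to dc_submit_i2c().
 */
static int ddc_read_edid_block(struct i2c_adapter *adap, u8 *buf)
{
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf },
	};

	/* i2c_transfer() returns the number of messages transferred */
	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
}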
6412
7578ecda 6413static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
6414{
6415 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6416}
6417
6418static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6419 .master_xfer = amdgpu_dm_i2c_xfer,
6420 .functionality = amdgpu_dm_i2c_func,
6421};
6422
3ee6b26b
AD
6423static struct amdgpu_i2c_adapter *
6424create_i2c(struct ddc_service *ddc_service,
6425 int link_index,
6426 int *res)
e7b07cee
HW
6427{
6428 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6429 struct amdgpu_i2c_adapter *i2c;
6430
b830ebc9 6431 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
6432 if (!i2c)
6433 return NULL;
e7b07cee
HW
6434 i2c->base.owner = THIS_MODULE;
6435 i2c->base.class = I2C_CLASS_DDC;
6436 i2c->base.dev.parent = &adev->pdev->dev;
6437 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 6438 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
6439 i2c_set_adapdata(&i2c->base, i2c);
6440 i2c->ddc_service = ddc_service;
c85e6e54 6441 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
6442
6443 return i2c;
6444}
6445
89fc8d4e 6446
1f6010a9
DF
6447/*
6448 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
6449 * dc_link which will be represented by this aconnector.
6450 */
7578ecda
AD
6451static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6452 struct amdgpu_dm_connector *aconnector,
6453 uint32_t link_index,
6454 struct amdgpu_encoder *aencoder)
e7b07cee
HW
6455{
6456 int res = 0;
6457 int connector_type;
6458 struct dc *dc = dm->dc;
6459 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6460 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
6461
6462 link->priv = aconnector;
e7b07cee 6463
f1ad2f5e 6464 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
6465
6466 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
6467 if (!i2c) {
6468 DRM_ERROR("Failed to create i2c adapter data\n");
6469 return -ENOMEM;
6470 }
6471
e7b07cee
HW
6472 aconnector->i2c = i2c;
6473 res = i2c_add_adapter(&i2c->base);
6474
6475 if (res) {
6476 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6477 goto out_free;
6478 }
6479
6480 connector_type = to_drm_connector_type(link->connector_signal);
6481
17165de2 6482 res = drm_connector_init_with_ddc(
e7b07cee
HW
6483 dm->ddev,
6484 &aconnector->base,
6485 &amdgpu_dm_connector_funcs,
17165de2
AP
6486 connector_type,
6487 &i2c->base);
e7b07cee
HW
6488
6489 if (res) {
6490 DRM_ERROR("connector_init failed\n");
6491 aconnector->connector_id = -1;
6492 goto out_free;
6493 }
6494
6495 drm_connector_helper_add(
6496 &aconnector->base,
6497 &amdgpu_dm_connector_helper_funcs);
6498
6499 amdgpu_dm_connector_init_helper(
6500 dm,
6501 aconnector,
6502 connector_type,
6503 link,
6504 link_index);
6505
cde4c44d 6506 drm_connector_attach_encoder(
e7b07cee
HW
6507 &aconnector->base, &aencoder->base);
6508
e7b07cee
HW
6509 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6510 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 6511 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 6512
e7b07cee
HW
6513out_free:
6514 if (res) {
6515 kfree(i2c);
6516 aconnector->i2c = NULL;
6517 }
6518 return res;
6519}
6520
6521int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6522{
6523 switch (adev->mode_info.num_crtc) {
6524 case 1:
6525 return 0x1;
6526 case 2:
6527 return 0x3;
6528 case 3:
6529 return 0x7;
6530 case 4:
6531 return 0xf;
6532 case 5:
6533 return 0x1f;
6534 case 6:
6535 default:
6536 return 0x3f;
6537 }
6538}
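/*
 * The switch above is a bounded all-ones mask: for n CRTCs it returns
 * (1 << n) - 1, saturating at 6 CRTCs (0x3f), which is also what the
 * default case yields for out-of-range counts. Equivalent sketch:
 */
static u32 encoder_crtc_mask(int num_crtc)
{
	if (num_crtc < 1 || num_crtc > 6)
		return 0x3f;	/* matches the switch's default */
	return (1u << num_crtc) - 1;
}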
6539
7578ecda
AD
6540static int amdgpu_dm_encoder_init(struct drm_device *dev,
6541 struct amdgpu_encoder *aencoder,
6542 uint32_t link_index)
e7b07cee
HW
6543{
6544 struct amdgpu_device *adev = dev->dev_private;
6545
6546 int res = drm_encoder_init(dev,
6547 &aencoder->base,
6548 &amdgpu_dm_encoder_funcs,
6549 DRM_MODE_ENCODER_TMDS,
6550 NULL);
6551
6552 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6553
6554 if (!res)
6555 aencoder->encoder_id = link_index;
6556 else
6557 aencoder->encoder_id = -1;
6558
6559 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6560
6561 return res;
6562}
6563
3ee6b26b
AD
6564static void manage_dm_interrupts(struct amdgpu_device *adev,
6565 struct amdgpu_crtc *acrtc,
6566 bool enable)
e7b07cee
HW
6567{
6568 /*
8fe684e9
NK
6569 * We have no guarantee that the frontend index maps to the same
6570 * backend index - some even map to more than one.
6571 *
6572 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
6573 */
6574 int irq_type =
734dd01d 6575 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
6576 adev,
6577 acrtc->crtc_id);
6578
6579 if (enable) {
6580 drm_crtc_vblank_on(&acrtc->base);
6581 amdgpu_irq_get(
6582 adev,
6583 &adev->pageflip_irq,
6584 irq_type);
6585 } else {
6586
6587 amdgpu_irq_put(
6588 adev,
6589 &adev->pageflip_irq,
6590 irq_type);
6591 drm_crtc_vblank_off(&acrtc->base);
6592 }
6593}
6594
8fe684e9
NK
6595static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6596 struct amdgpu_crtc *acrtc)
6597{
6598 int irq_type =
6599 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6600
6601 /*
6602 * This reads the current state for the IRQ and forcibly reapplies
6603 * the setting to hardware.
6604 */
6605 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6606}
6607
3ee6b26b
AD
6608static bool
6609is_scaling_state_different(const struct dm_connector_state *dm_state,
6610 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
6611{
6612 if (dm_state->scaling != old_dm_state->scaling)
6613 return true;
6614 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6615 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6616 return true;
6617 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6618 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6619 return true;
b830ebc9
HW
6620 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6621 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6622 return true;
e7b07cee
HW
6623 return false;
6624}
6625
0c8620d6
BL
6626#ifdef CONFIG_DRM_AMD_DC_HDCP
6627static bool is_content_protection_different(struct drm_connector_state *state,
6628 const struct drm_connector_state *old_state,
6629 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6630{
6631 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6632
53e108aa
BL
6633 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6634 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6635 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6636 return true;
6637 }
6638
0c8620d6
BL
6639 /* CP is being re-enabled, ignore this */
6640 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6641 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6642 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6643 return false;
6644 }
6645
6646 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6647 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6648 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6649 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6650
6651 /* Check if something is connected/enabled; otherwise we would start HDCP
6652 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
6653 */
6654 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6655 aconnector->dc_sink != NULL)
6656 return true;
6657
6658 if (old_state->content_protection == state->content_protection)
6659 return false;
6660
6661 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6662 return true;
6663
6664 return false;
6665}
6666
0c8620d6 6667#endif
3ee6b26b
AD
6668static void remove_stream(struct amdgpu_device *adev,
6669 struct amdgpu_crtc *acrtc,
6670 struct dc_stream_state *stream)
e7b07cee
HW
6671{
6672 /* this is the update mode case */
e7b07cee
HW
6673
6674 acrtc->otg_inst = -1;
6675 acrtc->enabled = false;
6676}
6677
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}

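/*
 * Illustrative sketch, not part of the original driver: the hotspot math in
 * get_cursor_position() folds a negative on-screen coordinate into the
 * hotspot so DC keeps the cursor partially visible at the screen edge
 * instead of rejecting the position. The helper name and values below are
 * hypothetical.
 */
static void __maybe_unused example_cursor_origin_fold(int crtc_x, int max_w,
						      int *x, int *xorigin)
{
	*x = crtc_x;
	*xorigin = 0;
	if (crtc_x < 0) {
		/* e.g. crtc_x = -10, max_w = 128  ->  *x = 0, *xorigin = 10 */
		*xorigin = min(-crtc_x, max_w - 1);
		*x = 0;
	}
}
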
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width = plane->state->crtc_w;
	attributes.height = plane->state->crtc_h;
	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
			 acrtc->crtc_id);
}

static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = new_crtc_state->vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

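/*
 * Illustrative sketch, not part of the original driver: the change flags
 * above are sticky ORs. Once a commit observes a difference in the VRR
 * timing or infopacket, the flag stays set for the rest of the commit, so a
 * later no-op update cannot clear it. A hypothetical reduced form of that
 * pattern (helper name is invented):
 */
static void __maybe_unused example_sticky_change_flag(bool *changed,
						      const void *old_data,
						      const void *new_data,
						      size_t len)
{
	/* OR-accumulate: the flag never transitions back from true to false. */
	*changed |= (memcmp(old_data, new_data, len) != 0);
}
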
static void pre_update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if the reenable happened inside the display
		 * front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}

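/*
 * Illustrative sketch, not part of the original driver: the transition
 * handler above is deliberately symmetric, so across any sequence of VRR
 * toggles every drm_crtc_vblank_get() on the off->on edge is paired with
 * exactly one drm_crtc_vblank_put() on the on->off edge. A hypothetical
 * walk-through (helper name invented; callers would pass real states):
 */
static void __maybe_unused example_vrr_toggle_balances_refs(struct dm_crtc_state *fixed,
							    struct dm_crtc_state *vrr)
{
	amdgpu_dm_handle_vrr_transition(fixed, vrr);	/* off->on: takes a vblank ref */
	amdgpu_dm_handle_vrr_transition(vrr, vrr);	/* no edge: no-op             */
	amdgpu_dm_handle_vrr_transition(vrr, fixed);	/* on->off: drops the ref     */
}
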
static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams (a hypothetical per-CRTC variant is
	 * sketched below).
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}

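/*
 * Illustrative sketch, not part of the original driver: one possible shape
 * of the per-stream variant the TODO above asks for, filtering cursor
 * updates to a single CRTC. The helper name and signature are invented for
 * illustration.
 */
static void __maybe_unused example_commit_cursors_for_crtc(struct drm_atomic_state *state,
							   struct drm_crtc *pcrtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR &&
		    new_plane_state->crtc == pcrtc)
			handle_cursor_update(plane, old_plane_state);
}
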
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
		drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
		to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			dm_new_plane_state->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			dm_new_plane_state->tmz_surface, false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;

	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			      amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/**
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		/**
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(
				(struct amdgpu_device *)dev->dev_private,
				acrtc_attach);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}

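/*
 * Illustrative sketch, not part of the original driver: the counter half of
 * the flip-throttling loop in amdgpu_dm_commit_planes() above reduces to
 * "don't program the flip until the hardware frame counter has reached the
 * frame before the target". (The real loop additionally requires scanout to
 * be outside vblank.) The helper name below is invented for illustration.
 */
static bool __maybe_unused example_flip_may_be_programmed(u32 current_vblank,
							  u32 last_flip_vblank,
							  bool wait_for_vblank)
{
	u32 target_vblank = last_flip_vblank + (wait_for_vblank ? 1 : 0);

	/*
	 * Signed, wrap-safe comparison, mirroring the
	 * (int)(target_vblank - counter) > 0 test in the loop above.
	 */
	return (int)(target_vblank - current_vblank) <= 0;
}
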
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	/*
	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
	 * a modeset, being disabled, or have no active planes.
	 *
	 * It's done in atomic commit rather than commit tail for now since
	 * some of these interrupt handlers access the current CRTC state and
	 * potentially the stream pointer itself.
	 *
	 * Since the atomic state is swapped within atomic commit and not within
	 * commit tail, this would lead to the new state (that hasn't been
	 * committed yet) being accessed from within the handlers.
	 *
	 * TODO: Fix this so we can do this in commit tail and not have to block
	 * in atomic check.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
			manage_dm_interrupts(adev, acrtc, false);
	}
	/*
	 * Add check here for SoC's that support hardware cursor plane, to
	 * unset legacy_cursor_update
	 */

	return drm_atomic_helper_commit(dev, state, nonblock);

	/*TODO Handle EINTR, reenable IRQ*/
}

/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;
	bool mode_set_reset_required = false;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery.
				 * In that case userspace tries to set a mode on
				 * a display which is in fact disconnected;
				 * dc_sink is NULL on the aconnector here.
				 * We expect a reset mode request to come soon.
				 *
				 * This can also happen when an unplug is done
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
			mode_set_reset_required = true;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			mode_set_reset_required = true;
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
		if (mode_set_reset_required)
			amdgpu_dm_psr_disable_all(dm);

		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
				dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);

			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
													  : false);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
						       dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* Update freesync active state. */
		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/**
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. It was intentionally deferred until after the front end
	 * state was modified to wait until the OTG was on and so the IRQ
	 * handlers didn't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, true);
#ifdef CONFIG_DEBUG_FS
			/**
			 * Frontend may have changed so reapply the CRC capture
			 * settings for the stream.
			 */
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
				amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state,
					dm_new_crtc_state->crc_src);
			}
#endif
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc*/
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}

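/*
 * Illustrative sketch, not part of the original driver: as the TODO in
 * amdgpu_dm_atomic_commit_tail() notes, DC rejects stream updates that carry
 * no surface updates, so a stream-only change must be padded with dummy
 * surface entries. A hypothetical reduced form of that pattern (helper name
 * invented; the caller is assumed to have filled *stream_update):
 */
static void __maybe_unused example_stream_only_update(struct amdgpu_display_manager *dm,
						      struct dm_crtc_state *dm_crtc_state,
						      struct dc_stream_update *stream_update,
						      struct dc_state *context)
{
	struct dc_surface_update dummy_updates[MAX_SURFACES];
	struct dc_stream_status *status;
	int j;

	memset(dummy_updates, 0, sizeof(dummy_updates));

	status = dc_stream_get_status(dm_crtc_state->stream);
	if (!status || !status->plane_count)
		return;

	/* One empty update per active plane keeps DC's validation happy. */
	for (j = 0; j < status->plane_count; j++)
		dummy_updates[j].surface = status->plane_states[0];

	mutex_lock(&dm->dc_lock);
	dc_commit_updates_for_stream(dm->dc, dummy_updates, status->plane_count,
				     dm_crtc_state->stream, stream_update,
				     context);
	mutex_unlock(&dm->dc_lock);
}
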
static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto err;

	/* Attach crtc to drm_atomic_state */
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto err;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto err;


	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}

/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we can not rely on usermode call
	 * to turn on the display, so we do it here
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it the
	 * extra locks we are taking here will get released too.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming is completed and
		 * page flips are done.
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
				&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}

static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz =
			aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
			aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_params, 0,
	       sizeof(new_crtc_state->vrr_params));
	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}

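/*
 * Illustrative sketch, not part of the original driver: the range test in
 * get_freesync_config_for_crtc() compares the mode's nominal refresh rate in
 * Hz against the connector's Hz limits, then programs DC in micro-Hz (uhz).
 * A hypothetical worked example (helper name invented):
 */
static bool __maybe_unused example_vrr_range_check(int vrefresh_hz,
						   int min_vfreq_hz,
						   int max_vfreq_hz,
						   unsigned int *min_uhz,
						   unsigned int *max_uhz)
{
	/* e.g. a 60 Hz mode on a 40-144 Hz panel is VRR-capable. */
	if (vrefresh_hz < min_vfreq_hz || vrefresh_hz > max_vfreq_hz)
		return false;

	*min_uhz = min_vfreq_hz * 1000000;	/* 40 Hz  -> 40000000 uhz  */
	*max_uhz = max_vfreq_hz * 1000000;	/* 144 Hz -> 144000000 uhz */
	return true;
}
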
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when
		 * added MST connectors are not found in the existing
		 * crtc_state in chained mode.
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (the stream has just been
	 *    added to the dc context, or is already in it)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}

static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
	 * the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* Src/dst size and scaling updates. */
		if (old_other_state->src_w != new_other_state->src_w ||
		    old_other_state->src_h != new_other_state->src_h ||
		    old_other_state->crtc_w != new_other_state->crtc_w ||
		    old_other_state->crtc_h != new_other_state->crtc_h)
			return true;

		/* Rotation / mirroring updates. */
		if (old_other_state->rotation != new_other_state->rotation)
			return true;

		/* Blending updates. */
		if (old_other_state->pixel_blend_mode !=
		    new_other_state->pixel_blend_mode)
			return true;

		/* Alpha updates. */
		if (old_other_state->alpha != new_other_state->alpha)
			return true;

		/* Colorspace changes. */
		if (old_other_state->color_range != new_other_state->color_range ||
		    old_other_state->color_encoding != new_other_state->color_encoding)
			return true;

		/* Framebuffer checks fall at the end. */
		if (!old_other_state->fb || !new_other_state->fb)
			continue;

		/* Pixel format changes can require bandwidth updates. */
		if (old_other_state->fb->format != new_other_state->fb->format)
			return true;

		old_dm_plane_state = to_dm_plane_state(old_other_state);
		new_dm_plane_state = to_dm_plane_state(new_other_state);

		/* Tiling and DCC changes also require bandwidth updates. */
		if (old_dm_plane_state->tiling_flags !=
		    new_dm_plane_state->tiling_flags)
			return true;
	}

	return false;
}

9e869063
LL
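/*
 * dm_update_plane_state() - Sync a DRM plane state into the DC context.
 *
 * Called twice per plane from atomic check: once with @enable == false to
 * remove changed or disabled planes from the DC context, and once with
 * @enable == true to (re)create them. Sets *lock_and_validation_needed
 * whenever the DC context is modified, so that full DC global validation
 * runs later in the check.
 */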
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* TODO: Implement a better atomic check for the cursor plane. */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
		    (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
					 new_plane_state->crtc_w, new_plane_state->crtc_h);
			return -EINVAL;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			new_plane_crtc->dev->dev_private,
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
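/*
 * If the given CRTC drives an MST connector, add every CRTC sharing that
 * connector's MST topology to the atomic state, since recomputing DSC
 * configs can redistribute bandwidth across the whole topology.
 */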
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another CRTC, acquiring the global lock guarantees that
 * any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, negative error code on validation failure.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

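		/*
		 * Treat an ABM level change as a connector change so the
		 * CRTC goes through the modeset path and the new level is
		 * applied to the stream.
		 */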
		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Prepass for updating tiling flags on new planes. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);

		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
				  &new_dm_plane_state->tmz_surface);
		if (ret)
			goto fail;
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper; check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-change validation was removed due to the inability to
	 * commit a new stream into the context w/o causing a full reset.
	 * Need to decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scale or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of the MST topology in the state:
		 * We need to perform the MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance of
		 * getting stuck in an infinite loop and hanging eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
						 UPDATE_TYPE_FULL :
						 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}

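/*
 * is_dp_capable_without_timing_msa() - Check the DP_MSA_TIMING_PAR_IGNORED
 * bit in DPCD register DP_DOWN_STREAM_PORT_COUNT: a sink that can ignore
 * the MSA timing parameters is a candidate for variable refresh.
 */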
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

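/*
 * amdgpu_dm_update_freesync_caps() - Parse the EDID monitor range
 * descriptors and update the connector's FreeSync (VRR) capability,
 * supported refresh range and vrr_capable property accordingly.
 */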
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-NULL, restrict FreeSync to DP and eDP sinks.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if the monitor has continuous frequency mode.
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

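		/*
		 * Only advertise FreeSync when the reported refresh range
		 * spans more than 10 Hz (e.g. a 48-75 Hz range qualifies).
		 */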
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {
			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

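/*
 * amdgpu_dm_set_psr_caps() - Read DP_PSR_SUPPORT from the sink's DPCD and
 * cache the reported PSR version; PSR is only considered for eDP links.
 */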
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating the
	 * interrupt to enter PSR. Init fail safe is 2 static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
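
	/*
	 * E.g. at 60 Hz: frame_time_microsec = 1000000 / 60 = 16666, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 static frames.
	 */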

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{

	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}

/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w
 * if psr is enabled on any stream
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}

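/*
 * amdgpu_dm_trigger_timing_sync() - Propagate the force_timing_sync
 * setting to every stream in the current DC state and retrigger
 * inter-CRTC synchronization.
 */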
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}