drm/amdgpu: fix spelling mistake "Falied" -> "Failed"
[linux-2.6-block.git] drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);


/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);


		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

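/*
 * Map an OTG (output timing generator) instance back to the amdgpu_crtc that
 * is driven by it; -1 is not a valid instance and falls back to CRTC 0.
 */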
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

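/* True when FreeSync/VRR is active on the CRTC, in variable or fixed mode. */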
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void* handle);

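/*
 * FBC (Frame Buffer Compression) buffer sizing: the worst case is the largest
 * listed mode at 4 bytes per pixel; a buffer is only allocated for eDP panels
 * on ASICs that expose an FBC compressor.
 */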
/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}

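/*
 * Audio component glue: the HDA audio driver binds to this component to
 * query ELD (display audio capability) data for a given port and to receive
 * hotplug notifications via pin_eld_notify.
 */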
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					  int pipe, bool *enabled,
					  unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
				       struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

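/*
 * Hardware half of DMUB bring-up: copy the firmware and VBIOS images into
 * the framebuffer windows, clear the mailbox, tracebuffer and fw-state
 * regions, hand the window layout to dmub_srv_hw_init() and wait for the
 * microcontroller to auto-load.
 */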
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

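/*
 * Bring-up order matters here: IRQ support, the cgs device and dc_create()
 * come first, then DMUB and dc_hardware_init(), then the freesync, color and
 * (optionally) HDCP modules, and finally the DRM-facing structures via
 * amdgpu_dm_initialize_drm_device().
 */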
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vlank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

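/*
 * Most ASICs need no driver-loaded DMCU firmware and return 0 early here;
 * only Raven variants and Navi12 carry a separate DMCU image, and it is only
 * loaded when the PSP handles firmware loading.
 */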
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch(adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

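/* Register accessors handed to the DMUB service so it can reach DC registers. */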
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

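/*
 * Software half of DMUB bring-up: pick the per-ASIC firmware, validate it,
 * create the dmub_srv instance, compute its region layout and back it with a
 * VRAM buffer; dm_dmub_hw_init() programs this layout into hardware later.
 */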
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
#endif

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

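/*
 * Start topology management on every DP MST branch connector that has an
 * AUX channel; a link whose topology manager fails to start is demoted to a
 * single (non-MST) connection.
 */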
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret;

	if (!adev->dm.fw_dmcu)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction, don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	ret = dmcu_load_iram(dmcu, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}

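/*
 * Suspend/resume the DP MST topology managers around S3: a manager that
 * fails to resume is torn down and a hotplug event is sent so userspace
 * re-probes the topology.
 */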
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the window driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then call pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the window driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch(adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}


static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
				 struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}

		}
	}

}

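/*
 * Commit a context with every stream (and its planes) removed, used to
 * quiesce the hardware before a GPU reset; dm_gpureset_commit_state()
 * replays the cached state afterwards.
 */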
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}


	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

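/*
 * Two suspend paths: a GPU reset caches the current DC state and zeroes out
 * all streams, while regular S3 goes through the atomic helper and powers DC
 * down to D3.
 */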
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (adev->in_gpu_reset) {
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);


	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

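/* Find the first connector in the atomic state whose new state uses @crtc. */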
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

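/*
 * Detection path for links where real detection isn't possible: synthesize a
 * sink that matches the connector's signal type and attempt a local EDID
 * read.
 */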
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}

cdaae837
BL
1770static void dm_gpureset_commit_state(struct dc_state *dc_state,
1771 struct amdgpu_display_manager *dm)
1772{
1773 struct {
1774 struct dc_surface_update surface_updates[MAX_SURFACES];
1775 struct dc_plane_info plane_infos[MAX_SURFACES];
1776 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1777 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1778 struct dc_stream_update stream_update;
1779 } * bundle;
1780 int k, m;
1781
1782 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1783
1784 if (!bundle) {
1785 dm_error("Failed to allocate update bundle\n");
1786 goto cleanup;
1787 }
1788
1789 for (k = 0; k < dc_state->stream_count; k++) {
1790 bundle->stream_update.stream = dc_state->streams[k];
1791
1792 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1793 bundle->surface_updates[m].surface =
1794 dc_state->stream_status->plane_states[m];
1795 bundle->surface_updates[m].surface->force_full_update =
1796 true;
1797 }
1798 dc_commit_updates_for_stream(
1799 dm->dc, bundle->surface_updates,
1800 dc_state->stream_status->plane_count,
1801 dc_state->streams[k], &bundle->stream_update, dc_state);
1802 }
1803
1804cleanup:
1805 kfree(bundle);
1806
1807 return;
1808}
1809
4562236b
HW
1810static int dm_resume(void *handle)
1811{
1812 struct amdgpu_device *adev = handle;
4562236b
HW
1813 struct drm_device *ddev = adev->ddev;
1814 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 1815 struct amdgpu_dm_connector *aconnector;
4562236b 1816 struct drm_connector *connector;
f8d2d39e 1817 struct drm_connector_list_iter iter;
4562236b 1818 struct drm_crtc *crtc;
c2cea706 1819 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
1820 struct dm_crtc_state *dm_new_crtc_state;
1821 struct drm_plane *plane;
1822 struct drm_plane_state *new_plane_state;
1823 struct dm_plane_state *dm_new_plane_state;
113b7a01 1824 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 1825 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
1826 struct dc_state *dc_state;
1827 int i, r, j;
4562236b 1828
cdaae837
BL
1829 if (adev->in_gpu_reset) {
1830 dc_state = dm->cached_dc_state;
1831
1832 r = dm_dmub_hw_init(adev);
1833 if (r)
1834 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1835
1836 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1837 dc_resume(dm->dc);
1838
1839 amdgpu_dm_irq_resume_early(adev);
1840
1841 for (i = 0; i < dc_state->stream_count; i++) {
1842 dc_state->streams[i]->mode_changed = true;
1843 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1844 dc_state->stream_status->plane_states[j]->update_flags.raw
1845 = 0xffffffff;
1846 }
1847 }
1848
1849 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 1850
cdaae837
BL
1851 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1852
1853 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1854
1855 dc_release_state(dm->cached_dc_state);
1856 dm->cached_dc_state = NULL;
1857
1858 amdgpu_dm_irq_resume_late(adev);
1859
1860 mutex_unlock(&dm->dc_lock);
1861
1862 return 0;
1863 }
113b7a01
LL
1864 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1865 dc_release_state(dm_state->context);
1866 dm_state->context = dc_create_state(dm->dc);
1867 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1868 dc_resource_state_construct(dm->dc, dm_state->context);
1869
8c7aea40
NK
1870 /* Before powering on DC we need to re-initialize DMUB. */
1871 r = dm_dmub_hw_init(adev);
1872 if (r)
1873 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1874
a80aa93d
ML
1875 /* power on hardware */
1876 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1877
4562236b
HW
1878 /* program HPD filter */
1879 dc_resume(dm->dc);
1880
4562236b
HW
1881 /*
1882 * early enable HPD Rx IRQ, should be done before set mode as short
1883 * pulse interrupts are used for MST
1884 */
1885 amdgpu_dm_irq_resume_early(adev);
1886
d20ebea8 1887 /* On resume we need to rewrite the MSTM control bits to enable MST*/
684cd480
LP
1888 s3_handle_mst(ddev, false);
1889
4562236b 1890 /* Do detection*/
f8d2d39e
LP
1891 drm_connector_list_iter_begin(ddev, &iter);
1892 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 1893 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
1894
1895 /*
1896 * this is the case when traversing through already created
1897 * MST connectors, should be skipped
1898 */
1899 if (aconnector->mst_port)
1900 continue;
1901
03ea364c 1902 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
1903 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1904 DRM_ERROR("KMS: Failed to detect connector\n");
1905
1906 if (aconnector->base.force && new_connection_type == dc_connection_none)
1907 emulated_link_detect(aconnector->dc_link);
1908 else
1909 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
1910
1911 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1912 aconnector->fake_enable = false;
1913
dcd5fb82
MF
1914 if (aconnector->dc_sink)
1915 dc_sink_release(aconnector->dc_sink);
4562236b
HW
1916 aconnector->dc_sink = NULL;
1917 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 1918 mutex_unlock(&aconnector->hpd_lock);
4562236b 1919 }
f8d2d39e 1920 drm_connector_list_iter_end(&iter);
4562236b 1921
1f6010a9 1922 /* Force mode set in atomic commit */
a80aa93d 1923 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 1924 new_crtc_state->active_changed = true;
4f346e65 1925
fcb4019e
LSL
1926 /*
1927 * atomic_check is expected to create the dc states. We need to release
1928 * them here, since they were duplicated as part of the suspend
1929 * procedure.
1930 */
a80aa93d 1931 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
1932 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1933 if (dm_new_crtc_state->stream) {
1934 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1935 dc_stream_release(dm_new_crtc_state->stream);
1936 dm_new_crtc_state->stream = NULL;
1937 }
1938 }
1939
a80aa93d 1940 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
1941 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1942 if (dm_new_plane_state->dc_state) {
1943 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1944 dc_plane_state_release(dm_new_plane_state->dc_state);
1945 dm_new_plane_state->dc_state = NULL;
1946 }
1947 }
1948
2d1af6a1 1949 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 1950
a80aa93d 1951 dm->cached_state = NULL;
0a214e2f 1952
9faa4237 1953 amdgpu_dm_irq_resume_late(adev);
4562236b 1954
9340dfd3
HW
1955 amdgpu_dm_smu_write_watermarks_table(adev);
1956
2d1af6a1 1957 return 0;
4562236b
HW
1958}
1959
b8592b48
LL
1960/**
1961 * DOC: DM Lifecycle
1962 *
1963 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1964 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1965 * the base driver's device list to be initialized and torn down accordingly.
1966 *
1967 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1968 */
1969
4562236b
HW
1970static const struct amd_ip_funcs amdgpu_dm_funcs = {
1971 .name = "dm",
1972 .early_init = dm_early_init,
7abcf6b5 1973 .late_init = dm_late_init,
4562236b
HW
1974 .sw_init = dm_sw_init,
1975 .sw_fini = dm_sw_fini,
1976 .hw_init = dm_hw_init,
1977 .hw_fini = dm_hw_fini,
1978 .suspend = dm_suspend,
1979 .resume = dm_resume,
1980 .is_idle = dm_is_idle,
1981 .wait_for_idle = dm_wait_for_idle,
1982 .check_soft_reset = dm_check_soft_reset,
1983 .soft_reset = dm_soft_reset,
1984 .set_clockgating_state = dm_set_clockgating_state,
1985 .set_powergating_state = dm_set_powergating_state,
1986};
1987
1988const struct amdgpu_ip_block_version dm_ip_block =
1989{
1990 .type = AMD_IP_BLOCK_TYPE_DCE,
1991 .major = 1,
1992 .minor = 0,
1993 .rev = 0,
1994 .funcs = &amdgpu_dm_funcs,
1995};
1996
ca3268c4 1997
b8592b48
LL
1998/**
1999 * DOC: atomic
2000 *
2001 * *WIP*
2002 */
0a323b84 2003
b3663f70 2004static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2005 .fb_create = amdgpu_display_user_framebuffer_create,
366c1baa 2006 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2007 .atomic_check = amdgpu_dm_atomic_check,
da5c47f6 2008 .atomic_commit = amdgpu_dm_atomic_commit,
54f5499a
AG
2009};
2010
2011static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2012 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2013};
2014
94562810
RS
2015static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2016{
2017 u32 max_cll, min_cll, max, min, q, r;
2018 struct amdgpu_dm_backlight_caps *caps;
2019 struct amdgpu_display_manager *dm;
2020 struct drm_connector *conn_base;
2021 struct amdgpu_device *adev;
2022 static const u8 pre_computed_values[] = {
2023 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2024 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2025
2026 if (!aconnector || !aconnector->dc_link)
2027 return;
2028
2029 conn_base = &aconnector->base;
2030 adev = conn_base->dev->dev_private;
2031 dm = &adev->dm;
2032 caps = &dm->backlight_caps;
2033 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2034 caps->aux_support = false;
2035 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2036 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2037
2038 if (caps->ext_caps->bits.oled == 1 ||
2039 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2040 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2041 caps->aux_support = true;
2042
2043 /* From the specification (CTA-861-G), for calculating the maximum
2044 * luminance we need to use:
2045 * Luminance = 50*2**(CV/32)
2046 * Where CV is a one-byte value.
2047 * For calculating this expression we may need float point precision;
2048 * to avoid this complexity level, we take advantage that CV is divided
2049 * by a constant. From the Euclids division algorithm, we know that CV
2050 * can be written as: CV = 32*q + r. Next, we replace CV in the
2051 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2052 * need to pre-compute the value of r/32. For pre-computing the values
2053 * We just used the following Ruby line:
2054 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2055 * The results of the above expressions can be verified at
2056 * pre_computed_values.
2057 */
2058 q = max_cll >> 5;
2059 r = max_cll % 32;
2060 max = (1 << q) * pre_computed_values[r];
2061
2062 // min luminance: maxLum * (CV/255)^2 / 100
2063 q = DIV_ROUND_CLOSEST(min_cll, 255);
2064 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2065
2066 caps->aux_max_input_signal = max;
2067 caps->aux_min_input_signal = min;
2068}
2069
97e51c16
HW
2070void amdgpu_dm_update_connector_after_detect(
2071 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2072{
2073 struct drm_connector *connector = &aconnector->base;
2074 struct drm_device *dev = connector->dev;
b73a22d3 2075 struct dc_sink *sink;
4562236b
HW
2076
2077 /* MST handled by drm_mst framework */
2078 if (aconnector->mst_mgr.mst_state == true)
2079 return;
2080
2081
2082 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2083 if (sink)
2084 dc_sink_retain(sink);
4562236b 2085
1f6010a9
DF
2086 /*
2087 * Edid mgmt connector gets first update only in mode_valid hook and then
4562236b 2088 * the connector sink is set to either fake or physical sink depends on link status.
1f6010a9 2089 * Skip if already done during boot.
4562236b
HW
2090 */
2091 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2092 && aconnector->dc_em_sink) {
2093
1f6010a9
DF
2094 /*
2095 * For S3 resume with headless use eml_sink to fake stream
2096 * because on resume connector->sink is set to NULL
4562236b
HW
2097 */
2098 mutex_lock(&dev->mode_config.mutex);
2099
2100 if (sink) {
922aa1e1 2101 if (aconnector->dc_sink) {
98e6436d 2102 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2103 /*
2104 * retain and release below are used to
2105 * bump up refcount for sink because the link doesn't point
2106 * to it anymore after disconnect, so on next crtc to connector
922aa1e1
AG
2107 * reshuffle by UMD we will get into unwanted dc_sink release
2108 */
dcd5fb82 2109 dc_sink_release(aconnector->dc_sink);
922aa1e1 2110 }
4562236b 2111 aconnector->dc_sink = sink;
dcd5fb82 2112 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2113 amdgpu_dm_update_freesync_caps(connector,
2114 aconnector->edid);
4562236b 2115 } else {
98e6436d 2116 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2117 if (!aconnector->dc_sink) {
4562236b 2118 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2119 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2120 }
4562236b
HW
2121 }
2122
2123 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2124
2125 if (sink)
2126 dc_sink_release(sink);
4562236b
HW
2127 return;
2128 }
2129
2130 /*
2131 * TODO: temporary guard to look for proper fix
2132 * if this sink is MST sink, we should not do anything
2133 */
dcd5fb82
MF
2134 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2135 dc_sink_release(sink);
4562236b 2136 return;
dcd5fb82 2137 }
4562236b
HW
2138
2139 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2140 /*
2141 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2142 * Do nothing!!
2143 */
f1ad2f5e 2144 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2145 aconnector->connector_id);
dcd5fb82
MF
2146 if (sink)
2147 dc_sink_release(sink);
4562236b
HW
2148 return;
2149 }
2150
f1ad2f5e 2151 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2152 aconnector->connector_id, aconnector->dc_sink, sink);
2153
2154 mutex_lock(&dev->mode_config.mutex);
2155
1f6010a9
DF
2156 /*
2157 * 1. Update status of the drm connector
2158 * 2. Send an event and let userspace tell us what to do
2159 */
4562236b 2160 if (sink) {
1f6010a9
DF
2161 /*
2162 * TODO: check if we still need the S3 mode update workaround.
2163 * If yes, put it here.
2164 */
4562236b 2165 if (aconnector->dc_sink)
98e6436d 2166 amdgpu_dm_update_freesync_caps(connector, NULL);
4562236b
HW
2167
2168 aconnector->dc_sink = sink;
dcd5fb82 2169 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2170 if (sink->dc_edid.length == 0) {
4562236b 2171 aconnector->edid = NULL;
e6142dd5
AP
2172 if (aconnector->dc_link->aux_mode) {
2173 drm_dp_cec_unset_edid(
2174 &aconnector->dm_dp_aux.aux);
2175 }
900b3cb1 2176 } else {
4562236b 2177 aconnector->edid =
e6142dd5 2178 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2179
c555f023 2180 drm_connector_update_edid_property(connector,
e6142dd5
AP
2181 aconnector->edid);
2182
2183 if (aconnector->dc_link->aux_mode)
2184 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2185 aconnector->edid);
4562236b 2186 }
e6142dd5 2187
98e6436d 2188 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2189 update_connector_ext_caps(aconnector);
4562236b 2190 } else {
e86e8947 2191 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2192 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2193 drm_connector_update_edid_property(connector, NULL);
4562236b 2194 aconnector->num_modes = 0;
dcd5fb82 2195 dc_sink_release(aconnector->dc_sink);
4562236b 2196 aconnector->dc_sink = NULL;
5326c452 2197 aconnector->edid = NULL;
0c8620d6
BL
2198#ifdef CONFIG_DRM_AMD_DC_HDCP
2199 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2200 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2201 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2202#endif
4562236b
HW
2203 }
2204
2205 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2206
2207 if (sink)
2208 dc_sink_release(sink);
4562236b
HW
2209}
2210
2211static void handle_hpd_irq(void *param)
2212{
c84dec2f 2213 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2214 struct drm_connector *connector = &aconnector->base;
2215 struct drm_device *dev = connector->dev;
fbbdadf2 2216 enum dc_connection_type new_connection_type = dc_connection_none;
0c8620d6
BL
2217#ifdef CONFIG_DRM_AMD_DC_HDCP
2218 struct amdgpu_device *adev = dev->dev_private;
2219#endif
4562236b 2220
1f6010a9
DF
2221 /*
2222 * In case of failure or MST no need to update connector status or notify the OS
2223 * since (for MST case) MST does this in its own context.
4562236b
HW
2224 */
2225 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2226
0c8620d6 2227#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 2228 if (adev->dm.hdcp_workqueue)
96a3b32e 2229 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
0c8620d6 2230#endif
2e0ac3d6
HW
2231 if (aconnector->fake_enable)
2232 aconnector->fake_enable = false;
2233
fbbdadf2
BL
2234 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2235 DRM_ERROR("KMS: Failed to detect connector\n");
2236
2237 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2238 emulated_link_detect(aconnector->dc_link);
2239
2240
2241 drm_modeset_lock_all(dev);
2242 dm_restore_drm_connector_state(dev, connector);
2243 drm_modeset_unlock_all(dev);
2244
2245 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2246 drm_kms_helper_hotplug_event(dev);
2247
2248 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
4562236b
HW
2249 amdgpu_dm_update_connector_after_detect(aconnector);
2250
2251
2252 drm_modeset_lock_all(dev);
2253 dm_restore_drm_connector_state(dev, connector);
2254 drm_modeset_unlock_all(dev);
2255
2256 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2257 drm_kms_helper_hotplug_event(dev);
2258 }
2259 mutex_unlock(&aconnector->hpd_lock);
2260
2261}
2262
c84dec2f 2263static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2264{
2265 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2266 uint8_t dret;
2267 bool new_irq_handled = false;
2268 int dpcd_addr;
2269 int dpcd_bytes_to_read;
2270
2271 const int max_process_count = 30;
2272 int process_count = 0;
2273
2274 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2275
2276 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2277 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2278 /* DPCD 0x200 - 0x201 for downstream IRQ */
2279 dpcd_addr = DP_SINK_COUNT;
2280 } else {
2281 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2282 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2283 dpcd_addr = DP_SINK_COUNT_ESI;
2284 }
2285
2286 dret = drm_dp_dpcd_read(
2287 &aconnector->dm_dp_aux.aux,
2288 dpcd_addr,
2289 esi,
2290 dpcd_bytes_to_read);
2291
2292 while (dret == dpcd_bytes_to_read &&
2293 process_count < max_process_count) {
2294 uint8_t retry;
2295 dret = 0;
2296
2297 process_count++;
2298
f1ad2f5e 2299 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
2300 /* handle HPD short pulse irq */
2301 if (aconnector->mst_mgr.mst_state)
2302 drm_dp_mst_hpd_irq(
2303 &aconnector->mst_mgr,
2304 esi,
2305 &new_irq_handled);
4562236b
HW
2306
2307 if (new_irq_handled) {
2308 /* ACK at DPCD to notify down stream */
2309 const int ack_dpcd_bytes_to_write =
2310 dpcd_bytes_to_read - 1;
2311
2312 for (retry = 0; retry < 3; retry++) {
2313 uint8_t wret;
2314
2315 wret = drm_dp_dpcd_write(
2316 &aconnector->dm_dp_aux.aux,
2317 dpcd_addr + 1,
2318 &esi[1],
2319 ack_dpcd_bytes_to_write);
2320 if (wret == ack_dpcd_bytes_to_write)
2321 break;
2322 }
2323
1f6010a9 2324 /* check if there is new irq to be handled */
4562236b
HW
2325 dret = drm_dp_dpcd_read(
2326 &aconnector->dm_dp_aux.aux,
2327 dpcd_addr,
2328 esi,
2329 dpcd_bytes_to_read);
2330
2331 new_irq_handled = false;
d4a6e8a9 2332 } else {
4562236b 2333 break;
d4a6e8a9 2334 }
4562236b
HW
2335 }
2336
2337 if (process_count == max_process_count)
f1ad2f5e 2338 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
2339}
2340
2341static void handle_hpd_rx_irq(void *param)
2342{
c84dec2f 2343 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2344 struct drm_connector *connector = &aconnector->base;
2345 struct drm_device *dev = connector->dev;
53cbf65c 2346 struct dc_link *dc_link = aconnector->dc_link;
4562236b 2347 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
fbbdadf2 2348 enum dc_connection_type new_connection_type = dc_connection_none;
2a0f9270
BL
2349#ifdef CONFIG_DRM_AMD_DC_HDCP
2350 union hpd_irq_data hpd_irq_data;
2351 struct amdgpu_device *adev = dev->dev_private;
2352
2353 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2354#endif
4562236b 2355
1f6010a9
DF
2356 /*
2357 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
4562236b
HW
2358 * conflict, after implement i2c helper, this mutex should be
2359 * retired.
2360 */
53cbf65c 2361 if (dc_link->type != dc_connection_mst_branch)
4562236b
HW
2362 mutex_lock(&aconnector->hpd_lock);
2363
2a0f9270
BL
2364
2365#ifdef CONFIG_DRM_AMD_DC_HDCP
2366 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2367#else
4e18814e 2368 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2a0f9270 2369#endif
4562236b
HW
2370 !is_mst_root_connector) {
2371 /* Downstream Port status changed. */
fbbdadf2
BL
2372 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2373 DRM_ERROR("KMS: Failed to detect connector\n");
2374
2375 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2376 emulated_link_detect(dc_link);
2377
2378 if (aconnector->fake_enable)
2379 aconnector->fake_enable = false;
2380
2381 amdgpu_dm_update_connector_after_detect(aconnector);
2382
2383
2384 drm_modeset_lock_all(dev);
2385 dm_restore_drm_connector_state(dev, connector);
2386 drm_modeset_unlock_all(dev);
2387
2388 drm_kms_helper_hotplug_event(dev);
2389 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
2390
2391 if (aconnector->fake_enable)
2392 aconnector->fake_enable = false;
2393
4562236b
HW
2394 amdgpu_dm_update_connector_after_detect(aconnector);
2395
2396
2397 drm_modeset_lock_all(dev);
2398 dm_restore_drm_connector_state(dev, connector);
2399 drm_modeset_unlock_all(dev);
2400
2401 drm_kms_helper_hotplug_event(dev);
2402 }
2403 }
2a0f9270 2404#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
2405 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2406 if (adev->dm.hdcp_workqueue)
2407 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2408 }
2a0f9270 2409#endif
4562236b 2410 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
53cbf65c 2411 (dc_link->type == dc_connection_mst_branch))
4562236b
HW
2412 dm_handle_hpd_rx_irq(aconnector);
2413
e86e8947
HV
2414 if (dc_link->type != dc_connection_mst_branch) {
2415 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
4562236b 2416 mutex_unlock(&aconnector->hpd_lock);
e86e8947 2417 }
4562236b
HW
2418}
2419
2420static void register_hpd_handlers(struct amdgpu_device *adev)
2421{
2422 struct drm_device *dev = adev->ddev;
2423 struct drm_connector *connector;
c84dec2f 2424 struct amdgpu_dm_connector *aconnector;
4562236b
HW
2425 const struct dc_link *dc_link;
2426 struct dc_interrupt_params int_params = {0};
2427
2428 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2429 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2430
2431 list_for_each_entry(connector,
2432 &dev->mode_config.connector_list, head) {
2433
c84dec2f 2434 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2435 dc_link = aconnector->dc_link;
2436
2437 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2438 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2439 int_params.irq_source = dc_link->irq_source_hpd;
2440
2441 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2442 handle_hpd_irq,
2443 (void *) aconnector);
2444 }
2445
2446 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2447
2448 /* Also register for DP short pulse (hpd_rx). */
2449 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2450 int_params.irq_source = dc_link->irq_source_hpd_rx;
2451
2452 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2453 handle_hpd_rx_irq,
2454 (void *) aconnector);
2455 }
2456 }
2457}
2458
2459/* Register IRQ sources and initialize IRQ callbacks */
2460static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2461{
2462 struct dc *dc = adev->dm.dc;
2463 struct common_irq_params *c_irq_params;
2464 struct dc_interrupt_params int_params = {0};
2465 int r;
2466 int i;
1ffdeca6 2467 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 2468
84374725 2469 if (adev->asic_type >= CHIP_VEGA10)
3760f76c 2470 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
2471
2472 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2473 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2474
1f6010a9
DF
2475 /*
2476 * Actions of amdgpu_irq_add_id():
4562236b
HW
2477 * 1. Register a set() function with base driver.
2478 * Base driver will call set() function to enable/disable an
2479 * interrupt in DC hardware.
2480 * 2. Register amdgpu_dm_irq_handler().
2481 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2482 * coming from DC hardware.
2483 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2484 * for acknowledging and handling. */
2485
b57de80a 2486 /* Use VBLANK interrupt */
e9029155 2487 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 2488 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
2489 if (r) {
2490 DRM_ERROR("Failed to add crtc irq id!\n");
2491 return r;
2492 }
2493
2494 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2495 int_params.irq_source =
3d761e79 2496 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 2497
b57de80a 2498 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
2499
2500 c_irq_params->adev = adev;
2501 c_irq_params->irq_src = int_params.irq_source;
2502
2503 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2504 dm_crtc_high_irq, c_irq_params);
2505 }
2506
d2574c33
MK
2507 /* Use VUPDATE interrupt */
2508 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2509 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2510 if (r) {
2511 DRM_ERROR("Failed to add vupdate irq id!\n");
2512 return r;
2513 }
2514
2515 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2516 int_params.irq_source =
2517 dc_interrupt_to_irq_source(dc, i, 0);
2518
2519 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2520
2521 c_irq_params->adev = adev;
2522 c_irq_params->irq_src = int_params.irq_source;
2523
2524 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2525 dm_vupdate_high_irq, c_irq_params);
2526 }
2527
3d761e79 2528 /* Use GRPH_PFLIP interrupt */
4562236b
HW
2529 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2530 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 2531 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
2532 if (r) {
2533 DRM_ERROR("Failed to add page flip irq id!\n");
2534 return r;
2535 }
2536
2537 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2538 int_params.irq_source =
2539 dc_interrupt_to_irq_source(dc, i, 0);
2540
2541 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2542
2543 c_irq_params->adev = adev;
2544 c_irq_params->irq_src = int_params.irq_source;
2545
2546 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2547 dm_pflip_high_irq, c_irq_params);
2548
2549 }
2550
2551 /* HPD */
2c8ad2d5
AD
2552 r = amdgpu_irq_add_id(adev, client_id,
2553 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
2554 if (r) {
2555 DRM_ERROR("Failed to add hpd irq id!\n");
2556 return r;
2557 }
2558
2559 register_hpd_handlers(adev);
2560
2561 return 0;
2562}
2563
b86a1aa3 2564#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
2565/* Register IRQ sources and initialize IRQ callbacks */
2566static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2567{
2568 struct dc *dc = adev->dm.dc;
2569 struct common_irq_params *c_irq_params;
2570 struct dc_interrupt_params int_params = {0};
2571 int r;
2572 int i;
2573
2574 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2575 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2576
1f6010a9
DF
2577 /*
2578 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
2579 * 1. Register a set() function with base driver.
2580 * Base driver will call set() function to enable/disable an
2581 * interrupt in DC hardware.
2582 * 2. Register amdgpu_dm_irq_handler().
2583 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2584 * coming from DC hardware.
2585 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2586 * for acknowledging and handling.
1f6010a9 2587 */
ff5ef992
AD
2588
2589 /* Use VSTARTUP interrupt */
2590 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2591 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2592 i++) {
3760f76c 2593 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
2594
2595 if (r) {
2596 DRM_ERROR("Failed to add crtc irq id!\n");
2597 return r;
2598 }
2599
2600 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2601 int_params.irq_source =
2602 dc_interrupt_to_irq_source(dc, i, 0);
2603
2604 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2605
2606 c_irq_params->adev = adev;
2607 c_irq_params->irq_src = int_params.irq_source;
2608
2346ef47
NK
2609 amdgpu_dm_irq_register_interrupt(
2610 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2611 }
2612
2613 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2614 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2615 * to trigger at end of each vblank, regardless of state of the lock,
2616 * matching DCE behaviour.
2617 */
2618 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2619 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2620 i++) {
2621 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2622
2623 if (r) {
2624 DRM_ERROR("Failed to add vupdate irq id!\n");
2625 return r;
2626 }
2627
2628 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2629 int_params.irq_source =
2630 dc_interrupt_to_irq_source(dc, i, 0);
2631
2632 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2633
2634 c_irq_params->adev = adev;
2635 c_irq_params->irq_src = int_params.irq_source;
2636
ff5ef992 2637 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 2638 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
2639 }
2640
ff5ef992
AD
2641 /* Use GRPH_PFLIP interrupt */
2642 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2643 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2644 i++) {
3760f76c 2645 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
2646 if (r) {
2647 DRM_ERROR("Failed to add page flip irq id!\n");
2648 return r;
2649 }
2650
2651 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2652 int_params.irq_source =
2653 dc_interrupt_to_irq_source(dc, i, 0);
2654
2655 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2656
2657 c_irq_params->adev = adev;
2658 c_irq_params->irq_src = int_params.irq_source;
2659
2660 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2661 dm_pflip_high_irq, c_irq_params);
2662
2663 }
2664
2665 /* HPD */
3760f76c 2666 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
ff5ef992
AD
2667 &adev->hpd_irq);
2668 if (r) {
2669 DRM_ERROR("Failed to add hpd irq id!\n");
2670 return r;
2671 }
2672
2673 register_hpd_handlers(adev);
2674
2675 return 0;
2676}
2677#endif
2678
eb3dc897
NK
2679/*
2680 * Acquires the lock for the atomic state object and returns
2681 * the new atomic state.
2682 *
2683 * This should only be called during atomic check.
2684 */
2685static int dm_atomic_get_state(struct drm_atomic_state *state,
2686 struct dm_atomic_state **dm_state)
2687{
2688 struct drm_device *dev = state->dev;
2689 struct amdgpu_device *adev = dev->dev_private;
2690 struct amdgpu_display_manager *dm = &adev->dm;
2691 struct drm_private_state *priv_state;
eb3dc897
NK
2692
2693 if (*dm_state)
2694 return 0;
2695
eb3dc897
NK
2696 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2697 if (IS_ERR(priv_state))
2698 return PTR_ERR(priv_state);
2699
2700 *dm_state = to_dm_atomic_state(priv_state);
2701
2702 return 0;
2703}
2704
dfd84d90 2705static struct dm_atomic_state *
eb3dc897
NK
2706dm_atomic_get_new_state(struct drm_atomic_state *state)
2707{
2708 struct drm_device *dev = state->dev;
2709 struct amdgpu_device *adev = dev->dev_private;
2710 struct amdgpu_display_manager *dm = &adev->dm;
2711 struct drm_private_obj *obj;
2712 struct drm_private_state *new_obj_state;
2713 int i;
2714
2715 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2716 if (obj->funcs == dm->atomic_obj.funcs)
2717 return to_dm_atomic_state(new_obj_state);
2718 }
2719
2720 return NULL;
2721}
2722
dfd84d90 2723static struct dm_atomic_state *
eb3dc897
NK
2724dm_atomic_get_old_state(struct drm_atomic_state *state)
2725{
2726 struct drm_device *dev = state->dev;
2727 struct amdgpu_device *adev = dev->dev_private;
2728 struct amdgpu_display_manager *dm = &adev->dm;
2729 struct drm_private_obj *obj;
2730 struct drm_private_state *old_obj_state;
2731 int i;
2732
2733 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2734 if (obj->funcs == dm->atomic_obj.funcs)
2735 return to_dm_atomic_state(old_obj_state);
2736 }
2737
2738 return NULL;
2739}
2740
2741static struct drm_private_state *
2742dm_atomic_duplicate_state(struct drm_private_obj *obj)
2743{
2744 struct dm_atomic_state *old_state, *new_state;
2745
2746 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2747 if (!new_state)
2748 return NULL;
2749
2750 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2751
813d20dc
AW
2752 old_state = to_dm_atomic_state(obj->state);
2753
2754 if (old_state && old_state->context)
2755 new_state->context = dc_copy_state(old_state->context);
2756
eb3dc897
NK
2757 if (!new_state->context) {
2758 kfree(new_state);
2759 return NULL;
2760 }
2761
eb3dc897
NK
2762 return &new_state->base;
2763}
2764
2765static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2766 struct drm_private_state *state)
2767{
2768 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2769
2770 if (dm_state && dm_state->context)
2771 dc_release_state(dm_state->context);
2772
2773 kfree(dm_state);
2774}
2775
2776static struct drm_private_state_funcs dm_atomic_state_funcs = {
2777 .atomic_duplicate_state = dm_atomic_duplicate_state,
2778 .atomic_destroy_state = dm_atomic_destroy_state,
2779};
2780
4562236b
HW
2781static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2782{
eb3dc897 2783 struct dm_atomic_state *state;
4562236b
HW
2784 int r;
2785
2786 adev->mode_info.mode_config_initialized = true;
2787
4562236b 2788 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
54f5499a 2789 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b
HW
2790
2791 adev->ddev->mode_config.max_width = 16384;
2792 adev->ddev->mode_config.max_height = 16384;
2793
2794 adev->ddev->mode_config.preferred_depth = 24;
2795 adev->ddev->mode_config.prefer_shadow = 1;
1f6010a9 2796 /* indicates support for immediate flip */
4562236b
HW
2797 adev->ddev->mode_config.async_page_flip = true;
2798
770d13b1 2799 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
4562236b 2800
eb3dc897
NK
2801 state = kzalloc(sizeof(*state), GFP_KERNEL);
2802 if (!state)
2803 return -ENOMEM;
2804
813d20dc 2805 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
2806 if (!state->context) {
2807 kfree(state);
2808 return -ENOMEM;
2809 }
2810
2811 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2812
8c1a765b
DA
2813 drm_atomic_private_obj_init(adev->ddev,
2814 &adev->dm.atomic_obj,
eb3dc897
NK
2815 &state->base,
2816 &dm_atomic_state_funcs);
2817
3dc9b1ce 2818 r = amdgpu_display_modeset_create_props(adev);
4562236b
HW
2819 if (r)
2820 return r;
2821
6ce8f316
NK
2822 r = amdgpu_dm_audio_init(adev);
2823 if (r)
2824 return r;
2825
4562236b
HW
2826 return 0;
2827}
2828
206bbafe
DF
2829#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2830#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 2831#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 2832
4562236b
HW
2833#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2834 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2835
206bbafe
DF
2836static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2837{
2838#if defined(CONFIG_ACPI)
2839 struct amdgpu_dm_backlight_caps caps;
2840
2841 if (dm->backlight_caps.caps_valid)
2842 return;
2843
2844 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2845 if (caps.caps_valid) {
94562810
RS
2846 dm->backlight_caps.caps_valid = true;
2847 if (caps.aux_support)
2848 return;
206bbafe
DF
2849 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2850 dm->backlight_caps.max_input_signal = caps.max_input_signal;
206bbafe
DF
2851 } else {
2852 dm->backlight_caps.min_input_signal =
2853 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2854 dm->backlight_caps.max_input_signal =
2855 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2856 }
2857#else
94562810
RS
2858 if (dm->backlight_caps.aux_support)
2859 return;
2860
8bcbc9ef
DF
2861 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2862 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
2863#endif
2864}
2865
94562810
RS
2866static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2867{
2868 bool rc;
2869
2870 if (!link)
2871 return 1;
2872
2873 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2874 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2875
2876 return rc ? 0 : 1;
2877}
2878
2879static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2880 const uint32_t user_brightness)
2881{
2882 u32 min, max, conversion_pace;
2883 u32 brightness = user_brightness;
2884
2885 if (!caps)
2886 goto out;
2887
2888 if (!caps->aux_support) {
2889 max = caps->max_input_signal;
2890 min = caps->min_input_signal;
2891 /*
2892 * The brightness input is in the range 0-255
2893 * It needs to be rescaled to be between the
2894 * requested min and max input signal
2895 * It also needs to be scaled up by 0x101 to
2896 * match the DC interface which has a range of
2897 * 0 to 0xffff
2898 */
2899 conversion_pace = 0x101;
2900 brightness =
2901 user_brightness
2902 * conversion_pace
2903 * (max - min)
2904 / AMDGPU_MAX_BL_LEVEL
2905 + min * conversion_pace;
2906 } else {
2907 /* TODO
2908 * We are doing a linear interpolation here, which is OK but
2909 * does not provide the optimal result. We probably want
2910 * something close to the Perceptual Quantizer (PQ) curve.
2911 */
2912 max = caps->aux_max_input_signal;
2913 min = caps->aux_min_input_signal;
2914
2915 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2916 + user_brightness * max;
2917 // Multiple the value by 1000 since we use millinits
2918 brightness *= 1000;
2919 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2920 }
2921
2922out:
2923 return brightness;
2924}
2925
4562236b
HW
2926static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2927{
2928 struct amdgpu_display_manager *dm = bl_get_data(bd);
206bbafe 2929 struct amdgpu_dm_backlight_caps caps;
94562810
RS
2930 struct dc_link *link = NULL;
2931 u32 brightness;
2932 bool rc;
4562236b 2933
206bbafe
DF
2934 amdgpu_dm_update_backlight_caps(dm);
2935 caps = dm->backlight_caps;
94562810
RS
2936
2937 link = (struct dc_link *)dm->backlight_link;
2938
2939 brightness = convert_brightness(&caps, bd->props.brightness);
2940 // Change brightness based on AUX property
2941 if (caps.aux_support)
2942 return set_backlight_via_aux(link, brightness);
2943
2944 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2945
2946 return rc ? 0 : 1;
4562236b
HW
2947}
2948
2949static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2950{
620a0d27
DF
2951 struct amdgpu_display_manager *dm = bl_get_data(bd);
2952 int ret = dc_link_get_backlight_level(dm->backlight_link);
2953
2954 if (ret == DC_ERROR_UNEXPECTED)
2955 return bd->props.brightness;
2956 return ret;
4562236b
HW
2957}
2958
2959static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 2960 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
2961 .get_brightness = amdgpu_dm_backlight_get_brightness,
2962 .update_status = amdgpu_dm_backlight_update_status,
2963};
2964
7578ecda
AD
2965static void
2966amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
2967{
2968 char bl_name[16];
2969 struct backlight_properties props = { 0 };
2970
206bbafe
DF
2971 amdgpu_dm_update_backlight_caps(dm);
2972
4562236b 2973 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 2974 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
2975 props.type = BACKLIGHT_RAW;
2976
2977 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2978 dm->adev->ddev->primary->index);
2979
2980 dm->backlight_dev = backlight_device_register(bl_name,
2981 dm->adev->ddev->dev,
2982 dm,
2983 &amdgpu_dm_backlight_ops,
2984 &props);
2985
74baea42 2986 if (IS_ERR(dm->backlight_dev))
4562236b
HW
2987 DRM_ERROR("DM: Backlight registration failed!\n");
2988 else
f1ad2f5e 2989 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
2990}
2991
2992#endif
2993
df534fff 2994static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 2995 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
2996 enum drm_plane_type plane_type,
2997 const struct dc_plane_cap *plane_cap)
df534fff 2998{
f180b4bc 2999 struct drm_plane *plane;
df534fff
S
3000 unsigned long possible_crtcs;
3001 int ret = 0;
3002
f180b4bc 3003 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
3004 if (!plane) {
3005 DRM_ERROR("KMS: Failed to allocate plane\n");
3006 return -ENOMEM;
3007 }
b2fddb13 3008 plane->type = plane_type;
df534fff
S
3009
3010 /*
b2fddb13
NK
3011 * HACK: IGT tests expect that the primary plane for a CRTC
3012 * can only have one possible CRTC. Only expose support for
3013 * any CRTC if they're not going to be used as a primary plane
3014 * for a CRTC - like overlay or underlay planes.
df534fff
S
3015 */
3016 possible_crtcs = 1 << plane_id;
3017 if (plane_id >= dm->dc->caps.max_streams)
3018 possible_crtcs = 0xff;
3019
cc1fec57 3020 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
3021
3022 if (ret) {
3023 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 3024 kfree(plane);
df534fff
S
3025 return ret;
3026 }
3027
54087768
NK
3028 if (mode_info)
3029 mode_info->planes[plane_id] = plane;
3030
df534fff
S
3031 return ret;
3032}
3033
89fc8d4e
HW
3034
3035static void register_backlight_device(struct amdgpu_display_manager *dm,
3036 struct dc_link *link)
3037{
3038#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3039 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3040
3041 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3042 link->type != dc_connection_none) {
1f6010a9
DF
3043 /*
3044 * Event if registration failed, we should continue with
89fc8d4e
HW
3045 * DM initialization because not having a backlight control
3046 * is better then a black screen.
3047 */
3048 amdgpu_dm_register_backlight_device(dm);
3049
3050 if (dm->backlight_dev)
3051 dm->backlight_link = link;
3052 }
3053#endif
3054}
3055
3056
1f6010a9
DF
3057/*
3058 * In this architecture, the association
4562236b
HW
3059 * connector -> encoder -> crtc
3060 * id not really requried. The crtc and connector will hold the
3061 * display_index as an abstraction to use with DAL component
3062 *
3063 * Returns 0 on success
3064 */
7578ecda 3065static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
3066{
3067 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 3068 int32_t i;
c84dec2f 3069 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 3070 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 3071 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 3072 uint32_t link_cnt;
cc1fec57 3073 int32_t primary_planes;
fbbdadf2 3074 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 3075 const struct dc_plane_cap *plane;
4562236b
HW
3076
3077 link_cnt = dm->dc->caps.max_links;
4562236b
HW
3078 if (amdgpu_dm_mode_config_init(dm->adev)) {
3079 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 3080 return -EINVAL;
4562236b
HW
3081 }
3082
b2fddb13
NK
3083 /* There is one primary plane per CRTC */
3084 primary_planes = dm->dc->caps.max_streams;
54087768 3085 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 3086
b2fddb13
NK
3087 /*
3088 * Initialize primary planes, implicit planes for legacy IOCTLS.
3089 * Order is reversed to match iteration order in atomic check.
3090 */
3091 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
3092 plane = &dm->dc->caps.planes[i];
3093
b2fddb13 3094 if (initialize_plane(dm, mode_info, i,
cc1fec57 3095 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 3096 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 3097 goto fail;
d4e13b0d 3098 }
df534fff 3099 }
92f3ac40 3100
0d579c7e
NK
3101 /*
3102 * Initialize overlay planes, index starting after primary planes.
3103 * These planes have a higher DRM index than the primary planes since
3104 * they should be considered as having a higher z-order.
3105 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
3106 *
3107 * Only support DCN for now, and only expose one so we don't encourage
3108 * userspace to use up all the pipes.
0d579c7e 3109 */
cc1fec57
NK
3110 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3111 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3112
3113 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3114 continue;
3115
3116 if (!plane->blends_with_above || !plane->blends_with_below)
3117 continue;
3118
ea36ad34 3119 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
3120 continue;
3121
54087768 3122 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 3123 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 3124 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 3125 goto fail;
d4e13b0d 3126 }
cc1fec57
NK
3127
3128 /* Only create one overlay plane. */
3129 break;
d4e13b0d 3130 }
4562236b 3131
d4e13b0d 3132 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 3133 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 3134 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 3135 goto fail;
4562236b 3136 }
4562236b 3137
ab2541b6 3138 dm->display_indexes_num = dm->dc->caps.max_streams;
4562236b
HW
3139
3140 /* loops over all connectors on the board */
3141 for (i = 0; i < link_cnt; i++) {
89fc8d4e 3142 struct dc_link *link = NULL;
4562236b
HW
3143
3144 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3145 DRM_ERROR(
3146 "KMS: Cannot support more than %d display indexes\n",
3147 AMDGPU_DM_MAX_DISPLAY_INDEX);
3148 continue;
3149 }
3150
3151 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3152 if (!aconnector)
cd8a2ae8 3153 goto fail;
4562236b
HW
3154
3155 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 3156 if (!aencoder)
cd8a2ae8 3157 goto fail;
4562236b
HW
3158
3159 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3160 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 3161 goto fail;
4562236b
HW
3162 }
3163
3164 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3165 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 3166 goto fail;
4562236b
HW
3167 }
3168
89fc8d4e
HW
3169 link = dc_get_link_at_index(dm->dc, i);
3170
fbbdadf2
BL
3171 if (!dc_link_detect_sink(link, &new_connection_type))
3172 DRM_ERROR("KMS: Failed to detect connector\n");
3173
3174 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3175 emulated_link_detect(link);
3176 amdgpu_dm_update_connector_after_detect(aconnector);
3177
3178 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 3179 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 3180 register_backlight_device(dm, link);
397a9bc5
RL
3181 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3182 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
3183 }
3184
3185
4562236b
HW
3186 }
3187
3188 /* Software is initialized. Now we can register interrupt handlers. */
3189 switch (adev->asic_type) {
3190 case CHIP_BONAIRE:
3191 case CHIP_HAWAII:
cd4b356f
AD
3192 case CHIP_KAVERI:
3193 case CHIP_KABINI:
3194 case CHIP_MULLINS:
4562236b
HW
3195 case CHIP_TONGA:
3196 case CHIP_FIJI:
3197 case CHIP_CARRIZO:
3198 case CHIP_STONEY:
3199 case CHIP_POLARIS11:
3200 case CHIP_POLARIS10:
b264d345 3201 case CHIP_POLARIS12:
7737de91 3202 case CHIP_VEGAM:
2c8ad2d5 3203 case CHIP_VEGA10:
2325ff30 3204 case CHIP_VEGA12:
1fe6bf2f 3205 case CHIP_VEGA20:
4562236b
HW
3206 if (dce110_register_irq_handlers(dm->adev)) {
3207 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3208 goto fail;
4562236b
HW
3209 }
3210 break;
b86a1aa3 3211#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3212 case CHIP_RAVEN:
fbd2afe5 3213 case CHIP_NAVI12:
476e955d 3214 case CHIP_NAVI10:
fce651e3 3215 case CHIP_NAVI14:
30221ad8 3216 case CHIP_RENOIR:
79037324
BL
3217#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3218 case CHIP_SIENNA_CICHLID:
3219#endif
ff5ef992
AD
3220 if (dcn10_register_irq_handlers(dm->adev)) {
3221 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3222 goto fail;
ff5ef992
AD
3223 }
3224 break;
3225#endif
4562236b 3226 default:
e63f8673 3227 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 3228 goto fail;
4562236b
HW
3229 }
3230
2d673560
NK
3231 /* No userspace support. */
3232 dm->dc->debug.disable_tri_buf = true;
3233
4562236b 3234 return 0;
cd8a2ae8 3235fail:
4562236b 3236 kfree(aencoder);
4562236b 3237 kfree(aconnector);
54087768 3238
59d0f396 3239 return -EINVAL;
4562236b
HW
3240}
3241
7578ecda 3242static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b
HW
3243{
3244 drm_mode_config_cleanup(dm->ddev);
eb3dc897 3245 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
3246 return;
3247}
3248
3249/******************************************************************************
3250 * amdgpu_display_funcs functions
3251 *****************************************************************************/
3252
1f6010a9 3253/*
4562236b
HW
3254 * dm_bandwidth_update - program display watermarks
3255 *
3256 * @adev: amdgpu_device pointer
3257 *
3258 * Calculate and program the display watermarks and line buffer allocation.
3259 */
3260static void dm_bandwidth_update(struct amdgpu_device *adev)
3261{
49c07a99 3262 /* TODO: implement later */
4562236b
HW
3263}
3264
39cc5be2 3265static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
3266 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3267 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
3268 .backlight_set_level = NULL, /* never called for DC */
3269 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
3270 .hpd_sense = NULL,/* called unconditionally */
3271 .hpd_set_polarity = NULL, /* called unconditionally */
3272 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3273 .page_flip_get_scanoutpos =
3274 dm_crtc_get_scanoutpos,/* called unconditionally */
3275 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3276 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3277};
3278
3279#if defined(CONFIG_DEBUG_KERNEL_DC)
3280
3ee6b26b
AD
3281static ssize_t s3_debug_store(struct device *device,
3282 struct device_attribute *attr,
3283 const char *buf,
3284 size_t count)
4562236b
HW
3285{
3286 int ret;
3287 int s3_state;
ef1de361 3288 struct drm_device *drm_dev = dev_get_drvdata(device);
4562236b
HW
3289 struct amdgpu_device *adev = drm_dev->dev_private;
3290
3291 ret = kstrtoint(buf, 0, &s3_state);
3292
3293 if (ret == 0) {
3294 if (s3_state) {
3295 dm_resume(adev);
4562236b
HW
3296 drm_kms_helper_hotplug_event(adev->ddev);
3297 } else
3298 dm_suspend(adev);
3299 }
3300
3301 return ret == 0 ? count : 0;
3302}
3303
3304DEVICE_ATTR_WO(s3_debug);
3305
3306#endif
3307
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
	case CHIP_NAVI10:
	case CHIP_NAVI12:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
#endif
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_RENOIR:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

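/*
 * A full modeset is required only when the atomic core flagged the CRTC for
 * one and the CRTC ends up enabled and active; modereset covers the opposite
 * transition, where a flagged CRTC is being disabled or deactivated.
 */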
static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	if (!crtc_state->enable)
		return false;

	return crtc_state->active;
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	return !crtc_state->enable || !crtc_state->active;
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* TODO: Validate scaling per-format with DC plane caps */
	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < 250 || scale_w > 16000)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < 250 || scale_h > 16000)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}

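/*
 * Worked example for the bounds checked above: a 1920x1080 source scanned
 * out to a 3840x2160 plane gives scale_w = scale_h = 2000 (2.0x in the
 * per-mille encoding), well inside the accepted 250..16000 (0.25x..16x)
 * window, so the scaling info is accepted.
 */
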
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
		       uint64_t *tiling_flags, bool *tmz_surface)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	int r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	if (tmz_surface)
		*tmz_surface = amdgpu_bo_encrypted(rbo);

	amdgpu_bo_unreserve(rbo);

	return r;
}

static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
{
	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);

	return offset ? (address + offset * 256) : 0;
}

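/*
 * The DCC offset above is stored in units of 256 bytes, so e.g. an encoded
 * offset of 4 places the metadata at address + 1024, while an offset of 0
 * means the buffer carries no DCC metadata at all.
 */
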
static int
fill_plane_dcc_attributes(struct amdgpu_device *adev,
			  const struct amdgpu_framebuffer *afb,
			  const enum surface_pixel_format format,
			  const enum dc_rotation_angle rotation,
			  const struct plane_size *plane_size,
			  const union dc_tiling_info *tiling_info,
			  const uint64_t info,
			  struct dc_plane_dcc_param *dcc,
			  struct dc_plane_address *address,
			  bool force_disable_dcc)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;
	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
	uint64_t dcc_address;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (force_disable_dcc)
		return 0;

	if (!offset)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	if (!dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	dcc->enable = 1;
	dcc->meta_pitch =
		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
	dcc->independent_64b_blks = i64b;

	dcc_address = get_dcc_address(afb->address, info);
	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);

	return 0;
}

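/*
 * Summary of the early returns above: DCC is silently left disabled
 * (returning 0) when it is forced off, when the BO carries no metadata
 * offset, or for video formats; -EINVAL is reserved for surfaces that carry
 * DCC metadata DC cannot actually consume.
 */
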
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(afb->address);
		address->grph.addr.high_part = upper_32_bits(afb->address);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(afb->address);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(afb->address);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_NAVI10 ||
	    adev->asic_type == CHIP_NAVI14 ||
	    adev->asic_type == CHIP_NAVI12 ||
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	    adev->asic_type == CHIP_SIENNA_CICHLID ||
#endif
	    adev->asic_type == CHIP_RENOIR ||
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		tiling_info->gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		tiling_info->gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		tiling_info->gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		tiling_info->gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		tiling_info->gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		tiling_info->gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		tiling_info->gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		tiling_info->gfx9.shaderEnable = 1;

#ifdef CONFIG_DRM_AMD_DC_DCN3_0
		if (adev->asic_type == CHIP_SIENNA_CICHLID)
			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;

#endif
		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
						plane_size, tiling_info,
						tiling_flags, dcc, address,
						force_disable_dcc);
		if (ret)
			return ret;
	}

	return 0;
}

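/*
 * Note on the two tiling branches above: pre-GFX9 ASICs describe tiling
 * through discrete bank/tile-split fields, while the GFX9+ path reduces it
 * to a swizzle mode plus chip topology read back from gb_addr_config, and
 * only that newer path attempts DCC.
 */
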
static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

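/*
 * plane_state->alpha is the 16-bit DRM plane alpha (0xffff == opaque), so
 * the >> 8 above maps it onto the 8-bit 0x00..0xff range used by DC; for
 * example, a requested alpha of 0x8000 (~50%) becomes global alpha 0x80.
 */
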
static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	const struct amdgpu_framebuffer *amdgpu_fb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	uint64_t tiling_flags;
	int ret;
	bool tmz_surface = false;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
	if (ret)
		return ret;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			dst.x, dst.y, dst.width, dst.height);

}

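/*
 * Aspect-ratio example for the math above: a 1280x1024 (5:4) mode on a
 * 1920x1080 stream compares 1280 * 1080 = 1382400 against
 * 1024 * 1920 = 1966080; the former is smaller, so dst.width becomes
 * 1280 * 1080 / 1024 = 1350 and the image is centered at
 * dst.x = (1920 - 1350) / 2 = 285.
 */
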
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary Work around, DRM doesn't parse color depth for
		 * EDID revision before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

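/*
 * Example of the capping above: a panel reporting 12 bpc with a user
 * max_requested_bpc of 11 first caps to min(12, 11) = 11, then rounds down
 * to the even value 10 and resolves to COLOR_DEPTH_101010.
 */
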
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030khz is the separation point between HDTV and SDTV
		 * according to HDMI spec, we use YCbCr709 and YCbCr601
		 * respectively
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

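/*
 * pix_clk_100hz is in units of 100 Hz, so the 270300 threshold above is
 * 27.03 MHz, the HDMI SDTV/HDTV boundary: 480p at 27 MHz resolves to
 * YCbCr601 while 720p at 74.25 MHz resolves to YCbCr709.
 */
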
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;
	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}

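/*
 * Example run, assuming a sink advertising max_tmds_clock = 340000 kHz:
 * 4K60 YCbCr 4:2:0 normalizes 5940000 (100 Hz units) to 594000 kHz, halves
 * to 297000 for 4:2:0, then tries 12 bpc (445500, too high) and 10 bpc
 * (371250, still too high) before settling at 8 bpc with 297000.
 */
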
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}

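/*
 * Timing conversion example for the assignments above, CEA 1920x1080@60:
 * hdisplay 1920, hsync_start 2008, hsync_end 2052 yield
 * h_front_porch = 2008 - 1920 = 88 and h_sync_width = 2052 - 2008 = 44,
 * and crtc_clock 148500 kHz becomes pix_clk_100hz = 1485000.
 */
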
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
			native_mode->htotal == drm_mode->htotal &&
			native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}

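/*
 * Refresh-rate arithmetic above: pix_clk_100hz * 100 yields Hz, so a 1080p60
 * stream (pix_clk_100hz = 1485000, 2200x1125 total) evaluates to
 * 148500000 / 2475000 = 60; the stream with the highest such rate becomes
 * the event source for every triggered CRTC reset.
 */
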
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;

	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and refresh rate didn't change
	 * we copy the vic and polarities of the old timings
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream, requested_bpc);

	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC infopacket.
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	dc_sink_release(sink);

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	crtc->state = &state->base;
	crtc->state->crtc = crtc;

}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->interrupts_enabled = cur->interrupts_enabled;
	state->vrr_params = cur->vrr_params;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;

	/* TODO: Duplicate dc_stream once the stream object is flattened */

	return &state->base;
}

4654
d2574c33
MK
4655static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4656{
4657 enum dc_irq_source irq_source;
4658 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4659 struct amdgpu_device *adev = crtc->dev->dev_private;
4660 int rc;
4661
4662 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4663
4664 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4665
4666 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4667 acrtc->crtc_id, enable ? "en" : "dis", rc);
4668 return rc;
4669}
589d2739
HW
4670
4671static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4672{
4673 enum dc_irq_source irq_source;
4674 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4675 struct amdgpu_device *adev = crtc->dev->dev_private;
d2574c33
MK
4676 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4677 int rc = 0;
4678
4679 if (enable) {
4680 /* vblank irq on -> Only need vupdate irq in vrr mode */
4681 if (amdgpu_dm_vrr_active(acrtc_state))
4682 rc = dm_set_vupdate_irq(crtc, true);
4683 } else {
4684 /* vblank irq off -> vupdate irq off */
4685 rc = dm_set_vupdate_irq(crtc, false);
4686 }
4687
4688 if (rc)
4689 return rc;
589d2739
HW
4690
4691 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
a0e30392 4692 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
589d2739
HW
4693}
4694
4695static int dm_enable_vblank(struct drm_crtc *crtc)
4696{
4697 return dm_set_vblank(crtc, true);
4698}
4699
4700static void dm_disable_vblank(struct drm_crtc *crtc)
4701{
4702 dm_set_vblank(crtc, false);
4703}
4704
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}

4889
4890void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4891{
4892 struct dm_connector_state *state =
4893 to_dm_connector_state(connector->state);
4894
df099b9b
LSL
4895 if (connector->state)
4896 __drm_atomic_helper_connector_destroy_state(connector->state);
4897
e7b07cee
HW
4898 kfree(state);
4899
4900 state = kzalloc(sizeof(*state), GFP_KERNEL);
4901
4902 if (state) {
4903 state->scaling = RMX_OFF;
4904 state->underscan_enable = false;
4905 state->underscan_hborder = 0;
4906 state->underscan_vborder = 0;
01933ba4 4907 state->base.max_requested_bpc = 8;
3261e013
ML
4908 state->vcpi_slots = 0;
4909 state->pbn = 0;
c3e50f89
NK
4910 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4911 state->abm_level = amdgpu_dm_abm_level;
4912
df099b9b 4913 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
4914 }
4915}
4916
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
			kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

c84dec2f 5017static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5018{
5019 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5020
1f6010a9
DF
5021 /*
5022 * In case of a headless boot with force-on for a DP managed connector,
e7b07cee
HW
5023 * these settings have to be != 0 to get an initial modeset.
5024 */
5025 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5026 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5027 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5028 }
5029
5030
5031 aconnector->base.override_edid = true;
5032 create_eml_sink(aconnector);
5033}
5034
cbd14ae7
SW
5035static struct dc_stream_state *
5036create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5037 const struct drm_display_mode *drm_mode,
5038 const struct dm_connector_state *dm_state,
5039 const struct dc_stream_state *old_stream)
5040{
5041 struct drm_connector *connector = &aconnector->base;
5042 struct amdgpu_device *adev = connector->dev->dev_private;
5043 struct dc_stream_state *stream;
4b7da34b
SW
5044 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5045 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
5046 enum dc_status dc_result = DC_OK;
5047
5048 do {
5049 stream = create_stream_for_sink(aconnector, drm_mode,
5050 dm_state, old_stream,
5051 requested_bpc);
5052 if (stream == NULL) {
5053 DRM_ERROR("Failed to create stream for sink!\n");
5054 break;
5055 }
5056
5057 dc_result = dc_validate_stream(adev->dm.dc, stream);
5058
5059 if (dc_result != DC_OK) {
74a16675 5060 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
5061 drm_mode->hdisplay,
5062 drm_mode->vdisplay,
5063 drm_mode->clock,
74a16675
RS
5064 dc_result,
5065 dc_status_to_str(dc_result));
cbd14ae7
SW
5066
5067 dc_stream_release(stream);
5068 stream = NULL;
5069 requested_bpc -= 2; /* lower bpc to retry validation */
5070 }
5071
5072 } while (stream == NULL && requested_bpc >= 6);
5073
5074 return stream;
5075}
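/*
 * Worked example of the fallback loop above (hypothetical sink): with
 * max_requested_bpc = 10 the stream is built and validated at 10 bpc,
 * then 8, then 6, stopping at the first depth dc_validate_stream()
 * accepts; if even 6 bpc fails, NULL is returned and the mode is
 * rejected.
 */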
5076
ba9ca088 5077enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 5078 struct drm_display_mode *mode)
e7b07cee
HW
5079{
5080 int result = MODE_ERROR;
5081 struct dc_sink *dc_sink;
e7b07cee 5082 /* TODO: Unhardcode stream count */
0971c40e 5083 struct dc_stream_state *stream;
c84dec2f 5084 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
5085
5086 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5087 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5088 return result;
5089
1f6010a9
DF
5090 /*
5091 * Only run this the first time mode_valid is called to initialize
e7b07cee
HW
5092 * EDID mgmt
5093 */
5094 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5095 !aconnector->dc_em_sink)
5096 handle_edid_mgmt(aconnector);
5097
c84dec2f 5098 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 5099
b830ebc9 5100 if (dc_sink == NULL) {
e7b07cee
HW
5101 DRM_ERROR("dc_sink is NULL!\n");
5102 goto fail;
5103 }
5104
cbd14ae7
SW
5105 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5106 if (stream) {
5107 dc_stream_release(stream);
e7b07cee 5108 result = MODE_OK;
cbd14ae7 5109 }
e7b07cee
HW
5110
5111fail:
5112 /* TODO: error handling */
5113 return result;
5114}
5115
88694af9
NK
5116static int fill_hdr_info_packet(const struct drm_connector_state *state,
5117 struct dc_info_packet *out)
5118{
5119 struct hdmi_drm_infoframe frame;
5120 unsigned char buf[30]; /* 26 + 4 */
5121 ssize_t len;
5122 int ret, i;
5123
5124 memset(out, 0, sizeof(*out));
5125
5126 if (!state->hdr_output_metadata)
5127 return 0;
5128
5129 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5130 if (ret)
5131 return ret;
5132
5133 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5134 if (len < 0)
5135 return (int)len;
5136
5137 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5138 if (len != 30)
5139 return -EINVAL;
5140
5141 /* Prepare the infopacket for DC. */
5142 switch (state->connector->connector_type) {
5143 case DRM_MODE_CONNECTOR_HDMIA:
5144 out->hb0 = 0x87; /* type */
5145 out->hb1 = 0x01; /* version */
5146 out->hb2 = 0x1A; /* length */
5147 out->sb[0] = buf[3]; /* checksum */
5148 i = 1;
5149 break;
5150
5151 case DRM_MODE_CONNECTOR_DisplayPort:
5152 case DRM_MODE_CONNECTOR_eDP:
5153 out->hb0 = 0x00; /* sdp id, zero */
5154 out->hb1 = 0x87; /* type */
5155 out->hb2 = 0x1D; /* payload len - 1 */
5156 out->hb3 = (0x13 << 2); /* sdp version */
5157 out->sb[0] = 0x01; /* version */
5158 out->sb[1] = 0x1A; /* length */
5159 i = 2;
5160 break;
5161
5162 default:
5163 return -EINVAL;
5164 }
5165
5166 memcpy(&out->sb[i], &buf[4], 26);
5167 out->valid = true;
5168
5169 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5170 sizeof(out->sb), false);
5171
5172 return 0;
5173}
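/*
 * Packing sketch, derived from the code above: hdmi_drm_infoframe_pack_only()
 * emits 30 bytes, buf[0..3] being the infoframe header (0x87 type, version,
 * length, checksum) and buf[4..29] the 26-byte static metadata payload. For
 * HDMI the checksum is carried in sb[0]; for DP the payload rides in an SDP
 * instead, so sb[0..1] hold version/length and the metadata starts at sb[2].
 */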
5174
5175static bool
5176is_hdr_metadata_different(const struct drm_connector_state *old_state,
5177 const struct drm_connector_state *new_state)
5178{
5179 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5180 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5181
5182 if (old_blob != new_blob) {
5183 if (old_blob && new_blob &&
5184 old_blob->length == new_blob->length)
5185 return memcmp(old_blob->data, new_blob->data,
5186 old_blob->length);
5187
5188 return true;
5189 }
5190
5191 return false;
5192}
5193
5194static int
5195amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 5196 struct drm_atomic_state *state)
88694af9 5197{
51e857af
SP
5198 struct drm_connector_state *new_con_state =
5199 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
5200 struct drm_connector_state *old_con_state =
5201 drm_atomic_get_old_connector_state(state, conn);
5202 struct drm_crtc *crtc = new_con_state->crtc;
5203 struct drm_crtc_state *new_crtc_state;
5204 int ret;
5205
5206 if (!crtc)
5207 return 0;
5208
5209 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5210 struct dc_info_packet hdr_infopacket;
5211
5212 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5213 if (ret)
5214 return ret;
5215
5216 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5217 if (IS_ERR(new_crtc_state))
5218 return PTR_ERR(new_crtc_state);
5219
5220 /*
5221 * DC considers the stream backends changed if the
5222 * static metadata changes. Forcing the modeset also
5223 * gives a simple way for userspace to switch from
b232d4ed
NK
5224 * 8bpc to 10bpc when setting the metadata to enter
5225 * or exit HDR.
5226 *
5227 * Changing the static metadata after it's been
5228 * set is permissible, however. So only force a
5229 * modeset if we're entering or exiting HDR.
88694af9 5230 */
b232d4ed
NK
5231 new_crtc_state->mode_changed =
5232 !old_con_state->hdr_output_metadata ||
5233 !new_con_state->hdr_output_metadata;
88694af9
NK
5234 }
5235
5236 return 0;
5237}
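/*
 * Concrete example of the rule above: a userspace HDR player attaching an
 * hdr_output_metadata blob where there was none (SDR -> HDR) sets
 * mode_changed and forces a full modeset; a later metadata-only update
 * while already in HDR leaves mode_changed false and only refreshes the
 * infopacket.
 */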
5238
e7b07cee
HW
5239static const struct drm_connector_helper_funcs
5240amdgpu_dm_connector_helper_funcs = {
5241 /*
1f6010a9 5242 * If hotplugging a second bigger display in FB Con mode, bigger resolution
b830ebc9 5243 * modes will be filtered by drm_mode_validate_size(), and those modes
1f6010a9 5244 * are missing after user start lightdm. So we need to renew modes list.
b830ebc9
HW
5245 * in get_modes call back, not just return the modes count
5246 */
e7b07cee
HW
5247 .get_modes = get_modes,
5248 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 5249 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
5250};
5251
5252static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5253{
5254}
5255
bc92c065
NK
5256static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5257{
5258 struct drm_device *dev = new_crtc_state->crtc->dev;
5259 struct drm_plane *plane;
5260
5261 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5262 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5263 return true;
5264 }
5265
5266 return false;
5267}
5268
d6ef9b41 5269static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
5270{
5271 struct drm_atomic_state *state = new_crtc_state->state;
5272 struct drm_plane *plane;
5273 int num_active = 0;
5274
5275 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5276 struct drm_plane_state *new_plane_state;
5277
5278 /* Cursor planes are "fake". */
5279 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5280 continue;
5281
5282 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5283
5284 if (!new_plane_state) {
5285 /*
5286 * The plane is enabled on the CRTC and hasn't changed
5287 * state. This means that it previously passed
5288 * validation and is therefore enabled.
5289 */
5290 num_active += 1;
5291 continue;
5292 }
5293
5294 /* We need a framebuffer to be considered enabled. */
5295 num_active += (new_plane_state->fb != NULL);
5296 }
5297
d6ef9b41
NK
5298 return num_active;
5299}
5300
5301/*
5302 * Sets whether interrupts should be enabled on a specific CRTC.
5303 * We require that the stream be enabled and that there exist active
5304 * DC planes on the stream.
5305 */
5306static void
5307dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5308 struct drm_crtc_state *new_crtc_state)
5309{
5310 struct dm_crtc_state *dm_new_crtc_state =
5311 to_dm_crtc_state(new_crtc_state);
5312
5313 dm_new_crtc_state->active_planes = 0;
5314 dm_new_crtc_state->interrupts_enabled = false;
5315
5316 if (!dm_new_crtc_state->stream)
5317 return;
5318
5319 dm_new_crtc_state->active_planes =
5320 count_crtc_active_planes(new_crtc_state);
5321
5322 dm_new_crtc_state->interrupts_enabled =
5323 dm_new_crtc_state->active_planes > 0;
c14a005c
NK
5324}
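/*
 * Illustrative scenarios for the helper above: a CRTC with a stream and a
 * primary plane that has a framebuffer gets active_planes = 1 and
 * interrupts enabled; a stream whose only enabled plane is the "fake"
 * cursor plane counts as active_planes = 0, so interrupts stay off.
 */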
5325
3ee6b26b
AD
5326static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5327 struct drm_crtc_state *state)
e7b07cee
HW
5328{
5329 struct amdgpu_device *adev = crtc->dev->dev_private;
5330 struct dc *dc = adev->dm.dc;
5331 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5332 int ret = -EINVAL;
5333
d6ef9b41
NK
5334 /*
5335 * Update interrupt state for the CRTC. This needs to happen whenever
5336 * the CRTC has changed or whenever any of its planes have changed.
5337 * Atomic check satisfies both of these requirements since the CRTC
5338 * is added to the state by DRM during drm_atomic_helper_check_planes.
5339 */
5340 dm_update_crtc_interrupt_state(crtc, state);
5341
9b690ef3
BL
5342 if (unlikely(!dm_crtc_state->stream &&
5343 modeset_required(state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
5344 WARN_ON(1);
5345 return ret;
5346 }
5347
1f6010a9 5348 /* In some use cases, like reset, no stream is attached */
e7b07cee
HW
5349 if (!dm_crtc_state->stream)
5350 return 0;
5351
bc92c065
NK
5352 /*
5353 * We want at least one hardware plane enabled to use
5354 * the stream with a cursor enabled.
5355 */
c14a005c 5356 if (state->enable && state->active &&
bc92c065 5357 does_crtc_have_active_cursor(state) &&
d6ef9b41 5358 dm_crtc_state->active_planes == 0)
c14a005c
NK
5359 return -EINVAL;
5360
62c933f9 5361 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
5362 return 0;
5363
5364 return ret;
5365}
5366
3ee6b26b
AD
5367static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5368 const struct drm_display_mode *mode,
5369 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
5370{
5371 return true;
5372}
5373
5374static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5375 .disable = dm_crtc_helper_disable,
5376 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
5377 .mode_fixup = dm_crtc_helper_mode_fixup,
5378 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
5379};
5380
5381static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5382{
5383
5384}
5385
3261e013
ML
5386static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5387{
5388 switch (display_color_depth) {
5389 case COLOR_DEPTH_666:
5390 return 6;
5391 case COLOR_DEPTH_888:
5392 return 8;
5393 case COLOR_DEPTH_101010:
5394 return 10;
5395 case COLOR_DEPTH_121212:
5396 return 12;
5397 case COLOR_DEPTH_141414:
5398 return 14;
5399 case COLOR_DEPTH_161616:
5400 return 16;
5401 default:
5402 break;
5403 }
5404 return 0;
5405}
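/*
 * Example: COLOR_DEPTH_101010 maps to 10 bpc, which the MST encoder check
 * below turns into bpp = 10 * 3 = 30 bits per pixel for the PBN math.
 */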
5406
3ee6b26b
AD
5407static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5408 struct drm_crtc_state *crtc_state,
5409 struct drm_connector_state *conn_state)
e7b07cee 5410{
3261e013
ML
5411 struct drm_atomic_state *state = crtc_state->state;
5412 struct drm_connector *connector = conn_state->connector;
5413 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5414 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5415 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5416 struct drm_dp_mst_topology_mgr *mst_mgr;
5417 struct drm_dp_mst_port *mst_port;
5418 enum dc_color_depth color_depth;
5419 int clock, bpp = 0;
1bc22f20 5420 bool is_y420 = false;
3261e013
ML
5421
5422 if (!aconnector->port || !aconnector->dc_sink)
5423 return 0;
5424
5425 mst_port = aconnector->port;
5426 mst_mgr = &aconnector->mst_port->mst_mgr;
5427
5428 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5429 return 0;
5430
5431 if (!state->duplicated) {
cbd14ae7 5432 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
5433 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5434 aconnector->force_yuv420_output;
cbd14ae7
SW
5435 color_depth = convert_color_depth_from_display_info(connector,
5436 is_y420,
5437 max_bpc);
3261e013
ML
5438 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5439 clock = adjusted_mode->clock;
dc48529f 5440 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
5441 }
5442 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5443 mst_mgr,
5444 mst_port,
1c6c1cb5 5445 dm_new_connector_state->pbn,
03ca9600 5446 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
5447 if (dm_new_connector_state->vcpi_slots < 0) {
5448 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5449 return dm_new_connector_state->vcpi_slots;
5450 }
e7b07cee
HW
5451 return 0;
5452}
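/*
 * Rough intuition for the MST check above (see drm_dp_calc_pbn_mode() for
 * the exact math): PBN scales with pixel clock * bpp, so a 4K@60 stream at
 * 30 bpp claims roughly five times the bandwidth of a 1080p@60 stream at
 * 24 bpp, and drm_dp_atomic_find_vcpi_slots() returns a negative value when
 * the topology cannot carry the requested PBN.
 */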
5453
5454const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5455 .disable = dm_encoder_helper_disable,
5456 .atomic_check = dm_encoder_helper_atomic_check
5457};
5458
d9fe1a4c 5459#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
5460static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5461 struct dc_state *dc_state)
5462{
5463 struct dc_stream_state *stream = NULL;
5464 struct drm_connector *connector;
5465 struct drm_connector_state *new_con_state, *old_con_state;
5466 struct amdgpu_dm_connector *aconnector;
5467 struct dm_connector_state *dm_conn_state;
5468 int i, j, clock, bpp;
5469 int vcpi, pbn_div, pbn = 0;
5470
5471 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5472
5473 aconnector = to_amdgpu_dm_connector(connector);
5474
5475 if (!aconnector->port)
5476 continue;
5477
5478 if (!new_con_state || !new_con_state->crtc)
5479 continue;
5480
5481 dm_conn_state = to_dm_connector_state(new_con_state);
5482
5483 for (j = 0; j < dc_state->stream_count; j++) {
5484 stream = dc_state->streams[j];
5485 if (!stream)
5486 continue;
5487
5488 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5489 break;
5490
5491 stream = NULL;
5492 }
5493
5494 if (!stream)
5495 continue;
5496
5497 if (stream->timing.flags.DSC != 1) {
5498 drm_dp_mst_atomic_enable_dsc(state,
5499 aconnector->port,
5500 dm_conn_state->pbn,
5501 0,
5502 false);
5503 continue;
5504 }
5505
5506 pbn_div = dm_mst_get_pbn_divider(stream->link);
5507 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5508 clock = stream->timing.pix_clk_100hz / 10;
5509 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5510 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5511 aconnector->port,
5512 pbn, pbn_div,
5513 true);
5514 if (vcpi < 0)
5515 return vcpi;
5516
5517 dm_conn_state->pbn = pbn;
5518 dm_conn_state->vcpi_slots = vcpi;
5519 }
5520 return 0;
5521}
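/*
 * Note on the DSC branch above: once DSC is enabled on a stream,
 * timing.dsc_cfg.bits_per_pixel holds the compressed rate (in 1/16-bpp
 * units, matching what drm_dp_calc_pbn_mode() expects when its dsc argument
 * is true), so the PBN and VCPI slot count are recomputed from it instead
 * of reusing the uncompressed values cached in the connector state.
 */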
d9fe1a4c 5522#endif
29b9ba74 5523
e7b07cee
HW
5524static void dm_drm_plane_reset(struct drm_plane *plane)
5525{
5526 struct dm_plane_state *amdgpu_state = NULL;
5527
5528 if (plane->state)
5529 plane->funcs->atomic_destroy_state(plane, plane->state);
5530
5531 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 5532 WARN_ON(amdgpu_state == NULL);
1f6010a9 5533
7ddaef96
NK
5534 if (amdgpu_state)
5535 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
5536}
5537
5538static struct drm_plane_state *
5539dm_drm_plane_duplicate_state(struct drm_plane *plane)
5540{
5541 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5542
5543 old_dm_plane_state = to_dm_plane_state(plane->state);
5544 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5545 if (!dm_plane_state)
5546 return NULL;
5547
5548 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5549
3be5262e
HW
5550 if (old_dm_plane_state->dc_state) {
5551 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5552 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
5553 }
5554
5555 return &dm_plane_state->base;
5556}
5557
dfd84d90 5558static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 5559 struct drm_plane_state *state)
e7b07cee
HW
5560{
5561 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5562
3be5262e
HW
5563 if (dm_plane_state->dc_state)
5564 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 5565
0627bbd3 5566 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
5567}
5568
5569static const struct drm_plane_funcs dm_plane_funcs = {
5570 .update_plane = drm_atomic_helper_update_plane,
5571 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 5572 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
5573 .reset = dm_drm_plane_reset,
5574 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5575 .atomic_destroy_state = dm_drm_plane_destroy_state,
5576};
5577
3ee6b26b
AD
5578static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5579 struct drm_plane_state *new_state)
e7b07cee
HW
5580{
5581 struct amdgpu_framebuffer *afb;
5582 struct drm_gem_object *obj;
5d43be0c 5583 struct amdgpu_device *adev;
e7b07cee 5584 struct amdgpu_bo *rbo;
e7b07cee 5585 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
5586 struct list_head list;
5587 struct ttm_validate_buffer tv;
5588 struct ww_acquire_ctx ticket;
e0634e8d 5589 uint64_t tiling_flags;
5d43be0c
CK
5590 uint32_t domain;
5591 int r;
5888f07a 5592 bool tmz_surface = false;
87b7ebc2 5593 bool force_disable_dcc = false;
e7b07cee
HW
5594
5595 dm_plane_state_old = to_dm_plane_state(plane->state);
5596 dm_plane_state_new = to_dm_plane_state(new_state);
5597
5598 if (!new_state->fb) {
f1ad2f5e 5599 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
5600 return 0;
5601 }
5602
5603 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 5604 obj = new_state->fb->obj[0];
e7b07cee 5605 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 5606 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
5607 INIT_LIST_HEAD(&list);
5608
5609 tv.bo = &rbo->tbo;
5610 tv.num_shared = 1;
5611 list_add(&tv.head, &list);
5612
9165fb87 5613 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
5614 if (r) {
5615 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 5616 return r;
0f257b09 5617 }
e7b07cee 5618
5d43be0c 5619 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 5620 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
5621 else
5622 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 5623
7b7c6c81 5624 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 5625 if (unlikely(r != 0)) {
30b7c614
HW
5626 if (r != -ERESTARTSYS)
5627 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 5628 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
5629 return r;
5630 }
5631
bb812f1e
JZ
5632 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5633 if (unlikely(r != 0)) {
5634 amdgpu_bo_unpin(rbo);
0f257b09 5635 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5636 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
5637 return r;
5638 }
7df7e505
NK
5639
5640 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5641
5888f07a
HW
5642 tmz_surface = amdgpu_bo_encrypted(rbo);
5643
0f257b09 5644 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5645
7b7c6c81 5646 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
5647
5648 amdgpu_bo_ref(rbo);
5649
3be5262e
HW
5650 if (dm_plane_state_new->dc_state &&
5651 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5652 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
e7b07cee 5653
87b7ebc2 5654 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
320932bf 5655 fill_plane_buffer_attributes(
695af5f9
NK
5656 adev, afb, plane_state->format, plane_state->rotation,
5657 tiling_flags, &plane_state->tiling_info,
320932bf 5658 &plane_state->plane_size, &plane_state->dcc,
5888f07a 5659 &plane_state->address, tmz_surface,
87b7ebc2 5660 force_disable_dcc);
e7b07cee
HW
5661 }
5662
e7b07cee
HW
5663 return 0;
5664}
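/*
 * Pin/unwind order in prepare_fb, for reference: reserve the BO with
 * ttm_eu_reserve_buffers(), pin it (VRAM for cursors, any supported domain
 * otherwise), bind it into GART, read back the tiling/TMZ state, then back
 * off the reservation. Each error path undoes exactly the steps taken so
 * far, which is why amdgpu_bo_unpin() appears only on the GART-bind
 * failure.
 */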
5665
3ee6b26b
AD
5666static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5667 struct drm_plane_state *old_state)
e7b07cee
HW
5668{
5669 struct amdgpu_bo *rbo;
e7b07cee
HW
5670 int r;
5671
5672 if (!old_state->fb)
5673 return;
5674
e68d14dd 5675 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
5676 r = amdgpu_bo_reserve(rbo, false);
5677 if (unlikely(r)) {
5678 DRM_ERROR("failed to reserve rbo before unpin\n");
5679 return;
b830ebc9
HW
5680 }
5681
5682 amdgpu_bo_unpin(rbo);
5683 amdgpu_bo_unreserve(rbo);
5684 amdgpu_bo_unref(&rbo);
e7b07cee
HW
5685}
5686
8c44515b
AP
5687static int dm_plane_helper_check_state(struct drm_plane_state *state,
5688 struct drm_crtc_state *new_crtc_state)
5689{
5690 int max_downscale = 0;
5691 int max_upscale = INT_MAX;
5692
5693 /* TODO: These should be checked against DC plane caps */
5694 return drm_atomic_helper_check_plane_state(
5695 state, new_crtc_state, max_downscale, max_upscale, true, true);
5696}
5697
7578ecda
AD
5698static int dm_plane_atomic_check(struct drm_plane *plane,
5699 struct drm_plane_state *state)
cbd19488
AG
5700{
5701 struct amdgpu_device *adev = plane->dev->dev_private;
5702 struct dc *dc = adev->dm.dc;
78171832 5703 struct dm_plane_state *dm_plane_state;
695af5f9 5704 struct dc_scaling_info scaling_info;
8c44515b 5705 struct drm_crtc_state *new_crtc_state;
695af5f9 5706 int ret;
78171832
NK
5707
5708 dm_plane_state = to_dm_plane_state(state);
cbd19488 5709
3be5262e 5710 if (!dm_plane_state->dc_state)
9a3329b1 5711 return 0;
cbd19488 5712
8c44515b
AP
5713 new_crtc_state =
5714 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5715 if (!new_crtc_state)
5716 return -EINVAL;
5717
5718 ret = dm_plane_helper_check_state(state, new_crtc_state);
5719 if (ret)
5720 return ret;
5721
695af5f9
NK
5722 ret = fill_dc_scaling_info(state, &scaling_info);
5723 if (ret)
5724 return ret;
a05bcff1 5725
62c933f9 5726 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
5727 return 0;
5728
5729 return -EINVAL;
5730}
5731
674e78ac
NK
5732static int dm_plane_atomic_async_check(struct drm_plane *plane,
5733 struct drm_plane_state *new_plane_state)
5734{
5735 /* Only support async updates on cursor planes. */
5736 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5737 return -EINVAL;
5738
5739 return 0;
5740}
5741
5742static void dm_plane_atomic_async_update(struct drm_plane *plane,
5743 struct drm_plane_state *new_state)
5744{
5745 struct drm_plane_state *old_state =
5746 drm_atomic_get_old_plane_state(new_state->state, plane);
5747
332af874 5748 swap(plane->state->fb, new_state->fb);
674e78ac
NK
5749
5750 plane->state->src_x = new_state->src_x;
5751 plane->state->src_y = new_state->src_y;
5752 plane->state->src_w = new_state->src_w;
5753 plane->state->src_h = new_state->src_h;
5754 plane->state->crtc_x = new_state->crtc_x;
5755 plane->state->crtc_y = new_state->crtc_y;
5756 plane->state->crtc_w = new_state->crtc_w;
5757 plane->state->crtc_h = new_state->crtc_h;
5758
5759 handle_cursor_update(plane, old_state);
5760}
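/*
 * The async path above is cursor-only (enforced by the check callback): it
 * swaps the fb pointers and copies the src/crtc rectangles straight into
 * the current plane state, then programs the hardware through
 * handle_cursor_update() without a full atomic commit.
 */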
5761
e7b07cee
HW
5762static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5763 .prepare_fb = dm_plane_helper_prepare_fb,
5764 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 5765 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
5766 .atomic_async_check = dm_plane_atomic_async_check,
5767 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
5768};
5769
5770/*
5771 * TODO: these are currently initialized to rgb formats only.
5772 * For future use cases we should either initialize them dynamically based on
5773 * plane capabilities, or initialize this array to all formats, so the internal
1f6010a9 5774 * drm check will succeed, and let DC implement the proper check.
e7b07cee 5775 */
d90371b0 5776static const uint32_t rgb_formats[] = {
e7b07cee
HW
5777 DRM_FORMAT_XRGB8888,
5778 DRM_FORMAT_ARGB8888,
5779 DRM_FORMAT_RGBA8888,
5780 DRM_FORMAT_XRGB2101010,
5781 DRM_FORMAT_XBGR2101010,
5782 DRM_FORMAT_ARGB2101010,
5783 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
5784 DRM_FORMAT_XBGR8888,
5785 DRM_FORMAT_ABGR8888,
46dd9ff7 5786 DRM_FORMAT_RGB565,
e7b07cee
HW
5787};
5788
0d579c7e
NK
5789static const uint32_t overlay_formats[] = {
5790 DRM_FORMAT_XRGB8888,
5791 DRM_FORMAT_ARGB8888,
5792 DRM_FORMAT_RGBA8888,
5793 DRM_FORMAT_XBGR8888,
5794 DRM_FORMAT_ABGR8888,
7267a1a9 5795 DRM_FORMAT_RGB565
e7b07cee
HW
5796};
5797
5798static const u32 cursor_formats[] = {
5799 DRM_FORMAT_ARGB8888
5800};
5801
37c6a93b
NK
5802static int get_plane_formats(const struct drm_plane *plane,
5803 const struct dc_plane_cap *plane_cap,
5804 uint32_t *formats, int max_formats)
e7b07cee 5805{
37c6a93b
NK
5806 int i, num_formats = 0;
5807
5808 /*
5809 * TODO: Query support for each group of formats directly from
5810 * DC plane caps. This will require adding more formats to the
5811 * caps list.
5812 */
e7b07cee 5813
f180b4bc 5814 switch (plane->type) {
e7b07cee 5815 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
5816 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5817 if (num_formats >= max_formats)
5818 break;
5819
5820 formats[num_formats++] = rgb_formats[i];
5821 }
5822
ea36ad34 5823 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 5824 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
5825 if (plane_cap && plane_cap->pixel_format_support.p010)
5826 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
5827 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5828 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5829 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
5830 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5831 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 5832 }
e7b07cee 5833 break;
37c6a93b 5834
e7b07cee 5835 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
5836 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5837 if (num_formats >= max_formats)
5838 break;
5839
5840 formats[num_formats++] = overlay_formats[i];
5841 }
e7b07cee 5842 break;
37c6a93b 5843
e7b07cee 5844 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
5845 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5846 if (num_formats >= max_formats)
5847 break;
5848
5849 formats[num_formats++] = cursor_formats[i];
5850 }
e7b07cee
HW
5851 break;
5852 }
5853
37c6a93b
NK
5854 return num_formats;
5855}
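/*
 * Example with hypothetical caps: a primary plane whose dc_plane_cap
 * reports nv12, p010 and fp16 support ends up with the ten rgb_formats
 * entries plus DRM_FORMAT_NV12, DRM_FORMAT_P010 and the four 16-bit float
 * formats: 16 entries total, well under the 32-slot array the caller
 * passes in.
 */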
5856
5857static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5858 struct drm_plane *plane,
5859 unsigned long possible_crtcs,
5860 const struct dc_plane_cap *plane_cap)
5861{
5862 uint32_t formats[32];
5863 int num_formats;
5864 int res = -EPERM;
ecc874a6 5865 unsigned int supported_rotations;
37c6a93b
NK
5866
5867 num_formats = get_plane_formats(plane, plane_cap, formats,
5868 ARRAY_SIZE(formats));
5869
5870 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5871 &dm_plane_funcs, formats, num_formats,
5872 NULL, plane->type, NULL);
5873 if (res)
5874 return res;
5875
cc1fec57
NK
5876 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5877 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
5878 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5879 BIT(DRM_MODE_BLEND_PREMULTI);
5880
5881 drm_plane_create_alpha_property(plane);
5882 drm_plane_create_blend_mode_property(plane, blend_caps);
5883 }
5884
fc8e5230 5885 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
5886 plane_cap &&
5887 (plane_cap->pixel_format_support.nv12 ||
5888 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
5889 /* This only affects YUV formats. */
5890 drm_plane_create_color_properties(
5891 plane,
5892 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
5893 BIT(DRM_COLOR_YCBCR_BT709) |
5894 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
5895 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5896 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5897 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5898 }
5899
ecc874a6
PLG
5900 supported_rotations =
5901 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
5902 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
5903
5904 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
5905 supported_rotations);
5906
f180b4bc 5907 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 5908
96719c54 5909 /* Create (reset) the plane state */
f180b4bc
HW
5910 if (plane->funcs->reset)
5911 plane->funcs->reset(plane);
96719c54 5912
37c6a93b 5913 return 0;
e7b07cee
HW
5914}
5915
7578ecda
AD
5916static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5917 struct drm_plane *plane,
5918 uint32_t crtc_index)
e7b07cee
HW
5919{
5920 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 5921 struct drm_plane *cursor_plane;
e7b07cee
HW
5922
5923 int res = -ENOMEM;
5924
5925 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5926 if (!cursor_plane)
5927 goto fail;
5928
f180b4bc 5929 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 5930 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
5931
5932 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5933 if (!acrtc)
5934 goto fail;
5935
5936 res = drm_crtc_init_with_planes(
5937 dm->ddev,
5938 &acrtc->base,
5939 plane,
f180b4bc 5940 cursor_plane,
e7b07cee
HW
5941 &amdgpu_dm_crtc_funcs, NULL);
5942
5943 if (res)
5944 goto fail;
5945
5946 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5947
96719c54
HW
5948 /* Create (reset) the plane state */
5949 if (acrtc->base.funcs->reset)
5950 acrtc->base.funcs->reset(&acrtc->base);
5951
e7b07cee
HW
5952 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5953 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5954
5955 acrtc->crtc_id = crtc_index;
5956 acrtc->base.enabled = false;
c37e2d29 5957 acrtc->otg_inst = -1;
e7b07cee
HW
5958
5959 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
5960 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5961 true, MAX_COLOR_LUT_ENTRIES);
086247a4 5962 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e7b07cee
HW
5963
5964 return 0;
5965
5966fail:
b830ebc9
HW
5967 kfree(acrtc);
5968 kfree(cursor_plane);
e7b07cee
HW
5969 return res;
5970}
5971
5972
5973static int to_drm_connector_type(enum signal_type st)
5974{
5975 switch (st) {
5976 case SIGNAL_TYPE_HDMI_TYPE_A:
5977 return DRM_MODE_CONNECTOR_HDMIA;
5978 case SIGNAL_TYPE_EDP:
5979 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
5980 case SIGNAL_TYPE_LVDS:
5981 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
5982 case SIGNAL_TYPE_RGB:
5983 return DRM_MODE_CONNECTOR_VGA;
5984 case SIGNAL_TYPE_DISPLAY_PORT:
5985 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5986 return DRM_MODE_CONNECTOR_DisplayPort;
5987 case SIGNAL_TYPE_DVI_DUAL_LINK:
5988 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5989 return DRM_MODE_CONNECTOR_DVID;
5990 case SIGNAL_TYPE_VIRTUAL:
5991 return DRM_MODE_CONNECTOR_VIRTUAL;
5992
5993 default:
5994 return DRM_MODE_CONNECTOR_Unknown;
5995 }
5996}
5997
2b4c1c05
DV
5998static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5999{
62afb4ad
JRS
6000 struct drm_encoder *encoder;
6001
6002 /* There is only one encoder per connector */
6003 drm_connector_for_each_possible_encoder(connector, encoder)
6004 return encoder;
6005
6006 return NULL;
2b4c1c05
DV
6007}
6008
e7b07cee
HW
6009static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6010{
e7b07cee
HW
6011 struct drm_encoder *encoder;
6012 struct amdgpu_encoder *amdgpu_encoder;
6013
2b4c1c05 6014 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
6015
6016 if (encoder == NULL)
6017 return;
6018
6019 amdgpu_encoder = to_amdgpu_encoder(encoder);
6020
6021 amdgpu_encoder->native_mode.clock = 0;
6022
6023 if (!list_empty(&connector->probed_modes)) {
6024 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 6025
e7b07cee 6026 list_for_each_entry(preferred_mode,
b830ebc9
HW
6027 &connector->probed_modes,
6028 head) {
6029 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6030 amdgpu_encoder->native_mode = *preferred_mode;
6031
e7b07cee
HW
6032 break;
6033 }
6034
6035 }
6036}
6037
3ee6b26b
AD
6038static struct drm_display_mode *
6039amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6040 char *name,
6041 int hdisplay, int vdisplay)
e7b07cee
HW
6042{
6043 struct drm_device *dev = encoder->dev;
6044 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6045 struct drm_display_mode *mode = NULL;
6046 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6047
6048 mode = drm_mode_duplicate(dev, native_mode);
6049
b830ebc9 6050 if (mode == NULL)
e7b07cee
HW
6051 return NULL;
6052
6053 mode->hdisplay = hdisplay;
6054 mode->vdisplay = vdisplay;
6055 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 6056 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
6057
6058 return mode;
6059
6060}
6061
6062static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 6063 struct drm_connector *connector)
e7b07cee
HW
6064{
6065 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6066 struct drm_display_mode *mode = NULL;
6067 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
6068 struct amdgpu_dm_connector *amdgpu_dm_connector =
6069 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6070 int i;
6071 int n;
6072 struct mode_size {
6073 char name[DRM_DISPLAY_MODE_LEN];
6074 int w;
6075 int h;
b830ebc9 6076 } common_modes[] = {
e7b07cee
HW
6077 { "640x480", 640, 480},
6078 { "800x600", 800, 600},
6079 { "1024x768", 1024, 768},
6080 { "1280x720", 1280, 720},
6081 { "1280x800", 1280, 800},
6082 {"1280x1024", 1280, 1024},
6083 { "1440x900", 1440, 900},
6084 {"1680x1050", 1680, 1050},
6085 {"1600x1200", 1600, 1200},
6086 {"1920x1080", 1920, 1080},
6087 {"1920x1200", 1920, 1200}
6088 };
6089
b830ebc9 6090 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
6091
6092 for (i = 0; i < n; i++) {
6093 struct drm_display_mode *curmode = NULL;
6094 bool mode_existed = false;
6095
6096 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
6097 common_modes[i].h > native_mode->vdisplay ||
6098 (common_modes[i].w == native_mode->hdisplay &&
6099 common_modes[i].h == native_mode->vdisplay))
6100 continue;
e7b07cee
HW
6101
6102 list_for_each_entry(curmode, &connector->probed_modes, head) {
6103 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 6104 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
6105 mode_existed = true;
6106 break;
6107 }
6108 }
6109
6110 if (mode_existed)
6111 continue;
6112
6113 mode = amdgpu_dm_create_common_mode(encoder,
6114 common_modes[i].name, common_modes[i].w,
6115 common_modes[i].h);
6116 drm_mode_probed_add(connector, mode);
c84dec2f 6117 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
6118 }
6119}
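/*
 * Example: for a native 1920x1200 panel, the loop above injects 640x480
 * through 1920x1080 (skipping any size the EDID already provided), while
 * 1920x1200 itself is skipped as equal to the native mode and nothing
 * larger is offered.
 */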
6120
3ee6b26b
AD
6121static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6122 struct edid *edid)
e7b07cee 6123{
c84dec2f
HW
6124 struct amdgpu_dm_connector *amdgpu_dm_connector =
6125 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6126
6127 if (edid) {
6128 /* empty probed_modes */
6129 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 6130 amdgpu_dm_connector->num_modes =
e7b07cee
HW
6131 drm_add_edid_modes(connector, edid);
6132
f1e5e913
YMM
6133 /* Sort the probed modes before calling
6134 * amdgpu_dm_get_native_mode(), since the EDID can have
6135 * more than one preferred mode. The modes that are
6136 * later in the probed mode list could be of higher
6137 * and preferred resolution. For example, 3840x2160
6138 * resolution in base EDID preferred timing and 4096x2160
6139 * preferred resolution in DID extension block later.
6140 */
6141 drm_mode_sort(&connector->probed_modes);
e7b07cee 6142 amdgpu_dm_get_native_mode(connector);
a8d8d3dc 6143 } else {
c84dec2f 6144 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 6145 }
e7b07cee
HW
6146}
6147
7578ecda 6148static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 6149{
c84dec2f
HW
6150 struct amdgpu_dm_connector *amdgpu_dm_connector =
6151 to_amdgpu_dm_connector(connector);
e7b07cee 6152 struct drm_encoder *encoder;
c84dec2f 6153 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 6154
2b4c1c05 6155 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 6156
85ee15d6 6157 if (!edid || !drm_edid_is_valid(edid)) {
1b369d3c
ML
6158 amdgpu_dm_connector->num_modes =
6159 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
6160 } else {
6161 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6162 amdgpu_dm_connector_add_common_modes(encoder, connector);
6163 }
3e332d3a 6164 amdgpu_dm_fbc_init(connector);
5099114b 6165
c84dec2f 6166 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
6167}
6168
3ee6b26b
AD
6169void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6170 struct amdgpu_dm_connector *aconnector,
6171 int connector_type,
6172 struct dc_link *link,
6173 int link_index)
e7b07cee
HW
6174{
6175 struct amdgpu_device *adev = dm->ddev->dev_private;
6176
f04bee34
NK
6177 /*
6178 * Some of the properties below require access to state, like bpc.
6179 * Allocate some default initial connector state with our reset helper.
6180 */
6181 if (aconnector->base.funcs->reset)
6182 aconnector->base.funcs->reset(&aconnector->base);
6183
e7b07cee
HW
6184 aconnector->connector_id = link_index;
6185 aconnector->dc_link = link;
6186 aconnector->base.interlace_allowed = false;
6187 aconnector->base.doublescan_allowed = false;
6188 aconnector->base.stereo_allowed = false;
6189 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6190 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 6191 aconnector->audio_inst = -1;
e7b07cee
HW
6192 mutex_init(&aconnector->hpd_lock);
6193
1f6010a9
DF
6194 /*
6195 * Configure HPD hot-plug support: connector->polled defaults to 0,
b830ebc9
HW
6196 * which means HPD hot plug is not supported.
6197 */
e7b07cee
HW
6198 switch (connector_type) {
6199 case DRM_MODE_CONNECTOR_HDMIA:
6200 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 6201 aconnector->base.ycbcr_420_allowed =
9ea59d5a 6202 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
6203 break;
6204 case DRM_MODE_CONNECTOR_DisplayPort:
6205 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 6206 aconnector->base.ycbcr_420_allowed =
9ea59d5a 6207 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
6208 break;
6209 case DRM_MODE_CONNECTOR_DVID:
6210 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6211 break;
6212 default:
6213 break;
6214 }
6215
6216 drm_object_attach_property(&aconnector->base.base,
6217 dm->ddev->mode_config.scaling_mode_property,
6218 DRM_MODE_SCALE_NONE);
6219
6220 drm_object_attach_property(&aconnector->base.base,
6221 adev->mode_info.underscan_property,
6222 UNDERSCAN_OFF);
6223 drm_object_attach_property(&aconnector->base.base,
6224 adev->mode_info.underscan_hborder_property,
6225 0);
6226 drm_object_attach_property(&aconnector->base.base,
6227 adev->mode_info.underscan_vborder_property,
6228 0);
1825fd34 6229
8c61b31e
JFZ
6230 if (!aconnector->mst_port)
6231 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 6232
4a8ca46b
RL
6233 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6234 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6235 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 6236
c1ee92f9
DF
6237 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6238 dc_is_dmcu_initialized(adev->dm.dc)) {
6239 drm_object_attach_property(&aconnector->base.base,
6240 adev->mode_info.abm_level_property, 0);
6241 }
bb47de73
NK
6242
6243 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
6244 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6245 connector_type == DRM_MODE_CONNECTOR_eDP) {
88694af9
NK
6246 drm_object_attach_property(
6247 &aconnector->base.base,
6248 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6249
8c61b31e
JFZ
6250 if (!aconnector->mst_port)
6251 drm_connector_attach_vrr_capable_property(&aconnector->base);
6252
0c8620d6 6253#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 6254 if (adev->dm.hdcp_workqueue)
53e108aa 6255 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 6256#endif
bb47de73 6257 }
e7b07cee
HW
6258}
6259
7578ecda
AD
6260static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6261 struct i2c_msg *msgs, int num)
e7b07cee
HW
6262{
6263 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6264 struct ddc_service *ddc_service = i2c->ddc_service;
6265 struct i2c_command cmd;
6266 int i;
6267 int result = -EIO;
6268
b830ebc9 6269 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
6270
6271 if (!cmd.payloads)
6272 return result;
6273
6274 cmd.number_of_payloads = num;
6275 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6276 cmd.speed = 100;
6277
6278 for (i = 0; i < num; i++) {
6279 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6280 cmd.payloads[i].address = msgs[i].addr;
6281 cmd.payloads[i].length = msgs[i].len;
6282 cmd.payloads[i].data = msgs[i].buf;
6283 }
6284
c85e6e54
DF
6285 if (dc_submit_i2c(
6286 ddc_service->ctx->dc,
6287 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
6288 &cmd))
6289 result = num;
6290
6291 kfree(cmd.payloads);
6292 return result;
6293}
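/*
 * Transfer semantics sketch: each i2c_msg is translated 1:1 into a DC i2c
 * payload (read/write flag, address, length, buffer) and the whole array is
 * submitted as one i2c_command at a nominal 100 kHz. On success the i2c
 * core convention of returning the number of completed messages is kept;
 * any DC failure collapses to -EIO since per-message status is not
 * reported back.
 */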
6294
7578ecda 6295static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
6296{
6297 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6298}
6299
6300static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6301 .master_xfer = amdgpu_dm_i2c_xfer,
6302 .functionality = amdgpu_dm_i2c_func,
6303};
6304
3ee6b26b
AD
6305static struct amdgpu_i2c_adapter *
6306create_i2c(struct ddc_service *ddc_service,
6307 int link_index,
6308 int *res)
e7b07cee
HW
6309{
6310 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6311 struct amdgpu_i2c_adapter *i2c;
6312
b830ebc9 6313 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
6314 if (!i2c)
6315 return NULL;
e7b07cee
HW
6316 i2c->base.owner = THIS_MODULE;
6317 i2c->base.class = I2C_CLASS_DDC;
6318 i2c->base.dev.parent = &adev->pdev->dev;
6319 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 6320 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
6321 i2c_set_adapdata(&i2c->base, i2c);
6322 i2c->ddc_service = ddc_service;
c85e6e54 6323 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
6324
6325 return i2c;
6326}
6327
89fc8d4e 6328
1f6010a9
DF
6329/*
6330 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
6331 * dc_link which will be represented by this aconnector.
6332 */
7578ecda
AD
6333static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6334 struct amdgpu_dm_connector *aconnector,
6335 uint32_t link_index,
6336 struct amdgpu_encoder *aencoder)
e7b07cee
HW
6337{
6338 int res = 0;
6339 int connector_type;
6340 struct dc *dc = dm->dc;
6341 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6342 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
6343
6344 link->priv = aconnector;
e7b07cee 6345
f1ad2f5e 6346 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
6347
6348 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
6349 if (!i2c) {
6350 DRM_ERROR("Failed to create i2c adapter data\n");
6351 return -ENOMEM;
6352 }
6353
e7b07cee
HW
6354 aconnector->i2c = i2c;
6355 res = i2c_add_adapter(&i2c->base);
6356
6357 if (res) {
6358 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6359 goto out_free;
6360 }
6361
6362 connector_type = to_drm_connector_type(link->connector_signal);
6363
17165de2 6364 res = drm_connector_init_with_ddc(
e7b07cee
HW
6365 dm->ddev,
6366 &aconnector->base,
6367 &amdgpu_dm_connector_funcs,
17165de2
AP
6368 connector_type,
6369 &i2c->base);
e7b07cee
HW
6370
6371 if (res) {
6372 DRM_ERROR("connector_init failed\n");
6373 aconnector->connector_id = -1;
6374 goto out_free;
6375 }
6376
6377 drm_connector_helper_add(
6378 &aconnector->base,
6379 &amdgpu_dm_connector_helper_funcs);
6380
6381 amdgpu_dm_connector_init_helper(
6382 dm,
6383 aconnector,
6384 connector_type,
6385 link,
6386 link_index);
6387
cde4c44d 6388 drm_connector_attach_encoder(
e7b07cee
HW
6389 &aconnector->base, &aencoder->base);
6390
e7b07cee
HW
6391 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6392 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 6393 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 6394
e7b07cee
HW
6395out_free:
6396 if (res) {
6397 kfree(i2c);
6398 aconnector->i2c = NULL;
6399 }
6400 return res;
6401}
6402
6403int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6404{
6405 switch (adev->mode_info.num_crtc) {
6406 case 1:
6407 return 0x1;
6408 case 2:
6409 return 0x3;
6410 case 3:
6411 return 0x7;
6412 case 4:
6413 return 0xf;
6414 case 5:
6415 return 0x1f;
6416 case 6:
6417 default:
6418 return 0x3f;
6419 }
6420}
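/*
 * For the supported 1-6 CRTC configurations the switch above is just a
 * one-bit-per-CRTC mask, i.e. roughly:
 *
 *	return (1u << min(adev->mode_info.num_crtc, 6)) - 1;
 *
 * so an encoder may be routed to any of up to six CRTCs.
 */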
6421
7578ecda
AD
6422static int amdgpu_dm_encoder_init(struct drm_device *dev,
6423 struct amdgpu_encoder *aencoder,
6424 uint32_t link_index)
e7b07cee
HW
6425{
6426 struct amdgpu_device *adev = dev->dev_private;
6427
6428 int res = drm_encoder_init(dev,
6429 &aencoder->base,
6430 &amdgpu_dm_encoder_funcs,
6431 DRM_MODE_ENCODER_TMDS,
6432 NULL);
6433
6434 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6435
6436 if (!res)
6437 aencoder->encoder_id = link_index;
6438 else
6439 aencoder->encoder_id = -1;
6440
6441 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6442
6443 return res;
6444}
6445
3ee6b26b
AD
6446static void manage_dm_interrupts(struct amdgpu_device *adev,
6447 struct amdgpu_crtc *acrtc,
6448 bool enable)
e7b07cee
HW
6449{
6450 /*
6451 * This is not the correct translation, but it works as long as the
6452 * VBLANK constant is the same as PFLIP.
6453 */
6454 int irq_type =
734dd01d 6455 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
6456 adev,
6457 acrtc->crtc_id);
6458
6459 if (enable) {
6460 drm_crtc_vblank_on(&acrtc->base);
6461 amdgpu_irq_get(
6462 adev,
6463 &adev->pageflip_irq,
6464 irq_type);
6465 } else {
6466
6467 amdgpu_irq_put(
6468 adev,
6469 &adev->pageflip_irq,
6470 irq_type);
6471 drm_crtc_vblank_off(&acrtc->base);
6472 }
6473}
6474
3ee6b26b
AD
6475static bool
6476is_scaling_state_different(const struct dm_connector_state *dm_state,
6477 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
6478{
6479 if (dm_state->scaling != old_dm_state->scaling)
6480 return true;
6481 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6482 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6483 return true;
6484 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6485 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6486 return true;
b830ebc9
HW
6487 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6488 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6489 return true;
e7b07cee
HW
6490 return false;
6491}
6492
0c8620d6
BL
6493#ifdef CONFIG_DRM_AMD_DC_HDCP
6494static bool is_content_protection_different(struct drm_connector_state *state,
6495 const struct drm_connector_state *old_state,
6496 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6497{
6498 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6499
53e108aa
BL
6500 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6501 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6502 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6503 return true;
6504 }
6505
0c8620d6
BL
6506 /* CP is being re-enabled, ignore this */
6507 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6508 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6509 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6510 return false;
6511 }
6512
6513 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6514 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6515 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6516 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6517
6518 /* Check if something is connected/enabled; otherwise we would start HDCP
6519 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
6520 */
6521 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6522 aconnector->dc_sink != NULL)
6523 return true;
6524
6525 if (old_state->content_protection == state->content_protection)
6526 return false;
6527
6528 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6529 return true;
6530
6531 return false;
6532}
6533
0c8620d6 6534#endif
3ee6b26b
AD
6535static void remove_stream(struct amdgpu_device *adev,
6536 struct amdgpu_crtc *acrtc,
6537 struct dc_stream_state *stream)
e7b07cee
HW
6538{
6539 /* this is the update mode case */
e7b07cee
HW
6540
6541 acrtc->otg_inst = -1;
6542 acrtc->enabled = false;
6543}
6544
7578ecda
AD
6545static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6546 struct dc_cursor_position *position)
2a8f6ccb 6547{
f4c2cc43 6548 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
6549 int x, y;
6550 int xorigin = 0, yorigin = 0;
6551
e371e19c
NK
6552 position->enable = false;
6553 position->x = 0;
6554 position->y = 0;
6555
6556 if (!crtc || !plane->state->fb)
2a8f6ccb 6557 return 0;
2a8f6ccb
HW
6558
6559 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6560 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6561 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6562 __func__,
6563 plane->state->crtc_w,
6564 plane->state->crtc_h);
6565 return -EINVAL;
6566 }
6567
6568 x = plane->state->crtc_x;
6569 y = plane->state->crtc_y;
c14a005c 6570
e371e19c
NK
6571 if (x <= -amdgpu_crtc->max_cursor_width ||
6572 y <= -amdgpu_crtc->max_cursor_height)
6573 return 0;
6574
2a8f6ccb
HW
6575 if (x < 0) {
6576 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6577 x = 0;
6578 }
6579 if (y < 0) {
6580 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6581 y = 0;
6582 }
6583 position->enable = true;
d243b6ff 6584 position->translate_by_source = true;
2a8f6ccb
HW
6585 position->x = x;
6586 position->y = y;
6587 position->x_hotspot = xorigin;
6588 position->y_hotspot = yorigin;
6589
6590 return 0;
6591}
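/*
 * Clamping example: a 64x64 cursor at crtc_x = -10 yields xorigin = 10 and
 * x = 0, so DC draws the cursor at the left edge with the hotspot shifted
 * 10 pixels into the image; once the cursor moves past -max_cursor_width
 * entirely, it is disabled rather than clamped.
 */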
6592
3ee6b26b
AD
6593static void handle_cursor_update(struct drm_plane *plane,
6594 struct drm_plane_state *old_plane_state)
e7b07cee 6595{
674e78ac 6596 struct amdgpu_device *adev = plane->dev->dev_private;
2a8f6ccb
HW
6597 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6598 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6599 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6600 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6601 uint64_t address = afb ? afb->address : 0;
6602 struct dc_cursor_position position;
6603 struct dc_cursor_attributes attributes;
6604 int ret;
6605
e7b07cee
HW
6606 if (!plane->state->fb && !old_plane_state->fb)
6607 return;
6608
f1ad2f5e 6609 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d x %d\n",
c12a7ba5
HW
6610 __func__,
6611 amdgpu_crtc->crtc_id,
6612 plane->state->crtc_w,
6613 plane->state->crtc_h);
2a8f6ccb
HW
6614
6615 ret = get_cursor_position(plane, crtc, &position);
6616 if (ret)
6617 return;
6618
6619 if (!position.enable) {
6620 /* turn off cursor */
674e78ac
NK
6621 if (crtc_state && crtc_state->stream) {
6622 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
6623 dc_stream_set_cursor_position(crtc_state->stream,
6624 &position);
674e78ac
NK
6625 mutex_unlock(&adev->dm.dc_lock);
6626 }
2a8f6ccb 6627 return;
e7b07cee 6628 }
e7b07cee 6629
2a8f6ccb
HW
6630 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6631 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6632
c1cefe11 6633 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
6634 attributes.address.high_part = upper_32_bits(address);
6635 attributes.address.low_part = lower_32_bits(address);
6636 attributes.width = plane->state->crtc_w;
6637 attributes.height = plane->state->crtc_h;
6638 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6639 attributes.rotation_angle = 0;
6640 attributes.attribute_flags.value = 0;
6641
6642 attributes.pitch = attributes.width;
6643
886daac9 6644 if (crtc_state->stream) {
674e78ac 6645 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
6646 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6647 &attributes))
6648 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 6649
2a8f6ccb
HW
6650 if (!dc_stream_set_cursor_position(crtc_state->stream,
6651 &position))
6652 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 6653 mutex_unlock(&adev->dm.dc_lock);
886daac9 6654 }
2a8f6ccb 6655}
e7b07cee
HW
6656
6657static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6658{
6659
6660 assert_spin_locked(&acrtc->base.dev->event_lock);
6661 WARN_ON(acrtc->event);
6662
6663 acrtc->event = acrtc->base.state->event;
6664
6665 /* Set the flip status */
6666 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6667
6668 /* Mark this event as consumed */
6669 acrtc->base.state->event = NULL;
6670
6671 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6672 acrtc->crtc_id);
6673}
6674
bb47de73
NK
6675static void update_freesync_state_on_stream(
6676 struct amdgpu_display_manager *dm,
6677 struct dm_crtc_state *new_crtc_state,
180db303
NK
6678 struct dc_stream_state *new_stream,
6679 struct dc_plane_state *surface,
6680 u32 flip_timestamp_in_us)
bb47de73 6681{
09aef2c4 6682 struct mod_vrr_params vrr_params;
bb47de73 6683 struct dc_info_packet vrr_infopacket = {0};
09aef2c4
MK
6684 struct amdgpu_device *adev = dm->adev;
6685 unsigned long flags;
bb47de73
NK
6686
6687 if (!new_stream)
6688 return;
6689
6690 /*
6691 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6692 * For now it's sufficient to just guard against these conditions.
6693 */
6694
6695 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6696 return;
6697
09aef2c4
MK
6698 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6699 vrr_params = new_crtc_state->vrr_params;
6700
180db303
NK
6701 if (surface) {
6702 mod_freesync_handle_preflip(
6703 dm->freesync_module,
6704 surface,
6705 new_stream,
6706 flip_timestamp_in_us,
6707 &vrr_params);
09aef2c4
MK
6708
6709 if (adev->family < AMDGPU_FAMILY_AI &&
6710 amdgpu_dm_vrr_active(new_crtc_state)) {
6711 mod_freesync_handle_v_update(dm->freesync_module,
6712 new_stream, &vrr_params);
e63e2491
EB
6713
6714 /* Need to call this before the frame ends. */
6715 dc_stream_adjust_vmin_vmax(dm->dc,
6716 new_crtc_state->stream,
6717 &vrr_params.adjust);
09aef2c4 6718 }
180db303 6719 }
bb47de73
NK
6720
6721 mod_freesync_build_vrr_infopacket(
6722 dm->freesync_module,
6723 new_stream,
180db303 6724 &vrr_params,
ecd0136b
HT
6725 PACKET_TYPE_VRR,
6726 TRANSFER_FUNC_UNKNOWN,
bb47de73
NK
6727 &vrr_infopacket);
6728
8a48b44c 6729 new_crtc_state->freesync_timing_changed |=
180db303
NK
6730 (memcmp(&new_crtc_state->vrr_params.adjust,
6731 &vrr_params.adjust,
6732 sizeof(vrr_params.adjust)) != 0);
bb47de73 6733
8a48b44c 6734 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
6735 (memcmp(&new_crtc_state->vrr_infopacket,
6736 &vrr_infopacket,
6737 sizeof(vrr_infopacket)) != 0);
6738
180db303 6739 new_crtc_state->vrr_params = vrr_params;
bb47de73
NK
6740 new_crtc_state->vrr_infopacket = vrr_infopacket;
6741
180db303 6742 new_stream->adjust = new_crtc_state->vrr_params.adjust;
bb47de73
NK
6743 new_stream->vrr_infopacket = vrr_infopacket;
6744
6745 if (new_crtc_state->freesync_vrr_info_changed)
6746 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6747 new_crtc_state->base.crtc->base.id,
6748 (int)new_crtc_state->base.vrr_enabled,
180db303 6749 (int)vrr_params.state);
09aef2c4
MK
6750
6751 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
bb47de73
NK
6752}
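
Both freesync helpers here detect changes by byte-comparing the previous parameters against the freshly built ones. A minimal sketch of that pattern with a hypothetical struct; memcmp() is only safe because both copies are constructed the same way, padding included:

#include <stdbool.h>
#include <string.h>

/* Hypothetical stand-in for dc_crtc_timing_adjust / dc_info_packet. */
struct params { int vmin, vmax; };

static bool params_changed(const struct params *old_p,
			   const struct params *new_p)
{
	return memcmp(old_p, new_p, sizeof(*new_p)) != 0;
}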
6753
e854194c
MK
6754static void pre_update_freesync_state_on_stream(
6755 struct amdgpu_display_manager *dm,
6756 struct dm_crtc_state *new_crtc_state)
6757{
6758 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 6759 struct mod_vrr_params vrr_params;
e854194c 6760 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4
MK
6761 struct amdgpu_device *adev = dm->adev;
6762 unsigned long flags;
e854194c
MK
6763
6764 if (!new_stream)
6765 return;
6766
6767 /*
6768 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6769 * For now it's sufficient to just guard against these conditions.
6770 */
6771 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6772 return;
6773
09aef2c4
MK
6774 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6775 vrr_params = new_crtc_state->vrr_params;
6776
e854194c
MK
6777 if (new_crtc_state->vrr_supported &&
6778 config.min_refresh_in_uhz &&
6779 config.max_refresh_in_uhz) {
6780 config.state = new_crtc_state->base.vrr_enabled ?
6781 VRR_STATE_ACTIVE_VARIABLE :
6782 VRR_STATE_INACTIVE;
6783 } else {
6784 config.state = VRR_STATE_UNSUPPORTED;
6785 }
6786
6787 mod_freesync_build_vrr_params(dm->freesync_module,
6788 new_stream,
6789 &config, &vrr_params);
6790
6791 new_crtc_state->freesync_timing_changed |=
6792 (memcmp(&new_crtc_state->vrr_params.adjust,
6793 &vrr_params.adjust,
6794 sizeof(vrr_params.adjust)) != 0);
6795
6796 new_crtc_state->vrr_params = vrr_params;
09aef2c4 6797 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
e854194c
MK
6798}
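
A condensed sketch of the state selection above, with hypothetical names: VRR goes active only when the sink reports a usable refresh range and userspace has enabled it:

#include <stdbool.h>

enum vrr_state { VRR_UNSUPPORTED, VRR_INACTIVE, VRR_ACTIVE_VARIABLE };

static enum vrr_state pick_vrr_state(bool supported,
				     unsigned int min_uhz, unsigned int max_uhz,
				     bool user_enabled)
{
	if (!(supported && min_uhz && max_uhz))
		return VRR_UNSUPPORTED;
	return user_enabled ? VRR_ACTIVE_VARIABLE : VRR_INACTIVE;
}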
6799
66b0c973
MK
6800static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6801 struct dm_crtc_state *new_state)
6802{
6803 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6804 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6805
6806 if (!old_vrr_active && new_vrr_active) {
6807 /* Transition VRR inactive -> active:
6808 * While VRR is active, we must not disable vblank irq, as a
6809 * reenable after disable would compute bogus vblank/pflip
6810 * timestamps if the reenable happens inside the display front-porch.
d2574c33
MK
6811 *
6812 * We also need vupdate irq for the actual core vblank handling
6813 * at end of vblank.
66b0c973 6814 */
d2574c33 6815 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
6816 drm_crtc_vblank_get(new_state->base.crtc);
6817 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6818 __func__, new_state->base.crtc->base.id);
6819 } else if (old_vrr_active && !new_vrr_active) {
6820 /* Transition VRR active -> inactive:
6821 * Allow vblank irq disable again for fixed refresh rate.
6822 */
d2574c33 6823 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
6824 drm_crtc_vblank_put(new_state->base.crtc);
6825 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6826 __func__, new_state->base.crtc->base.id);
6827 }
6828}
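
The handler above is edge-triggered, so the vblank reference count stays balanced across commits: one get on off->on, one put on on->off, nothing otherwise. A minimal model:

#include <stdbool.h>

struct vblank_ref { int refcount; };

static void model_vrr_transition(struct vblank_ref *r,
				 bool old_active, bool new_active)
{
	if (!old_active && new_active)
		r->refcount++;          /* drm_crtc_vblank_get() */
	else if (old_active && !new_active)
		r->refcount--;          /* drm_crtc_vblank_put() */
	/* no change when the state is unchanged */
}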
6829
8ad27806
NK
6830static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6831{
6832 struct drm_plane *plane;
6833 struct drm_plane_state *old_plane_state, *new_plane_state;
6834 int i;
6835
6836 /*
6837 * TODO: Make this per-stream so we don't issue redundant updates for
6838 * commits with multiple streams.
6839 */
6840 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6841 new_plane_state, i)
6842 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6843 handle_cursor_update(plane, old_plane_state);
6844}
6845
3be5262e 6846static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 6847 struct dc_state *dc_state,
3ee6b26b
AD
6848 struct drm_device *dev,
6849 struct amdgpu_display_manager *dm,
6850 struct drm_crtc *pcrtc,
420cd472 6851 bool wait_for_vblank)
e7b07cee 6852{
570c91d5 6853 uint32_t i;
8a48b44c 6854 uint64_t timestamp_ns;
e7b07cee 6855 struct drm_plane *plane;
0bc9706d 6856 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 6857 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
6858 struct drm_crtc_state *new_pcrtc_state =
6859 drm_atomic_get_new_crtc_state(state, pcrtc);
6860 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
6861 struct dm_crtc_state *dm_old_crtc_state =
6862 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 6863 int planes_count = 0, vpos, hpos;
570c91d5 6864 long r;
e7b07cee 6865 unsigned long flags;
8a48b44c 6866 struct amdgpu_bo *abo;
09e5665a 6867 uint64_t tiling_flags;
5888f07a 6868 bool tmz_surface = false;
fdd1fe57
MK
6869 uint32_t target_vblank, last_flip_vblank;
6870 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 6871 bool pflip_present = false;
bc7f670e
DF
6872 struct {
6873 struct dc_surface_update surface_updates[MAX_SURFACES];
6874 struct dc_plane_info plane_infos[MAX_SURFACES];
6875 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 6876 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 6877 struct dc_stream_update stream_update;
74aa7bd4 6878 } *bundle;
bc7f670e 6879
74aa7bd4 6880 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 6881
74aa7bd4
DF
6882 if (!bundle) {
6883 dm_error("Failed to allocate update bundle\n");
4b510503
NK
6884 goto cleanup;
6885 }
e7b07cee 6886
8ad27806
NK
6887 /*
6888 * Disable the cursor first if we're disabling all the planes.
6889 * It'll remain on the screen after the planes are re-enabled
6890 * if we don't.
6891 */
6892 if (acrtc_state->active_planes == 0)
6893 amdgpu_dm_commit_cursors(state);
6894
e7b07cee 6895 /* update planes when needed */
0bc9706d
LSL
6896 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6897 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 6898 struct drm_crtc_state *new_crtc_state;
0bc9706d 6899 struct drm_framebuffer *fb = new_plane_state->fb;
34bafd27 6900 bool plane_needs_flip;
c7af5f77 6901 struct dc_plane_state *dc_plane;
54d76575 6902 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 6903
80c218d5
NK
6904 /* Cursor plane is handled after stream updates */
6905 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 6906 continue;
e7b07cee 6907
f5ba60fe
DD
6908 if (!fb || !crtc || pcrtc != crtc)
6909 continue;
6910
6911 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6912 if (!new_crtc_state->active)
e7b07cee
HW
6913 continue;
6914
bc7f670e 6915 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 6916
74aa7bd4 6917 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 6918 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
6919 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6920 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 6921 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 6922 }
8a48b44c 6923
695af5f9
NK
6924 fill_dc_scaling_info(new_plane_state,
6925 &bundle->scaling_infos[planes_count]);
8a48b44c 6926
695af5f9
NK
6927 bundle->surface_updates[planes_count].scaling_info =
6928 &bundle->scaling_infos[planes_count];
8a48b44c 6929
f5031000 6930 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 6931
f5031000 6932 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 6933
f5031000
DF
6934 if (!plane_needs_flip) {
6935 planes_count += 1;
6936 continue;
6937 }
8a48b44c 6938
2fac0f53
CK
6939 abo = gem_to_amdgpu_bo(fb->obj[0]);
6940
f8308898
AG
6941 /*
6942 * Wait for all fences on this FB. Do limited wait to avoid
6943 * deadlock during GPU reset when this fence will not signal
6944 * but we hold reservation lock for the BO.
6945 */
52791eee 6946 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 6947 false,
f8308898
AG
6948 msecs_to_jiffies(5000));
6949 if (unlikely(r <= 0))
ed8a5fb2 6950 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 6951
f5031000
DF
6952 /*
6953 * TODO: This might fail and hence is better not used; wait
6954 * explicitly on fences instead,
6955 * and in general this should be done for
6956 * blocking commits, as per the framework helpers.
6957 */
f5031000 6958 r = amdgpu_bo_reserve(abo, true);
f8308898 6959 if (unlikely(r != 0))
f5031000 6960 DRM_ERROR("failed to reserve buffer before flip\n");
8a48b44c 6961
f5031000 6962 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
8a48b44c 6963
5888f07a
HW
6964 tmz_surface = amdgpu_bo_encrypted(abo);
6965
f5031000 6966 amdgpu_bo_unreserve(abo);
8a48b44c 6967
695af5f9
NK
6968 fill_dc_plane_info_and_addr(
6969 dm->adev, new_plane_state, tiling_flags,
6970 &bundle->plane_infos[planes_count],
87b7ebc2 6971 &bundle->flip_addrs[planes_count].address,
5888f07a 6972 tmz_surface,
87b7ebc2
RS
6973 false);
6974
6975 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6976 new_plane_state->plane->index,
6977 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
6978
6979 bundle->surface_updates[planes_count].plane_info =
6980 &bundle->plane_infos[planes_count];
8a48b44c 6981
caff0e66
NK
6982 /*
6983 * Only allow immediate flips for fast updates that don't
6984 * change FB pitch, DCC state, rotation or mirroring.
6985 */
f5031000 6986 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 6987 crtc->state->async_flip &&
caff0e66 6988 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 6989
f5031000
DF
6990 timestamp_ns = ktime_get_ns();
6991 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6992 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6993 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 6994
f5031000
DF
6995 if (!bundle->surface_updates[planes_count].surface) {
6996 DRM_ERROR("No surface for CRTC: id=%d\n",
6997 acrtc_attach->crtc_id);
6998 continue;
bc7f670e
DF
6999 }
7000
f5031000
DF
7001 if (plane == pcrtc->primary)
7002 update_freesync_state_on_stream(
7003 dm,
7004 acrtc_state,
7005 acrtc_state->stream,
7006 dc_plane,
7007 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 7008
f5031000
DF
7009 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7010 __func__,
7011 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7012 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
7013
7014 planes_count += 1;
7015
8a48b44c
DF
7016 }
7017
74aa7bd4 7018 if (pflip_present) {
634092b1
MK
7019 if (!vrr_active) {
7020 /* Use old throttling in non-vrr fixed refresh rate mode
7021 * to keep flip scheduling based on target vblank counts
7022 * working in a backwards compatible way, e.g., for
7023 * clients using the GLX_OML_sync_control extension or
7024 * DRI3/Present extension with defined target_msc.
7025 */
e3eff4b5 7026 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
7027 }
7028 else {
7029 /* For variable refresh rate mode only:
7030 * Get vblank of last completed flip to avoid > 1 vrr
7031 * flips per video frame by use of throttling, but allow
7032 * flip programming anywhere in the possibly large
7033 * variable vrr vblank interval for fine-grained flip
7034 * timing control and more opportunity to avoid stutter
7035 * on late submission of flips.
7036 */
7037 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7038 last_flip_vblank = acrtc_attach->last_flip_vblank;
7039 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7040 }
7041
fdd1fe57 7042 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
7043
7044 /*
7045 * Wait until we're out of the vertical blank period before the one
7046 * targeted by the flip
7047 */
7048 while ((acrtc_attach->enabled &&
7049 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7050 0, &vpos, &hpos, NULL,
7051 NULL, &pcrtc->hwmode)
7052 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7053 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7054 (int)(target_vblank -
e3eff4b5 7055 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
7056 usleep_range(1000, 1100);
7057 }
7058
7059 if (acrtc_attach->base.state->event) {
7060 drm_crtc_vblank_get(pcrtc);
7061
7062 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7063
7064 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7065 prepare_flip_isr(acrtc_attach);
7066
7067 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7068 }
7069
7070 if (acrtc_state->stream) {
8a48b44c 7071 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 7072 bundle->stream_update.vrr_infopacket =
8a48b44c 7073 &acrtc_state->stream->vrr_infopacket;
e7b07cee 7074 }
e7b07cee
HW
7075 }
7076
bc92c065 7077 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
7078 if ((planes_count || acrtc_state->active_planes == 0) &&
7079 acrtc_state->stream) {
b6e881c9 7080 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 7081 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
7082 bundle->stream_update.src = acrtc_state->stream->src;
7083 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
7084 }
7085
cf020d49
NK
7086 if (new_pcrtc_state->color_mgmt_changed) {
7087 /*
7088 * TODO: This isn't fully correct since we've actually
7089 * already modified the stream in place.
7090 */
7091 bundle->stream_update.gamut_remap =
7092 &acrtc_state->stream->gamut_remap_matrix;
7093 bundle->stream_update.output_csc_transform =
7094 &acrtc_state->stream->csc_color_matrix;
7095 bundle->stream_update.out_transfer_func =
7096 acrtc_state->stream->out_transfer_func;
7097 }
bc7f670e 7098
8a48b44c 7099 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 7100 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 7101 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 7102
e63e2491
EB
7103 /*
7104 * If FreeSync state on the stream has changed then we need to
7105 * re-adjust the min/max bounds now that DC doesn't handle this
7106 * as part of commit.
7107 */
7108 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7109 amdgpu_dm_vrr_active(acrtc_state)) {
7110 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7111 dc_stream_adjust_vmin_vmax(
7112 dm->dc, acrtc_state->stream,
7113 &acrtc_state->vrr_params.adjust);
7114 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7115 }
bc7f670e 7116 mutex_lock(&dm->dc_lock);
8c322309 7117 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 7118 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
7119 amdgpu_dm_psr_disable(acrtc_state->stream);
7120
bc7f670e 7121 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 7122 bundle->surface_updates,
bc7f670e
DF
7123 planes_count,
7124 acrtc_state->stream,
74aa7bd4 7125 &bundle->stream_update,
bc7f670e 7126 dc_state);
8c322309
RL
7127
7128 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 7129 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 7130 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309
RL
7131 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7132 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
d1ebfdd8
WW
7133 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7134 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8c322309
RL
7135 amdgpu_dm_psr_enable(acrtc_state->stream);
7136 }
7137
bc7f670e 7138 mutex_unlock(&dm->dc_lock);
e7b07cee 7139 }
4b510503 7140
8ad27806
NK
7141 /*
7142 * Update cursor state *after* programming all the planes.
7143 * This avoids redundant programming in the case where we're going
7144 * to be disabling a single plane - those pipes are being disabled.
7145 */
7146 if (acrtc_state->active_planes)
7147 amdgpu_dm_commit_cursors(state);
80c218d5 7148
4b510503 7149cleanup:
74aa7bd4 7150 kfree(bundle);
e7b07cee
HW
7151}
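
The throttling loop in amdgpu_dm_commit_planes() compares vblank counts through a signed difference so the test stays correct when the 32-bit counter wraps around. A sketch of just that comparison:

#include <stdint.h>
#include <stdbool.h>

static bool target_vblank_reached(uint32_t target_vblank,
				  uint32_t current_vblank)
{
	/* Signed wrap-safe comparison, as in the wait loop above. */
	return (int32_t)(target_vblank - current_vblank) <= 0;
}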
7152
6ce8f316
NK
7153static void amdgpu_dm_commit_audio(struct drm_device *dev,
7154 struct drm_atomic_state *state)
7155{
7156 struct amdgpu_device *adev = dev->dev_private;
7157 struct amdgpu_dm_connector *aconnector;
7158 struct drm_connector *connector;
7159 struct drm_connector_state *old_con_state, *new_con_state;
7160 struct drm_crtc_state *new_crtc_state;
7161 struct dm_crtc_state *new_dm_crtc_state;
7162 const struct dc_stream_status *status;
7163 int i, inst;
7164
7165 /* Notify audio device removals. */
7166 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7167 if (old_con_state->crtc != new_con_state->crtc) {
7168 /* CRTC changes require notification. */
7169 goto notify;
7170 }
7171
7172 if (!new_con_state->crtc)
7173 continue;
7174
7175 new_crtc_state = drm_atomic_get_new_crtc_state(
7176 state, new_con_state->crtc);
7177
7178 if (!new_crtc_state)
7179 continue;
7180
7181 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7182 continue;
7183
7184 notify:
7185 aconnector = to_amdgpu_dm_connector(connector);
7186
7187 mutex_lock(&adev->dm.audio_lock);
7188 inst = aconnector->audio_inst;
7189 aconnector->audio_inst = -1;
7190 mutex_unlock(&adev->dm.audio_lock);
7191
7192 amdgpu_dm_audio_eld_notify(adev, inst);
7193 }
7194
7195 /* Notify audio device additions. */
7196 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7197 if (!new_con_state->crtc)
7198 continue;
7199
7200 new_crtc_state = drm_atomic_get_new_crtc_state(
7201 state, new_con_state->crtc);
7202
7203 if (!new_crtc_state)
7204 continue;
7205
7206 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7207 continue;
7208
7209 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7210 if (!new_dm_crtc_state->stream)
7211 continue;
7212
7213 status = dc_stream_get_status(new_dm_crtc_state->stream);
7214 if (!status)
7215 continue;
7216
7217 aconnector = to_amdgpu_dm_connector(connector);
7218
7219 mutex_lock(&adev->dm.audio_lock);
7220 inst = status->audio_inst;
7221 aconnector->audio_inst = inst;
7222 mutex_unlock(&adev->dm.audio_lock);
7223
7224 amdgpu_dm_audio_eld_notify(adev, inst);
7225 }
7226}
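
A loose model of the two-pass notification above, with hypothetical names: the stale instance is invalidated and announced before the DC-reported instance is published:

#include <stdio.h>

/* Stand-in for amdgpu_dm_audio_eld_notify(); inst -1 means "no endpoint". */
static void notify(int inst) { printf("eld notify: inst=%d\n", inst); }

struct endpoint { int inst; };

static void audio_route_change(struct endpoint *ep, int dc_inst)
{
	int old_inst = ep->inst;

	ep->inst = -1;          /* removal pass: invalidate, then notify */
	notify(old_inst);

	ep->inst = dc_inst;     /* addition pass: publish DC's instance */
	notify(dc_inst);
}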
7227
b5e83f6f
NK
7228/*
7229 * Enable interrupts on CRTCs that are newly active, undergone
7230 * a modeset, or have active planes again.
7231 *
7232 * Done in two passes, based on the for_modeset flag:
7233 * Pass 1: For CRTCs going through modeset
7234 * Pass 2: For CRTCs going from 0 to n active planes
7235 *
7236 * Interrupts can only be enabled after the planes are programmed,
7237 * so this requires a two-pass approach since we don't want to
7238 * just defer the interrupts until after commit planes every time.
7239 */
7240static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
7241 struct drm_atomic_state *state,
7242 bool for_modeset)
7243{
7244 struct amdgpu_device *adev = dev->dev_private;
7245 struct drm_crtc *crtc;
7246 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7247 int i;
148d31e3 7248#ifdef CONFIG_DEBUG_FS
14b25846 7249 enum amdgpu_dm_pipe_crc_source source;
148d31e3 7250#endif
b5e83f6f
NK
7251
7252 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7253 new_crtc_state, i) {
7254 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7255 struct dm_crtc_state *dm_new_crtc_state =
7256 to_dm_crtc_state(new_crtc_state);
7257 struct dm_crtc_state *dm_old_crtc_state =
7258 to_dm_crtc_state(old_crtc_state);
7259 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
7260 bool run_pass;
7261
7262 run_pass = (for_modeset && modeset) ||
7263 (!for_modeset && !modeset &&
7264 !dm_old_crtc_state->interrupts_enabled);
7265
7266 if (!run_pass)
7267 continue;
7268
b5e83f6f
NK
7269 if (!dm_new_crtc_state->interrupts_enabled)
7270 continue;
7271
7272 manage_dm_interrupts(adev, acrtc, true);
7273
7274#ifdef CONFIG_DEBUG_FS
7275 /* The stream has changed so CRC capture needs to be re-enabled. */
14b25846
DZ
7276 source = dm_new_crtc_state->crc_src;
7277 if (amdgpu_dm_is_valid_crc_source(source)) {
57638021
NK
7278 amdgpu_dm_crtc_configure_crc_source(
7279 crtc, dm_new_crtc_state,
7280 dm_new_crtc_state->crc_src);
b5e83f6f
NK
7281 }
7282#endif
7283 }
7284}
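
The pass predicate above in one helper, mirroring the code: pass 1 takes CRTCs undergoing a modeset, pass 2 takes CRTCs that merely regained active planes with interrupts previously off:

#include <stdbool.h>

static bool run_pass(bool for_modeset, bool modeset,
		     bool old_interrupts_enabled)
{
	return (for_modeset && modeset) ||
	       (!for_modeset && !modeset && !old_interrupts_enabled);
}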
7285
1f6010a9 7286/*
27b3f4fc
LSL
7287 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7288 * @crtc_state: the DRM CRTC state
7289 * @stream_state: the DC stream state.
7290 *
7291 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7292 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7293 */
7294static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7295 struct dc_stream_state *stream_state)
7296{
b9952f93 7297 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 7298}
e7b07cee 7299
7578ecda
AD
7300static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7301 struct drm_atomic_state *state,
7302 bool nonblock)
e7b07cee
HW
7303{
7304 struct drm_crtc *crtc;
c2cea706 7305 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
7306 struct amdgpu_device *adev = dev->dev_private;
7307 int i;
7308
7309 /*
d6ef9b41
NK
7310 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7311 * a modeset, being disabled, or have no active planes.
7312 *
7313 * It's done in atomic commit rather than commit tail for now since
7314 * some of these interrupt handlers access the current CRTC state and
7315 * potentially the stream pointer itself.
7316 *
7317 * Since the atomic state is swapped within atomic commit and not within
7318 * commit tail, this would lead to the new state (that hasn't been committed yet)
7319 * being accessed from within the handlers.
7320 *
7321 * TODO: Fix this so we can do this in commit tail and not have to block
7322 * in atomic check.
e7b07cee 7323 */
c2cea706 7324 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
54d76575 7325 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
428da2bd 7326 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee
HW
7327 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7328
d6ef9b41
NK
7329 if (dm_old_crtc_state->interrupts_enabled &&
7330 (!dm_new_crtc_state->interrupts_enabled ||
57638021 7331 drm_atomic_crtc_needs_modeset(new_crtc_state)))
e7b07cee
HW
7332 manage_dm_interrupts(adev, acrtc, false);
7333 }
1f6010a9
DF
7334 /*
7335 * Add check here for SoC's that support hardware cursor plane, to
7336 * unset legacy_cursor_update
7337 */
e7b07cee
HW
7338
7339 return drm_atomic_helper_commit(dev, state, nonblock);
7340
7341 /* TODO: Handle EINTR, reenable IRQ */
7342}
7343
b8592b48
LL
7344/**
7345 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7346 * @state: The atomic state to commit
7347 *
7348 * This will tell DC to commit the constructed DC state from atomic_check,
7349 * programming the hardware. Any failures here implies a hardware failure, since
7350 * atomic check should have filtered anything non-kosher.
7351 */
7578ecda 7352static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
7353{
7354 struct drm_device *dev = state->dev;
7355 struct amdgpu_device *adev = dev->dev_private;
7356 struct amdgpu_display_manager *dm = &adev->dm;
7357 struct dm_atomic_state *dm_state;
eb3dc897 7358 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 7359 uint32_t i, j;
5cc6dcbd 7360 struct drm_crtc *crtc;
0bc9706d 7361 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
7362 unsigned long flags;
7363 bool wait_for_vblank = true;
7364 struct drm_connector *connector;
c2cea706 7365 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 7366 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 7367 int crtc_disable_count = 0;
e7b07cee
HW
7368
7369 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7370
eb3dc897
NK
7371 dm_state = dm_atomic_get_new_state(state);
7372 if (dm_state && dm_state->context) {
7373 dc_state = dm_state->context;
7374 } else {
7375 /* No state changes, retain current state. */
813d20dc 7376 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
7377 ASSERT(dc_state_temp);
7378 dc_state = dc_state_temp;
7379 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7380 }
e7b07cee
HW
7381
7382 /* update changed items */
0bc9706d 7383 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 7384 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 7385
54d76575
LSL
7386 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7387 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 7388
f1ad2f5e 7389 DRM_DEBUG_DRIVER(
e7b07cee
HW
7390 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7391 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7392 "connectors_changed:%d\n",
7393 acrtc->crtc_id,
0bc9706d
LSL
7394 new_crtc_state->enable,
7395 new_crtc_state->active,
7396 new_crtc_state->planes_changed,
7397 new_crtc_state->mode_changed,
7398 new_crtc_state->active_changed,
7399 new_crtc_state->connectors_changed);
e7b07cee 7400
27b3f4fc
LSL
7401 /* Copy all transient state flags into dc state */
7402 if (dm_new_crtc_state->stream) {
7403 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7404 dm_new_crtc_state->stream);
7405 }
7406
e7b07cee
HW
7407 /* handles headless hotplug case, updating new_state and
7408 * aconnector as needed
7409 */
7410
54d76575 7411 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 7412
f1ad2f5e 7413 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 7414
54d76575 7415 if (!dm_new_crtc_state->stream) {
e7b07cee 7416 /*
b830ebc9
HW
7417 * This could happen because of issues with
7418 * userspace notification delivery.
7419 * In this case userspace tries to set a mode on a
1f6010a9
DF
7420 * display which is in fact disconnected.
7421 * dc_sink is NULL in this case on the aconnector.
b830ebc9
HW
7422 * We expect a reset mode to come soon.
7423 *
7424 * This can also happen when an unplug is done
7425 * during the resume sequence.
7426 *
7427 * In this case, we want to pretend we still
7428 * have a sink to keep the pipe running so that
7429 * hw state is consistent with the sw state
7430 */
f1ad2f5e 7431 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
7432 __func__, acrtc->base.base.id);
7433 continue;
7434 }
7435
54d76575
LSL
7436 if (dm_old_crtc_state->stream)
7437 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 7438
97028037
LP
7439 pm_runtime_get_noresume(dev->dev);
7440
e7b07cee 7441 acrtc->enabled = true;
0bc9706d
LSL
7442 acrtc->hw_mode = new_crtc_state->mode;
7443 crtc->hwmode = new_crtc_state->mode;
7444 } else if (modereset_required(new_crtc_state)) {
f1ad2f5e 7445 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 7446 /* i.e. reset mode */
8c322309 7447 if (dm_old_crtc_state->stream) {
d1ebfdd8 7448 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
7449 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7450
54d76575 7451 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8c322309 7452 }
e7b07cee
HW
7453 }
7454 } /* for_each_crtc_in_state() */
7455
eb3dc897
NK
7456 if (dc_state) {
7457 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 7458 mutex_lock(&dm->dc_lock);
eb3dc897 7459 WARN_ON(!dc_commit_state(dm->dc, dc_state));
674e78ac 7460 mutex_unlock(&dm->dc_lock);
fa2123db 7461 }
e7b07cee 7462
0bc9706d 7463 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 7464 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 7465
54d76575 7466 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 7467
54d76575 7468 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 7469 const struct dc_stream_status *status =
54d76575 7470 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 7471
eb3dc897 7472 if (!status)
09f609c3
LL
7473 status = dc_stream_get_status_from_state(dc_state,
7474 dm_new_crtc_state->stream);
eb3dc897 7475
e7b07cee 7476 if (!status)
54d76575 7477 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
7478 else
7479 acrtc->otg_inst = status->primary_otg_inst;
7480 }
7481 }
0c8620d6
BL
7482#ifdef CONFIG_DRM_AMD_DC_HDCP
7483 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7484 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7485 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7486 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7487
7488 new_crtc_state = NULL;
7489
7490 if (acrtc)
7491 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7492
7493 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7494
7495 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7496 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7497 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7498 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7499 continue;
7500 }
7501
7502 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
7503 hdcp_update_display(
7504 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 7505 new_con_state->hdcp_content_type,
b1abe558
BL
7506 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7507 : false);
0c8620d6
BL
7508 }
7509#endif
e7b07cee 7510
02d6a6fc 7511 /* Handle connector state changes */
c2cea706 7512 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
7513 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7514 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7515 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
19afd799
NC
7516 struct dc_surface_update dummy_updates[MAX_SURFACES];
7517 struct dc_stream_update stream_update;
b232d4ed 7518 struct dc_info_packet hdr_packet;
e7b07cee 7519 struct dc_stream_status *status = NULL;
b232d4ed 7520 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 7521
19afd799
NC
7522 memset(&dummy_updates, 0, sizeof(dummy_updates));
7523 memset(&stream_update, 0, sizeof(stream_update));
7524
44d09c6a 7525 if (acrtc) {
0bc9706d 7526 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
7527 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7528 }
0bc9706d 7529
e7b07cee 7530 /* Skip any modesets/resets */
0bc9706d 7531 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
7532 continue;
7533
54d76575 7534 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
7535 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7536
b232d4ed
NK
7537 scaling_changed = is_scaling_state_different(dm_new_con_state,
7538 dm_old_con_state);
7539
7540 abm_changed = dm_new_crtc_state->abm_level !=
7541 dm_old_crtc_state->abm_level;
7542
7543 hdr_changed =
7544 is_hdr_metadata_different(old_con_state, new_con_state);
7545
7546 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 7547 continue;
e7b07cee 7548
b6e881c9 7549 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 7550 if (scaling_changed) {
02d6a6fc 7551 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 7552 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 7553
02d6a6fc
DF
7554 stream_update.src = dm_new_crtc_state->stream->src;
7555 stream_update.dst = dm_new_crtc_state->stream->dst;
7556 }
7557
b232d4ed 7558 if (abm_changed) {
02d6a6fc
DF
7559 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7560
7561 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7562 }
70e8ffc5 7563
b232d4ed
NK
7564 if (hdr_changed) {
7565 fill_hdr_info_packet(new_con_state, &hdr_packet);
7566 stream_update.hdr_static_metadata = &hdr_packet;
7567 }
7568
54d76575 7569 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 7570 WARN_ON(!status);
3be5262e 7571 WARN_ON(!status->plane_count);
e7b07cee 7572
02d6a6fc
DF
7573 /*
7574 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7575 * Here we create an empty update on each plane.
7576 * To fix this, DC should permit updating only stream properties.
7577 */
7578 for (j = 0; j < status->plane_count; j++)
7579 dummy_updates[j].surface = status->plane_states[0];
7580
7581
7582 mutex_lock(&dm->dc_lock);
7583 dc_commit_updates_for_stream(dm->dc,
7584 dummy_updates,
7585 status->plane_count,
7586 dm_new_crtc_state->stream,
7587 &stream_update,
7588 dc_state);
7589 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
7590 }
7591
b5e83f6f 7592 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 7593 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 7594 new_crtc_state, i) {
fe2a1965
LP
7595 if (old_crtc_state->active && !new_crtc_state->active)
7596 crtc_disable_count++;
7597
54d76575 7598 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 7599 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 7600
057be086
NK
7601 /* Update freesync active state. */
7602 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7603
66b0c973
MK
7604 /* Handle vrr on->off / off->on transitions */
7605 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7606 dm_new_crtc_state);
e7b07cee
HW
7607 }
7608
b5e83f6f
NK
7609 /* Enable interrupts for CRTCs going through a modeset. */
7610 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
e7b07cee 7611
420cd472 7612 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 7613 if (new_crtc_state->async_flip)
420cd472
DF
7614 wait_for_vblank = false;
7615
e7b07cee 7616 /* update planes when needed per crtc */
5cc6dcbd 7617 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 7618 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 7619
54d76575 7620 if (dm_new_crtc_state->stream)
eb3dc897 7621 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 7622 dm, crtc, wait_for_vblank);
e7b07cee
HW
7623 }
7624
b5e83f6f
NK
7625 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7626 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
e7b07cee 7627
6ce8f316
NK
7628 /* Update audio instances for each connector. */
7629 amdgpu_dm_commit_audio(dev, state);
7630
e7b07cee
HW
7631 /*
7632 * send vblank event on all events not handled in flip and
7633 * mark consumed event for drm_atomic_helper_commit_hw_done
7634 */
7635 spin_lock_irqsave(&adev->ddev->event_lock, flags);
0bc9706d 7636 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 7637
0bc9706d
LSL
7638 if (new_crtc_state->event)
7639 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 7640
0bc9706d 7641 new_crtc_state->event = NULL;
e7b07cee
HW
7642 }
7643 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7644
29c8f234
LL
7645 /* Signal HW programming completion */
7646 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
7647
7648 if (wait_for_vblank)
320a1274 7649 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
7650
7651 drm_atomic_helper_cleanup_planes(dev, state);
97028037 7652
1f6010a9
DF
7653 /*
7654 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
7655 * so we can put the GPU into runtime suspend if we're not driving any
7656 * displays anymore
7657 */
fe2a1965
LP
7658 for (i = 0; i < crtc_disable_count; i++)
7659 pm_runtime_put_autosuspend(dev->dev);
97028037 7660 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
7661
7662 if (dc_state_temp)
7663 dc_release_state(dc_state_temp);
e7b07cee
HW
7664}
7665
7666
7667static int dm_force_atomic_commit(struct drm_connector *connector)
7668{
7669 int ret = 0;
7670 struct drm_device *ddev = connector->dev;
7671 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7672 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7673 struct drm_plane *plane = disconnected_acrtc->base.primary;
7674 struct drm_connector_state *conn_state;
7675 struct drm_crtc_state *crtc_state;
7676 struct drm_plane_state *plane_state;
7677
7678 if (!state)
7679 return -ENOMEM;
7680
7681 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7682
7683 /* Construct an atomic state to restore previous display setting */
7684
7685 /*
7686 * Attach connectors to drm_atomic_state
7687 */
7688 conn_state = drm_atomic_get_connector_state(state, connector);
7689
7690 ret = PTR_ERR_OR_ZERO(conn_state);
7691 if (ret)
7692 goto err;
7693
7694 /* Attach crtc to drm_atomic_state*/
7695 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7696
7697 ret = PTR_ERR_OR_ZERO(crtc_state);
7698 if (ret)
7699 goto err;
7700
7701 /* force a restore */
7702 crtc_state->mode_changed = true;
7703
7704 /* Attach plane to drm_atomic_state */
7705 plane_state = drm_atomic_get_plane_state(state, plane);
7706
7707 ret = PTR_ERR_OR_ZERO(plane_state);
7708 if (ret)
7709 goto err;
7710
7711
7712 /* Call commit internally with the state we just constructed */
7713 ret = drm_atomic_commit(state);
7714 if (!ret)
7715 return 0;
7716
7717err:
7718 DRM_ERROR("Restoring old state failed with %i\n", ret);
7719 drm_atomic_state_put(state);
7720
7721 return ret;
7722}
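
dm_force_atomic_commit() funnels each acquisition through PTR_ERR_OR_ZERO() so a single error path suffices. A rough standalone equivalent of that macro, assuming the kernel's ERR_PTR convention of packing -errno into the top 4095 addresses:

static long ptr_err_or_zero(const void *ptr)
{
	unsigned long v = (unsigned long)ptr;

	/* >= -MAX_ERRNO (4095) means "encoded error", as in IS_ERR(). */
	return v >= (unsigned long)-4095 ? (long)v : 0;
}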
7723
7724/*
1f6010a9
DF
7725 * This function handles all cases when set mode does not come upon hotplug.
7726 * This includes when a display is unplugged then plugged back into the
7727 * same port and when running without usermode desktop manager support
e7b07cee 7728 */
3ee6b26b
AD
7729void dm_restore_drm_connector_state(struct drm_device *dev,
7730 struct drm_connector *connector)
e7b07cee 7731{
c84dec2f 7732 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
7733 struct amdgpu_crtc *disconnected_acrtc;
7734 struct dm_crtc_state *acrtc_state;
7735
7736 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7737 return;
7738
7739 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
7740 if (!disconnected_acrtc)
7741 return;
e7b07cee 7742
70e8ffc5
HW
7743 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7744 if (!acrtc_state->stream)
e7b07cee
HW
7745 return;
7746
7747 /*
7748 * If the previous sink is not released and different from the current,
7749 * we deduce we are in a state where we cannot rely on a usermode call
7750 * to turn on the display, so we do it here.
7751 */
7752 if (acrtc_state->stream->sink != aconnector->dc_sink)
7753 dm_force_atomic_commit(&aconnector->base);
7754}
7755
1f6010a9 7756/*
e7b07cee
HW
7757 * Grabs all modesetting locks to serialize against any blocking commits,
7758 * and waits for completion of all non-blocking commits.
7759 */
3ee6b26b
AD
7760static int do_aquire_global_lock(struct drm_device *dev,
7761 struct drm_atomic_state *state)
e7b07cee
HW
7762{
7763 struct drm_crtc *crtc;
7764 struct drm_crtc_commit *commit;
7765 long ret;
7766
1f6010a9
DF
7767 /*
7768 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
7769 * ensure that when the framework releases it, the
7770 * extra locks we are locking here will get released too
7771 */
7772 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7773 if (ret)
7774 return ret;
7775
7776 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7777 spin_lock(&crtc->commit_lock);
7778 commit = list_first_entry_or_null(&crtc->commit_list,
7779 struct drm_crtc_commit, commit_entry);
7780 if (commit)
7781 drm_crtc_commit_get(commit);
7782 spin_unlock(&crtc->commit_lock);
7783
7784 if (!commit)
7785 continue;
7786
1f6010a9
DF
7787 /*
7788 * Make sure all pending HW programming completed and
e7b07cee
HW
7789 * page flips done
7790 */
7791 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7792
7793 if (ret > 0)
7794 ret = wait_for_completion_interruptible_timeout(
7795 &commit->flip_done, 10*HZ);
7796
7797 if (ret == 0)
7798 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 7799 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
7800
7801 drm_crtc_commit_put(commit);
7802 }
7803
7804 return ret < 0 ? ret : 0;
7805}
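
do_aquire_global_lock() relies on the kernel convention for wait_for_completion_interruptible_timeout(): negative means interrupted by a signal, zero means timed out, positive means completed with jiffies to spare. A sketch of chaining two waits under that convention, with a hypothetical wait callback:

/* which = 0 waits for hw_done, which = 1 for flip_done (hypothetical). */
static long wait_both(long (*wait)(int which, long timeout), long timeout)
{
	long ret = wait(0, timeout);

	if (ret > 0)                    /* hw_done completed */
		ret = wait(1, timeout); /* now wait for flip_done */

	return ret;                     /* 0 still reads as "timed out" */
}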
7806
bb47de73
NK
7807static void get_freesync_config_for_crtc(
7808 struct dm_crtc_state *new_crtc_state,
7809 struct dm_connector_state *new_con_state)
98e6436d
AK
7810{
7811 struct mod_freesync_config config = {0};
98e6436d
AK
7812 struct amdgpu_dm_connector *aconnector =
7813 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 7814 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 7815 int vrefresh = drm_mode_vrefresh(mode);
98e6436d 7816
a057ec46 7817 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
7818 vrefresh >= aconnector->min_vfreq &&
7819 vrefresh <= aconnector->max_vfreq;
bb47de73 7820
a057ec46
IB
7821 if (new_crtc_state->vrr_supported) {
7822 new_crtc_state->stream->ignore_msa_timing_param = true;
bb47de73 7823 config.state = new_crtc_state->base.vrr_enabled ?
98e6436d
AK
7824 VRR_STATE_ACTIVE_VARIABLE :
7825 VRR_STATE_INACTIVE;
7826 config.min_refresh_in_uhz =
7827 aconnector->min_vfreq * 1000000;
7828 config.max_refresh_in_uhz =
7829 aconnector->max_vfreq * 1000000;
69ff8845 7830 config.vsif_supported = true;
180db303 7831 config.btr = true;
98e6436d
AK
7832 }
7833
bb47de73
NK
7834 new_crtc_state->freesync_config = config;
7835}
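
A minimal sketch of the range check and the Hz to micro-Hz scaling used above; names and values are hypothetical:

#include <stdbool.h>

static bool freesync_range_ok(int vrefresh_hz, int min_hz, int max_hz,
			      unsigned int *min_uhz, unsigned int *max_uhz)
{
	if (vrefresh_hz < min_hz || vrefresh_hz > max_hz)
		return false;

	/* DC expects refresh limits in micro-Hz, hence the 1,000,000 scale. */
	*min_uhz = (unsigned int)min_hz * 1000000u;
	*max_uhz = (unsigned int)max_hz * 1000000u;
	return true;
}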
98e6436d 7836
bb47de73
NK
7837static void reset_freesync_config_for_crtc(
7838 struct dm_crtc_state *new_crtc_state)
7839{
7840 new_crtc_state->vrr_supported = false;
98e6436d 7841
180db303
NK
7842 memset(&new_crtc_state->vrr_params, 0,
7843 sizeof(new_crtc_state->vrr_params));
bb47de73
NK
7844 memset(&new_crtc_state->vrr_infopacket, 0,
7845 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
7846}
7847
4b9674e5
LL
7848static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7849 struct drm_atomic_state *state,
7850 struct drm_crtc *crtc,
7851 struct drm_crtc_state *old_crtc_state,
7852 struct drm_crtc_state *new_crtc_state,
7853 bool enable,
7854 bool *lock_and_validation_needed)
e7b07cee 7855{
eb3dc897 7856 struct dm_atomic_state *dm_state = NULL;
54d76575 7857 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 7858 struct dc_stream_state *new_stream;
62f55537 7859 int ret = 0;
d4d4a645 7860
1f6010a9
DF
7861 /*
7862 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7863 * update changed items
7864 */
4b9674e5
LL
7865 struct amdgpu_crtc *acrtc = NULL;
7866 struct amdgpu_dm_connector *aconnector = NULL;
7867 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7868 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 7869
4b9674e5 7870 new_stream = NULL;
9635b754 7871
4b9674e5
LL
7872 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7873 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7874 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 7875 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 7876
4b9674e5
LL
7877 /* TODO This hack should go away */
7878 if (aconnector && enable) {
7879 /* Make sure fake sink is created in plug-in scenario */
7880 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7881 &aconnector->base);
7882 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7883 &aconnector->base);
19f89e23 7884
4b9674e5
LL
7885 if (IS_ERR(drm_new_conn_state)) {
7886 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7887 goto fail;
7888 }
19f89e23 7889
4b9674e5
LL
7890 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7891 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 7892
02d35a67
JFZ
7893 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7894 goto skip_modeset;
7895
cbd14ae7
SW
7896 new_stream = create_validate_stream_for_sink(aconnector,
7897 &new_crtc_state->mode,
7898 dm_new_conn_state,
7899 dm_old_crtc_state->stream);
19f89e23 7900
4b9674e5
LL
7901 /*
7902 * We can have no stream on ACTION_SET if a display
7903 * was disconnected during S3; in this case it is not an
7904 * error. The OS will be updated after detection and
7905 * will do the right thing on the next atomic commit.
7906 */
19f89e23 7907
4b9674e5
LL
7908 if (!new_stream) {
7909 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7910 __func__, acrtc->base.base.id);
7911 ret = -ENOMEM;
7912 goto fail;
7913 }
e7b07cee 7914
4b9674e5 7915 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 7916
88694af9
NK
7917 ret = fill_hdr_info_packet(drm_new_conn_state,
7918 &new_stream->hdr_static_metadata);
7919 if (ret)
7920 goto fail;
7921
7e930949
NK
7922 /*
7923 * If we already removed the old stream from the context
7924 * (and set the new stream to NULL) then we can't reuse
7925 * the old stream even if the stream and scaling are unchanged.
7926 * We'll hit the BUG_ON and black screen.
7927 *
7928 * TODO: Refactor this function to allow this check to work
7929 * in all conditions.
7930 */
7931 if (dm_new_crtc_state->stream &&
7932 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
7933 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7934 new_crtc_state->mode_changed = false;
7935 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7936 new_crtc_state->mode_changed);
62f55537 7937 }
4b9674e5 7938 }
b830ebc9 7939
02d35a67 7940 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
7941 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7942 goto skip_modeset;
e7b07cee 7943
4b9674e5
LL
7944 DRM_DEBUG_DRIVER(
7945 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7946 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7947 "connectors_changed:%d\n",
7948 acrtc->crtc_id,
7949 new_crtc_state->enable,
7950 new_crtc_state->active,
7951 new_crtc_state->planes_changed,
7952 new_crtc_state->mode_changed,
7953 new_crtc_state->active_changed,
7954 new_crtc_state->connectors_changed);
62f55537 7955
4b9674e5
LL
7956 /* Remove stream for any changed/disabled CRTC */
7957 if (!enable) {
62f55537 7958
4b9674e5
LL
7959 if (!dm_old_crtc_state->stream)
7960 goto skip_modeset;
eb3dc897 7961
4b9674e5
LL
7962 ret = dm_atomic_get_state(state, &dm_state);
7963 if (ret)
7964 goto fail;
e7b07cee 7965
4b9674e5
LL
7966 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7967 crtc->base.id);
62f55537 7968
4b9674e5
LL
7969 /* i.e. reset mode */
7970 if (dc_remove_stream_from_ctx(
7971 dm->dc,
7972 dm_state->context,
7973 dm_old_crtc_state->stream) != DC_OK) {
7974 ret = -EINVAL;
7975 goto fail;
7976 }
62f55537 7977
4b9674e5
LL
7978 dc_stream_release(dm_old_crtc_state->stream);
7979 dm_new_crtc_state->stream = NULL;
bb47de73 7980
4b9674e5 7981 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 7982
4b9674e5 7983 *lock_and_validation_needed = true;
62f55537 7984
4b9674e5
LL
7985 } else {/* Add stream for any updated/enabled CRTC */
7986 /*
7987 * Quick fix to prevent a NULL pointer dereference on new_stream when
7988 * newly added MST connectors are not found in the existing crtc_state in chained mode.
7989 * TODO: need to dig out the root cause of this.
7990 */
7991 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7992 goto skip_modeset;
62f55537 7993
4b9674e5
LL
7994 if (modereset_required(new_crtc_state))
7995 goto skip_modeset;
62f55537 7996
4b9674e5
LL
7997 if (modeset_required(new_crtc_state, new_stream,
7998 dm_old_crtc_state->stream)) {
62f55537 7999
4b9674e5 8000 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 8001
4b9674e5
LL
8002 ret = dm_atomic_get_state(state, &dm_state);
8003 if (ret)
8004 goto fail;
27b3f4fc 8005
4b9674e5 8006 dm_new_crtc_state->stream = new_stream;
62f55537 8007
4b9674e5 8008 dc_stream_retain(new_stream);
1dc90497 8009
4b9674e5
LL
8010 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8011 crtc->base.id);
1dc90497 8012
4b9674e5
LL
8013 if (dc_add_stream_to_ctx(
8014 dm->dc,
8015 dm_state->context,
8016 dm_new_crtc_state->stream) != DC_OK) {
8017 ret = -EINVAL;
8018 goto fail;
9b690ef3
BL
8019 }
8020
4b9674e5
LL
8021 *lock_and_validation_needed = true;
8022 }
8023 }
e277adc5 8024
4b9674e5
LL
8025skip_modeset:
8026 /* Release extra reference */
8027 if (new_stream)
8028 dc_stream_release(new_stream);
e277adc5 8029
4b9674e5
LL
8030 /*
8031 * We want to do dc stream updates that do not require a
8032 * full modeset below.
8033 */
8034 if (!(enable && aconnector && new_crtc_state->enable &&
8035 new_crtc_state->active))
8036 return 0;
8037 /*
8038 * Given above conditions, the dc state cannot be NULL because:
8039 * 1. We're in the process of enabling CRTCs (just been added
8040 * to the dc context, or it is already in the context)
8041 * 2. Has a valid connector attached, and
8042 * 3. Is currently active and enabled.
8043 * => The dc stream state currently exists.
8044 */
8045 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 8046
4b9674e5
LL
8047 /* Scaling or underscan settings */
8048 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8049 update_stream_scaling_settings(
8050 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 8051
b05e2c5e
DF
8052 /* ABM settings */
8053 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8054
4b9674e5
LL
8055 /*
8056 * Color management settings. We also update color properties
8057 * when a modeset is needed, to ensure it gets reprogrammed.
8058 */
8059 if (dm_new_crtc_state->base.color_mgmt_changed ||
8060 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 8061 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
8062 if (ret)
8063 goto fail;
62f55537 8064 }
e7b07cee 8065
4b9674e5
LL
8066 /* Update Freesync settings. */
8067 get_freesync_config_for_crtc(dm_new_crtc_state,
8068 dm_new_conn_state);
8069
62f55537 8070 return ret;
9635b754
DS
8071
8072fail:
8073 if (new_stream)
8074 dc_stream_release(new_stream);
8075 return ret;
62f55537 8076}
9b690ef3 8077
f6ff2a08
NK
8078static bool should_reset_plane(struct drm_atomic_state *state,
8079 struct drm_plane *plane,
8080 struct drm_plane_state *old_plane_state,
8081 struct drm_plane_state *new_plane_state)
8082{
8083 struct drm_plane *other;
8084 struct drm_plane_state *old_other_state, *new_other_state;
8085 struct drm_crtc_state *new_crtc_state;
8086 int i;
8087
70a1efac
NK
8088 /*
8089 * TODO: Remove this hack once the checks below are sufficient
8090 * to determine when we need to reset all the planes on
8091 * the stream.
8092 */
8093 if (state->allow_modeset)
8094 return true;
8095
f6ff2a08
NK
8096 /* Exit early if we know that we're adding or removing the plane. */
8097 if (old_plane_state->crtc != new_plane_state->crtc)
8098 return true;
8099
8100 /* old crtc == new_crtc == NULL, plane not in context. */
8101 if (!new_plane_state->crtc)
8102 return false;
8103
8104 new_crtc_state =
8105 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8106
8107 if (!new_crtc_state)
8108 return true;
8109
7316c4ad
NK
8110 /* CRTC Degamma changes currently require us to recreate planes. */
8111 if (new_crtc_state->color_mgmt_changed)
8112 return true;
8113
f6ff2a08
NK
8114 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8115 return true;
8116
8117 /*
8118 * If there are any new primary or overlay planes being added or
8119 * removed then the z-order can potentially change. To ensure
8120 * correct z-order and pipe acquisition the current DC architecture
8121 * requires us to remove and recreate all existing planes.
8122 *
8123 * TODO: Come up with a more elegant solution for this.
8124 */
8125 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8126 if (other->type == DRM_PLANE_TYPE_CURSOR)
8127 continue;
8128
8129 if (old_other_state->crtc != new_plane_state->crtc &&
8130 new_other_state->crtc != new_plane_state->crtc)
8131 continue;
8132
8133 if (old_other_state->crtc != new_other_state->crtc)
8134 return true;
8135
8136 /* TODO: Remove this once we can handle fast format changes. */
8137 if (old_other_state->fb && new_other_state->fb &&
8138 old_other_state->fb->format != new_other_state->fb->format)
8139 return true;
8140 }
8141
8142 return false;
8143}
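
A condensed view of should_reset_plane()'s decision order, with hypothetical flags standing in for the DRM state queries; each check is an early return, cheapest first:

#include <stdbool.h>

struct plane_check {
	bool allow_modeset;
	bool crtc_changed;          /* old vs. new plane CRTC differ */
	bool has_crtc;              /* new_plane_state->crtc != NULL */
	bool color_mgmt_changed;
	bool needs_modeset;
	bool zorder_or_format_risk; /* other planes added/removed/reformatted */
};

static bool model_should_reset(const struct plane_check *c)
{
	if (c->allow_modeset || c->crtc_changed)
		return true;
	if (!c->has_crtc)
		return false;
	return c->color_mgmt_changed || c->needs_modeset ||
	       c->zorder_or_format_risk;
}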
8144
9e869063
LL
8145static int dm_update_plane_state(struct dc *dc,
8146 struct drm_atomic_state *state,
8147 struct drm_plane *plane,
8148 struct drm_plane_state *old_plane_state,
8149 struct drm_plane_state *new_plane_state,
8150 bool enable,
8151 bool *lock_and_validation_needed)
62f55537 8152{
eb3dc897
NK
8153
8154 struct dm_atomic_state *dm_state = NULL;
62f55537 8155 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 8156 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 8157 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 8158 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 8159 struct amdgpu_crtc *new_acrtc;
f6ff2a08 8160 bool needs_reset;
62f55537 8161 int ret = 0;
e7b07cee 8162
9b690ef3 8163
9e869063
LL
8164 new_plane_crtc = new_plane_state->crtc;
8165 old_plane_crtc = old_plane_state->crtc;
8166 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8167 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 8168
626bf90f
SS
8169 /* TODO: Implement better atomic check for cursor plane */
8170 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8171 if (!enable || !new_plane_crtc ||
8172 drm_atomic_plane_disabling(plane->state, new_plane_state))
8173 return 0;
8174
8175 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8176
8177 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8178 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8179 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8180 new_plane_state->crtc_w, new_plane_state->crtc_h);
8181 return -EINVAL;
8182 }
8183
9e869063 8184 return 0;
626bf90f 8185 }
9b690ef3 8186
f6ff2a08
NK
8187 needs_reset = should_reset_plane(state, plane, old_plane_state,
8188 new_plane_state);
8189
9e869063
LL
8190 /* Remove any changed/removed planes */
8191 if (!enable) {
f6ff2a08 8192 if (!needs_reset)
9e869063 8193 return 0;
a7b06724 8194
9e869063
LL
8195 if (!old_plane_crtc)
8196 return 0;
62f55537 8197
9e869063
LL
8198 old_crtc_state = drm_atomic_get_old_crtc_state(
8199 state, old_plane_crtc);
8200 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 8201
9e869063
LL
8202 if (!dm_old_crtc_state->stream)
8203 return 0;
62f55537 8204
9e869063
LL
8205 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8206 plane->base.id, old_plane_crtc->base.id);
9b690ef3 8207
9e869063
LL
8208 ret = dm_atomic_get_state(state, &dm_state);
8209 if (ret)
8210 return ret;
eb3dc897 8211
9e869063
LL
8212 if (!dc_remove_plane_from_context(
8213 dc,
8214 dm_old_crtc_state->stream,
8215 dm_old_plane_state->dc_state,
8216 dm_state->context)) {
62f55537 8217
9e869063
LL
8218 ret = -EINVAL;
8219 return ret;
8220 }
e7b07cee 8221
9b690ef3 8222
9e869063
LL
8223 dc_plane_state_release(dm_old_plane_state->dc_state);
8224 dm_new_plane_state->dc_state = NULL;
1dc90497 8225
9e869063 8226 *lock_and_validation_needed = true;
1dc90497 8227
9e869063
LL
8228 } else { /* Add new planes */
8229 struct dc_plane_state *dc_new_plane_state;
1dc90497 8230
9e869063
LL
8231 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8232 return 0;
e7b07cee 8233
9e869063
LL
8234 if (!new_plane_crtc)
8235 return 0;
e7b07cee 8236
9e869063
LL
8237 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8238 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 8239
9e869063
LL
8240 if (!dm_new_crtc_state->stream)
8241 return 0;
62f55537 8242
f6ff2a08 8243 if (!needs_reset)
9e869063 8244 return 0;
62f55537 8245
8c44515b
AP
8246 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8247 if (ret)
8248 return ret;
8249
9e869063 8250 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 8251
9e869063
LL
8252 dc_new_plane_state = dc_create_plane_state(dc);
8253 if (!dc_new_plane_state)
8254 return -ENOMEM;
62f55537 8255
9e869063
LL
8256 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8257 plane->base.id, new_plane_crtc->base.id);
8c45c5db 8258
695af5f9 8259 ret = fill_dc_plane_attributes(
9e869063
LL
8260 new_plane_crtc->dev->dev_private,
8261 dc_new_plane_state,
8262 new_plane_state,
8263 new_crtc_state);
8264 if (ret) {
8265 dc_plane_state_release(dc_new_plane_state);
8266 return ret;
8267 }
62f55537 8268
9e869063
LL
8269 ret = dm_atomic_get_state(state, &dm_state);
8270 if (ret) {
8271 dc_plane_state_release(dc_new_plane_state);
8272 return ret;
8273 }
eb3dc897 8274
9e869063
LL
8275 /*
8276 * Any atomic check errors that occur after this will
8277 * not need a release. The plane state will be attached
8278 * to the stream, and therefore part of the atomic
8279 * state. It'll be released when the atomic state is
8280 * cleaned.
8281 */
8282 if (!dc_add_plane_to_context(
8283 dc,
8284 dm_new_crtc_state->stream,
8285 dc_new_plane_state,
8286 dm_state->context)) {
62f55537 8287
9e869063
LL
8288 dc_plane_state_release(dc_new_plane_state);
8289 return -EINVAL;
8290 }
8c45c5db 8291
9e869063 8292 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 8293
9e869063
LL
8294 /* Tell DC to do a full surface update every time there
8295 * is a plane change. Inefficient, but works for now.
8296 */
8297 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8298
8299 *lock_and_validation_needed = true;
62f55537 8300 }
e7b07cee
HW
8301
8302
62f55537
AG
8303 return ret;
8304}
a87fa993 8305
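/*
 * Ask DC how invasive the programming implied by the new atomic state
 * would be. UPDATE_TYPE_FAST requires no global validation, while any
 * result above UPDATE_TYPE_MED is promoted to UPDATE_TYPE_FULL, which
 * makes the caller take the global lock and revalidate the entire DC
 * state before committing.
 */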
eb3dc897 8306static int
f843b308 8307dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
eb3dc897
NK
8308 struct drm_atomic_state *state,
8309 enum surface_update_type *out_type)
8310{
f843b308 8311 struct dc *dc = dm->dc;
eb3dc897
NK
8312 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8313 int i, j, num_plane, ret = 0;
a87fa993
BL
8314 struct drm_plane_state *old_plane_state, *new_plane_state;
8315 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
d3b65841 8316 struct drm_crtc *new_plane_crtc;
a87fa993
BL
8317 struct drm_plane *plane;
8318
8319 struct drm_crtc *crtc;
8320 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8321 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8322 struct dc_stream_status *status = NULL;
a87fa993 8323 enum surface_update_type update_type = UPDATE_TYPE_FAST;
7527791e
RL
8324 struct surface_info_bundle {
8325 struct dc_surface_update surface_updates[MAX_SURFACES];
8326 struct dc_plane_info plane_infos[MAX_SURFACES];
8327 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8328 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8329 struct dc_stream_update stream_update;
8330 } *bundle;
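	/*
	 * The bundle is allocated from the heap rather than the stack: the
	 * combined surface_updates/plane_infos/scaling_infos/flip_addrs
	 * arrays are too large to keep in a kernel stack frame.
	 */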
a87fa993 8331
7527791e 8332 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
fe96b99d 8333
7527791e
RL
8334 if (!bundle) {
8335 DRM_ERROR("Failed to allocate update bundle\n");
4f712911
BL
8336		/* Set type to FULL to avoid crashing in DC */
8337 update_type = UPDATE_TYPE_FULL;
eb3dc897 8338 goto cleanup;
4f712911 8339 }
a87fa993
BL
8340
8341 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2aa632c5 8342
7527791e 8343 memset(bundle, 0, sizeof(struct surface_info_bundle));
c448a53a 8344
a87fa993
BL
8345 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8346 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8347 num_plane = 0;
8348
6836d239
NK
8349 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8350 update_type = UPDATE_TYPE_FULL;
8351 goto cleanup;
8352 }
a87fa993 8353
6836d239 8354 if (!new_dm_crtc_state->stream)
c744e974 8355 continue;
eb3dc897 8356
c744e974 8357 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
2cc450ce
NK
8358 const struct amdgpu_framebuffer *amdgpu_fb =
8359 to_amdgpu_framebuffer(new_plane_state->fb);
7527791e
RL
8360 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8361 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8362 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
2cc450ce 8363 uint64_t tiling_flags;
5888f07a 8364 bool tmz_surface = false;
2cc450ce 8365
c744e974 8366 new_plane_crtc = new_plane_state->crtc;
c744e974
NK
8367 new_dm_plane_state = to_dm_plane_state(new_plane_state);
8368 old_dm_plane_state = to_dm_plane_state(old_plane_state);
eb3dc897 8369
c744e974
NK
8370 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8371 continue;
eb3dc897 8372
6836d239
NK
8373 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8374 update_type = UPDATE_TYPE_FULL;
8375 goto cleanup;
8376 }
8377
c744e974
NK
8378 if (crtc != new_plane_crtc)
8379 continue;
8380
7527791e
RL
8381 bundle->surface_updates[num_plane].surface =
8382 new_dm_plane_state->dc_state;
c744e974
NK
8383
8384 if (new_crtc_state->mode_changed) {
7527791e
RL
8385 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8386 bundle->stream_update.src = new_dm_crtc_state->stream->src;
c744e974
NK
8387 }
8388
8389 if (new_crtc_state->color_mgmt_changed) {
7527791e 8390 bundle->surface_updates[num_plane].gamma =
c744e974 8391 new_dm_plane_state->dc_state->gamma_correction;
7527791e 8392 bundle->surface_updates[num_plane].in_transfer_func =
c744e974 8393 new_dm_plane_state->dc_state->in_transfer_func;
44efb784
SW
8394 bundle->surface_updates[num_plane].gamut_remap_matrix =
8395 &new_dm_plane_state->dc_state->gamut_remap_matrix;
7527791e 8396 bundle->stream_update.gamut_remap =
c744e974 8397 &new_dm_crtc_state->stream->gamut_remap_matrix;
7527791e 8398 bundle->stream_update.output_csc_transform =
cf020d49 8399 &new_dm_crtc_state->stream->csc_color_matrix;
7527791e 8400 bundle->stream_update.out_transfer_func =
c744e974 8401 new_dm_crtc_state->stream->out_transfer_func;
a87fa993
BL
8402 }
8403
004b3938 8404 ret = fill_dc_scaling_info(new_plane_state,
7527791e 8405 scaling_info);
004b3938
NK
8406 if (ret)
8407 goto cleanup;
8408
7527791e 8409 bundle->surface_updates[num_plane].scaling_info = scaling_info;
004b3938 8410
2cc450ce 8411 if (amdgpu_fb) {
5888f07a 8412 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
2cc450ce
NK
8413 if (ret)
8414 goto cleanup;
8415
2cc450ce
NK
8416 ret = fill_dc_plane_info_and_addr(
8417 dm->adev, new_plane_state, tiling_flags,
7527791e 8418 plane_info,
5888f07a 8419 &flip_addr->address, tmz_surface,
87b7ebc2 8420 false);
2cc450ce
NK
8421 if (ret)
8422 goto cleanup;
8423
7527791e
RL
8424 bundle->surface_updates[num_plane].plane_info = plane_info;
8425 bundle->surface_updates[num_plane].flip_addr = flip_addr;
2cc450ce
NK
8426 }
8427
c744e974
NK
8428 num_plane++;
8429 }
8430
8431 if (num_plane == 0)
8432 continue;
8433
8434 ret = dm_atomic_get_state(state, &dm_state);
8435 if (ret)
8436 goto cleanup;
8437
8438 old_dm_state = dm_atomic_get_old_state(state);
8439 if (!old_dm_state) {
8440 ret = -EINVAL;
8441 goto cleanup;
8442 }
8443
8444 status = dc_stream_get_status_from_state(old_dm_state->context,
8445 new_dm_crtc_state->stream);
7527791e 8446 bundle->stream_update.stream = new_dm_crtc_state->stream;
f843b308
NK
8447 /*
8448 * TODO: DC modifies the surface during this call so we need
8449 * to lock here - find a way to do this without locking.
8450 */
8451 mutex_lock(&dm->dc_lock);
7527791e
RL
8452 update_type = dc_check_update_surfaces_for_stream(
8453 dc, bundle->surface_updates, num_plane,
8454 &bundle->stream_update, status);
f843b308 8455 mutex_unlock(&dm->dc_lock);
c744e974
NK
8456
8457 if (update_type > UPDATE_TYPE_MED) {
a87fa993 8458 update_type = UPDATE_TYPE_FULL;
eb3dc897 8459 goto cleanup;
a87fa993
BL
8460 }
8461 }
8462
eb3dc897 8463cleanup:
7527791e 8464 kfree(bundle);
a87fa993 8465
eb3dc897
NK
8466 *out_type = update_type;
8467 return ret;
a87fa993 8468}
62f55537 8469
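/*
 * On an MST link that uses DSC, changing one stream can force a new DSC
 * configuration on every other stream sharing the link. Pull the CRTCs
 * of those sibling streams into the atomic state so they get revalidated
 * too (used for CHIP_NAVI10 and newer in amdgpu_dm_atomic_check()).
 */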
44be939f
ML
8470static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8471{
8472 struct drm_connector *connector;
8473 struct drm_connector_state *conn_state;
8474 struct amdgpu_dm_connector *aconnector = NULL;
8475 int i;
8476 for_each_new_connector_in_state(state, connector, conn_state, i) {
8477 if (conn_state->crtc != crtc)
8478 continue;
8479
8480 aconnector = to_amdgpu_dm_connector(connector);
8481 if (!aconnector->port || !aconnector->mst_port)
8482 aconnector = NULL;
8483 else
8484 break;
8485 }
8486
8487 if (!aconnector)
8488 return 0;
8489
8490 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8491}
8492
b8592b48
LL
8493/**
8494 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8495 * @dev: The DRM device
8496 * @state: The atomic state to commit
8497 *
8498 * Validate that the given atomic state is programmable by DC into hardware.
8499 * This involves constructing a &struct dc_state reflecting the new hardware
8500 * state we wish to commit, then querying DC to see if it is programmable. It's
8501 * important not to modify the existing DC state. Otherwise, atomic_check
8502 * may unexpectedly commit hardware changes.
8503 *
8504 * When validating the DC state, it's important that the right locks are
8505 * acquired. For the full-update case, which removes/adds/updates streams on
8506 * one CRTC while flipping on another CRTC, acquiring the global lock guarantees
8507 * that any such full-update commit will wait for completion of any outstanding
8508 * flip using DRM's synchronization events. See
8509 * dm_determine_update_type_for_commit().
8510 *
8511 * Note that DM adds the affected connectors for all CRTCs in state, even when
8512 * that might not seem necessary. This is because DC stream creation requires the
8513 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8514 * be possible but non-trivial - a possible TODO item.
8515 *
8516 * Return: 0 on success, or a negative error code if validation failed.
8517 */
7578ecda
AD
8518static int amdgpu_dm_atomic_check(struct drm_device *dev,
8519 struct drm_atomic_state *state)
62f55537 8520{
62f55537 8521 struct amdgpu_device *adev = dev->dev_private;
eb3dc897 8522 struct dm_atomic_state *dm_state = NULL;
62f55537 8523 struct dc *dc = adev->dm.dc;
62f55537 8524 struct drm_connector *connector;
c2cea706 8525 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 8526 struct drm_crtc *crtc;
fc9e9920 8527 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
8528 struct drm_plane *plane;
8529 struct drm_plane_state *old_plane_state, *new_plane_state;
a87fa993
BL
8530 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8531 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
74a16675 8532 enum dc_status status;
1e88ad0a 8533 int ret, i;
e7b07cee 8534
62f55537
AG
8535 /*
8536	 * This bool is set to true for any modeset/reset
8537	 * or plane update that implies a non-fast surface update.
8538 */
8539 bool lock_and_validation_needed = false;
8540
8541 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
8542 if (ret)
8543 goto fail;
62f55537 8544
44be939f
ML
8545 if (adev->asic_type >= CHIP_NAVI10) {
8546 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8547 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8548 ret = add_affected_mst_dsc_crtcs(state, crtc);
8549 if (ret)
8550 goto fail;
8551 }
8552 }
8553 }
8554
1e88ad0a
S
8555 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8556 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 8557 !new_crtc_state->color_mgmt_changed &&
a93587b3 8558 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
1e88ad0a 8559 continue;
7bef1af3 8560
1e88ad0a
S
8561 if (!new_crtc_state->enable)
8562 continue;
fc9e9920 8563
1e88ad0a
S
8564 ret = drm_atomic_add_affected_connectors(state, crtc);
8565 if (ret)
8566 return ret;
fc9e9920 8567
1e88ad0a
S
8568 ret = drm_atomic_add_affected_planes(state, crtc);
8569 if (ret)
8570 goto fail;
e7b07cee
HW
8571 }
8572
2d9e6431
NK
8573 /*
8574 * Add all primary and overlay planes on the CRTC to the state
8575 * whenever a plane is enabled to maintain correct z-ordering
8576 * and to enable fast surface updates.
8577 */
8578 drm_for_each_crtc(crtc, dev) {
8579 bool modified = false;
8580
8581 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8582 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8583 continue;
8584
8585 if (new_plane_state->crtc == crtc ||
8586 old_plane_state->crtc == crtc) {
8587 modified = true;
8588 break;
8589 }
8590 }
8591
8592 if (!modified)
8593 continue;
8594
8595 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8596 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8597 continue;
8598
8599 new_plane_state =
8600 drm_atomic_get_plane_state(state, plane);
8601
8602 if (IS_ERR(new_plane_state)) {
8603 ret = PTR_ERR(new_plane_state);
8604 goto fail;
8605 }
8606 }
8607 }
8608
62f55537 8609 /* Remove exiting planes if they are modified */
9e869063
LL
8610 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8611 ret = dm_update_plane_state(dc, state, plane,
8612 old_plane_state,
8613 new_plane_state,
8614 false,
8615 &lock_and_validation_needed);
8616 if (ret)
8617 goto fail;
62f55537
AG
8618 }
8619
8620 /* Disable all crtcs which require disable */
4b9674e5
LL
8621 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8622 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8623 old_crtc_state,
8624 new_crtc_state,
8625 false,
8626 &lock_and_validation_needed);
8627 if (ret)
8628 goto fail;
62f55537
AG
8629 }
8630
8631 /* Enable all crtcs which require enable */
4b9674e5
LL
8632 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8633 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8634 old_crtc_state,
8635 new_crtc_state,
8636 true,
8637 &lock_and_validation_needed);
8638 if (ret)
8639 goto fail;
62f55537
AG
8640 }
8641
8642 /* Add new/modified planes */
9e869063
LL
8643 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8644 ret = dm_update_plane_state(dc, state, plane,
8645 old_plane_state,
8646 new_plane_state,
8647 true,
8648 &lock_and_validation_needed);
8649 if (ret)
8650 goto fail;
62f55537
AG
8651 }
8652
b349f76e
ES
8653 /* Run this here since we want to validate the streams we created */
8654 ret = drm_atomic_helper_check_planes(dev, state);
8655 if (ret)
8656 goto fail;
62f55537 8657
43d10d30
NK
8658 if (state->legacy_cursor_update) {
8659 /*
8660 * This is a fast cursor update coming from the plane update
8661 * helper, check if it can be done asynchronously for better
8662 * performance.
8663 */
8664 state->async_update =
8665 !drm_atomic_helper_async_check(dev, state);
8666
8667 /*
8668 * Skip the remaining global validation if this is an async
8669 * update. Cursor updates can be done without affecting
8670 * state or bandwidth calcs and this avoids the performance
8671 * penalty of locking the private state object and
8672 * allocating a new dc_state.
8673 */
8674 if (state->async_update)
8675 return 0;
8676 }
8677
ebdd27e1 8679	/* Check scaling and underscan changes */
1f6010a9 8679	/* TODO: Scaling-changes validation was removed due to the inability to commit
e7b07cee
HW
8680	 * a new stream into the context w/o causing a full reset. Need to
8681 * decide how to handle.
8682 */
c2cea706 8683 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
8684 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8685 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8686 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
8687
8688 /* Skip any modesets/resets */
0bc9706d
LSL
8689 if (!acrtc || drm_atomic_crtc_needs_modeset(
8690 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
8691 continue;
8692
b830ebc9 8693		/* Skip anything that is not a scaling or underscan change */
54d76575 8694 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
8695 continue;
8696
a87fa993 8697 overall_update_type = UPDATE_TYPE_FULL;
e7b07cee
HW
8698 lock_and_validation_needed = true;
8699 }
8700
f843b308 8701 ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
eb3dc897
NK
8702 if (ret)
8703 goto fail;
a87fa993
BL
8704
8705 if (overall_update_type < update_type)
8706 overall_update_type = update_type;
8707
8708 /*
8709 * lock_and_validation_needed was an old way to determine if we need to set
8710 * the global lock. Leaving it in to check if we broke any corner cases
8711 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8712 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8713 */
8714 if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8715		WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
e7b07cee 8716
a87fa993 8717 if (overall_update_type > UPDATE_TYPE_FAST) {
eb3dc897
NK
8718 ret = dm_atomic_get_state(state, &dm_state);
8719 if (ret)
8720 goto fail;
e7b07cee
HW
8721
8722 ret = do_aquire_global_lock(dev, state);
8723 if (ret)
8724 goto fail;
1dc90497 8725
d9fe1a4c 8726#if defined(CONFIG_DRM_AMD_DC_DCN)
8c20a1ed
DF
8727 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8728 goto fail;
8729
29b9ba74
ML
8730 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8731 if (ret)
8732 goto fail;
d9fe1a4c 8733#endif
29b9ba74 8734
ded58c7b
ZL
8735 /*
8736 * Perform validation of MST topology in the state:
8737 * We need to perform MST atomic check before calling
8738		 * dc_validate_global_state(); otherwise there is a chance
8739		 * of getting stuck in an infinite loop and eventually hanging.
8740 */
8741 ret = drm_dp_mst_atomic_check(state);
8742 if (ret)
8743 goto fail;
74a16675
RS
8744 status = dc_validate_global_state(dc, dm_state->context, false);
8745 if (status != DC_OK) {
8746 DC_LOG_WARNING("DC global validation failure: %s (%d)",
8747 dc_status_to_str(status), status);
e7b07cee
HW
8748 ret = -EINVAL;
8749 goto fail;
8750 }
bd200d19 8751 } else {
674e78ac 8752 /*
bd200d19
NK
8753 * The commit is a fast update. Fast updates shouldn't change
8754 * the DC context, affect global validation, and can have their
8755 * commit work done in parallel with other commits not touching
8756 * the same resource. If we have a new DC context as part of
8757 * the DM atomic state from validation we need to free it and
8758 * retain the existing one instead.
674e78ac 8759 */
bd200d19
NK
8760 struct dm_atomic_state *new_dm_state, *old_dm_state;
8761
8762 new_dm_state = dm_atomic_get_new_state(state);
8763 old_dm_state = dm_atomic_get_old_state(state);
8764
8765 if (new_dm_state && old_dm_state) {
8766 if (new_dm_state->context)
8767 dc_release_state(new_dm_state->context);
8768
8769 new_dm_state->context = old_dm_state->context;
8770
8771 if (old_dm_state->context)
8772 dc_retain_state(old_dm_state->context);
8773 }
e7b07cee
HW
8774 }
8775
caff0e66
NK
8776 /* Store the overall update type for use later in atomic check. */
8777 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8778 struct dm_crtc_state *dm_new_crtc_state =
8779 to_dm_crtc_state(new_crtc_state);
8780
8781 dm_new_crtc_state->update_type = (int)overall_update_type;
e7b07cee
HW
8782 }
8783
8784	/* Must succeed */
8785 WARN_ON(ret);
8786 return ret;
8787
8788fail:
8789 if (ret == -EDEADLK)
01e28f9c 8790 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 8791 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 8792 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 8793 else
01e28f9c 8794		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee
HW
8795
8796 return ret;
8797}
8798
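/*
 * FreeSync over DP works by varying the vertical blanking interval, which
 * requires a sink that ignores the MSA timing parameters. Bit 6
 * (MSA_TIMING_PAR_IGNORED) of the DOWN_STREAM_PORT_COUNT DPCD register
 * advertises that capability.
 */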
3ee6b26b
AD
8799static bool is_dp_capable_without_timing_msa(struct dc *dc,
8800 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
8801{
8802 uint8_t dpcd_data;
8803 bool capable = false;
8804
c84dec2f 8805 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
8806 dm_helpers_dp_read_dpcd(
8807 NULL,
c84dec2f 8808 amdgpu_dm_connector->dc_link,
e7b07cee
HW
8809 DP_DOWN_STREAM_PORT_COUNT,
8810 &dpcd_data,
8811 sizeof(dpcd_data))) {
8812		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8813 }
8814
8815 return capable;
8816}
98e6436d
AK
8817void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8818 struct edid *edid)
e7b07cee
HW
8819{
8820 int i;
e7b07cee
HW
8821 bool edid_check_required;
8822 struct detailed_timing *timing;
8823 struct detailed_non_pixel *data;
8824 struct detailed_data_monitor_range *range;
c84dec2f
HW
8825 struct amdgpu_dm_connector *amdgpu_dm_connector =
8826 to_amdgpu_dm_connector(connector);
bb47de73 8827 struct dm_connector_state *dm_con_state = NULL;
e7b07cee
HW
8828
8829 struct drm_device *dev = connector->dev;
8830 struct amdgpu_device *adev = dev->dev_private;
bb47de73 8831 bool freesync_capable = false;
b830ebc9 8832
8218d7f1
HW
8833 if (!connector->state) {
8834 DRM_ERROR("%s - Connector has no state", __func__);
bb47de73 8835 goto update;
8218d7f1
HW
8836 }
8837
98e6436d
AK
8838 if (!edid) {
8839 dm_con_state = to_dm_connector_state(connector->state);
8840
8841 amdgpu_dm_connector->min_vfreq = 0;
8842 amdgpu_dm_connector->max_vfreq = 0;
8843 amdgpu_dm_connector->pixel_clock_mhz = 0;
8844
bb47de73 8845 goto update;
98e6436d
AK
8846 }
8847
8218d7f1
HW
8848 dm_con_state = to_dm_connector_state(connector->state);
8849
e7b07cee 8850 edid_check_required = false;
c84dec2f 8851 if (!amdgpu_dm_connector->dc_sink) {
e7b07cee 8852		DRM_ERROR("dc_sink NULL, could not add freesync module.\n");
bb47de73 8853 goto update;
e7b07cee
HW
8854 }
8855 if (!adev->dm.freesync_module)
bb47de73 8856 goto update;
e7b07cee
HW
8857 /*
8858	 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP only.
8859 */
8860 if (edid) {
c84dec2f
HW
8861 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8862 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
e7b07cee
HW
8863 edid_check_required = is_dp_capable_without_timing_msa(
8864 adev->dm.dc,
c84dec2f 8865 amdgpu_dm_connector);
e7b07cee
HW
8866 }
8867 }
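	/* The range-limits descriptor is only parsed for EDID 1.2 and newer;
	 * it may occupy any of the EDID's four detailed timing descriptor
	 * slots, hence the loop below.
	 */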
e7b07cee
HW
8868	if (edid_check_required && (edid->version > 1 ||
8869 (edid->version == 1 && edid->revision > 1))) {
8870 for (i = 0; i < 4; i++) {
8871
8872 timing = &edid->detailed_timings[i];
8873 data = &timing->data.other_data;
8874 range = &data->data.range;
8875 /*
8876 * Check if monitor has continuous frequency mode
8877 */
8878 if (data->type != EDID_DETAIL_MONITOR_RANGE)
8879 continue;
8880 /*
8881			 * Check for range-limits flags only. If flags == 1, no
8882			 * additional timing information is provided.
8883			 * Default GTF, GTF secondary curve and CVT are not
8884			 * supported.
8885 */
8886 if (range->flags != 1)
8887 continue;
8888
c84dec2f
HW
8889 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8890 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8891 amdgpu_dm_connector->pixel_clock_mhz =
e7b07cee
HW
8892 range->pixel_clock_mhz * 10;
8893 break;
8894 }
8895
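		/* Require a usable VRR window: a range wider than 10 Hz
		 * (e.g. 40-60 Hz) counts as FreeSync capable, while a
		 * nominal 56-60 Hz range does not.
		 */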
c84dec2f 8896 if (amdgpu_dm_connector->max_vfreq -
98e6436d
AK
8897 amdgpu_dm_connector->min_vfreq > 10) {
8898
bb47de73 8899 freesync_capable = true;
e7b07cee
HW
8900 }
8901 }
bb47de73
NK
8902
8903update:
8904 if (dm_con_state)
8905 dm_con_state->freesync_capable = freesync_capable;
8906
8907 if (connector->vrr_capable_property)
8908 drm_connector_set_vrr_capable_property(connector,
8909 freesync_capable);
e7b07cee
HW
8910}
8911
8c322309
RL
8912static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8913{
8914 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8915
8916 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8917 return;
8918 if (link->type == dc_connection_none)
8919 return;
8920 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8921 dpcd_data, sizeof(dpcd_data))) {
d1ebfdd8
WW
8922 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8923
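		/* DP_PSR_SUPPORT reports the sink's PSR version: zero means
		 * PSR is unsupported, and any non-zero value is treated as
		 * PSR1 here.
		 */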
8924 if (dpcd_data[0] == 0) {
1cfbbdde 8925 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
d1ebfdd8
WW
8926 link->psr_settings.psr_feature_enabled = false;
8927 } else {
1cfbbdde 8928 link->psr_settings.psr_version = DC_PSR_VERSION_1;
d1ebfdd8
WW
8929 link->psr_settings.psr_feature_enabled = true;
8930 }
8931
8932 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
8933 }
8934}
8935
8936/*
8937 * amdgpu_dm_link_setup_psr() - configure psr link
8938 * @stream: stream state
8939 *
8940 * Return: true on success
8941 */
8942static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8943{
8944 struct dc_link *link = NULL;
8945 struct psr_config psr_config = {0};
8946 struct psr_context psr_context = {0};
8c322309
RL
8947 bool ret = false;
8948
8949 if (stream == NULL)
8950 return false;
8951
8952 link = stream->link;
8c322309 8953
d1ebfdd8 8954 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8c322309
RL
8955
8956 if (psr_config.psr_version > 0) {
8957 psr_config.psr_exit_link_training_required = 0x1;
8958 psr_config.psr_frame_capture_indication_req = 0;
8959 psr_config.psr_rfb_setup_time = 0x37;
8960 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8961 psr_config.allow_smu_optimizations = 0x0;
8962
8963 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8964
8965 }
d1ebfdd8 8966 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
8967
8968 return ret;
8969}
8970
8971/*
8972 * amdgpu_dm_psr_enable() - enable psr f/w
8973 * @stream: stream state
8974 *
8975 * Return: true on success
8976 */
8977bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8978{
8979 struct dc_link *link = stream->link;
5b5abe95
AK
8980 unsigned int vsync_rate_hz = 0;
8981 struct dc_static_screen_params params = {0};
8982	/* Calculate the number of static frames before generating an interrupt
8983	 * to enter PSR.
8984 */
5b5abe95
AK
8985	// Initialize with a fail-safe of 2 static frames
8986 unsigned int num_frames_static = 2;
8c322309
RL
8987
8988 DRM_DEBUG_DRIVER("Enabling psr...\n");
8989
5b5abe95
AK
8990 vsync_rate_hz = div64_u64(div64_u64((
8991 stream->timing.pix_clk_100hz * 100),
8992 stream->timing.v_total),
8993 stream->timing.h_total);
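	/* For example, a 1920x1080@60 mode with pix_clk_100hz == 1485000,
	 * v_total == 1125 and h_total == 2200 gives
	 * 148500000 / 1125 / 2200 = 60 Hz.
	 */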
8994
8995	/* Round up.
8996	 * Calculate the number of frames such that at least 30 ms of time
8997	 * has passed.
8998 */
7aa62404
RL
8999 if (vsync_rate_hz != 0) {
9000 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
5b5abe95 9001 num_frames_static = (30000 / frame_time_microsec) + 1;
7aa62404 9002 }
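	/* At 60 Hz, frame_time_microsec == 16666, so num_frames_static
	 * becomes 30000 / 16666 + 1 = 2 frames.
	 */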
5b5abe95
AK
9003
9004 params.triggers.cursor_update = true;
9005 params.triggers.overlay_update = true;
9006 params.triggers.surface_update = true;
9007 params.num_frames = num_frames_static;
8c322309 9008
5b5abe95 9009 dc_stream_set_static_screen_params(link->ctx->dc,
8c322309 9010 &stream, 1,
5b5abe95 9011 &params);
8c322309
RL
9012
9013 return dc_link_set_psr_allow_active(link, true, false);
9014}
9015
9016/*
9017 * amdgpu_dm_psr_disable() - disable psr f/w
9018 * @stream: stream state
9019 *
9020 * Return: true on success
9021 */
9022static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9023{
9024
9025 DRM_DEBUG_DRIVER("Disabling psr...\n");
9026
9027 return dc_link_set_psr_allow_active(stream->link, false, true);
9028}