/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

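/*
 * dm_crtc_get_scanoutpos
 *
 * @brief
 * Get the scanout position and vblank window for a given CRTC
 *
 * Values are read from DC and packed back into the reg-style format
 * (low word | high word << 16) that the base driver expects.
 */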
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

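/*
 * Map an OTG (output timing generator) instance back to its amdgpu_crtc.
 * Falls back to the first CRTC, with a warning, if otg_inst is -1.
 */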
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

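/* True if the CRTC state is in one of the active VRR (freesync) modes. */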
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

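/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * In VRR mode, core vblank handling is deferred to this interrupt, which
 * fires after the end of the front-porch, so that vblank timestamps are
 * valid; BTR processing is also done here for pre-DCE12 ASICs.
 */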
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * only when done after the front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc_state->stream,
						&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc_state->stream,
						&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only then will vblank timestamping give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

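/*
 * DRM audio component callback: copy the ELD of the connector driving
 * @port into @buf for the HDA driver and report whether the pin is enabled.
 */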
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

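/*
 * dm_dmub_hw_init - bring up the DMUB (display microcontroller) service.
 *
 * Copies firmware, BIOS and scratch regions into framebuffer memory,
 * programs the hardware through dmub_srv, waits for the firmware auto-load
 * to finish, and hooks the resulting DMUB service into the DC context.
 * Returns 0 on ASICs without DMUB support.
 */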
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the fw_inst_const part of the
	 * dmub firmware to cw0; otherwise, the firmware back door load
	 * will be done here by dm_dmub_hw_init.
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

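/*
 * Request the DMCU firmware for ASICs that ship a separate DMCU image and
 * register it with the PSP loader; ASICs without one simply return 0.
 */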
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

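/*
 * dm_dmub_sw_init - software-side DMUB setup.
 *
 * Requests and validates the DMUB firmware for the ASIC, creates the
 * dmub_srv service, computes the memory regions the firmware needs, and
 * allocates a single VRAM buffer that all regions are rebased onto.
 */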
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
#endif

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

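/*
 * Walk all connectors and start MST topology management on those whose DC
 * link is an MST branch; on failure the link falls back to a single
 * connection.
 */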
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret;

	if (!adev->dm.fw_dmcu)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	ret = dmcu_load_iram(dmcu, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}

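/*
 * Suspend or resume the MST topology managers around S3. Managers that
 * fail to resume are torn down and a hotplug event is sent so userspace
 * re-probes the topology.
 */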
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}


static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

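/*
 * Enable or disable pageflip and vblank interrupts for every stream in
 * @state that still has planes; used to quiesce and restore IRQs around
 * GPU reset.
 */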
static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

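/*
 * Commit a DC state with all planes and streams removed, blanking the
 * display hardware while the previous state stays cached for resume.
 */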
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (adev->in_gpu_reset) {
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

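/*
 * Return the first connector in @state whose new state is bound to @crtc,
 * or NULL if there is none.
 */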
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

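/*
 * Emulate sink detection for links where real detection is not performed:
 * mark the link disconnected, create a sink matching the connector signal
 * type (DP maps to a virtual sink) and try to read a local EDID for it.
 */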
fbbdadf2
BL
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}

static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

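	/*
	 * Re-submit every surface with force_full_update set so DC
	 * reprograms all plane state that was lost across the GPU reset.
	 */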
	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (adev->in_gpu_reset) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

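		/*
		 * Mark every stream and plane in the cached state as changed
		 * so the commit below fully reprograms the hardware after
		 * the reset.
		 */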
		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
				dc_state->stream_status->plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * This is the case when traversing through already created
		 * MST connectors; they should be skipped.
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	conn_base = &aconnector->base;
	adev = conn_base->dev->dev_private;
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would require floating-point
	 * precision; to avoid that, we take advantage of the fact that CV is
	 * divided by a constant. From Euclid's division algorithm, we know
	 * that CV can be written as CV = 32*q + r. Substituting CV in the
	 * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
	 * to pre-compute the values of 2**(r/32). They were generated with
	 * the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and can be verified against pre_computed_values.
	 */
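	/* Worked example: max_cll = 96 gives q = 3 and r = 0, so
	 * max = (1 << 3) * pre_computed_values[0] = 8 * 50 = 400 nits.
	 */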
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on
	 * link status. Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless, use the emulated sink to fake a
		 * stream, because on resume connector->sink is set to NULL.
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain/release below bump the sink's
				 * refcount because the link no longer points
				 * to it after disconnect; otherwise the next
				 * crtc-to-connector reshuffle by UMD would
				 * trigger an unwanted dc_sink release.
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = dev->dev_private;
#endif

	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}

static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

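	/*
	 * Keep handling and ACKing MST ESI events until the sink stops
	 * raising new IRQs, bounded by max_process_count so a misbehaving
	 * sink cannot keep us in this loop forever.
	 */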
	while (dret == dpcd_bytes_to_read &&
			process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = dev->dev_private;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO: Temporarily hold this mutex so the HPD interrupt does not
	 * race over the GPIO; once an i2c helper is implemented, this mutex
	 * should be retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);


#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}
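/*
 * Walk all connectors and hook each link's HPD and HPD RX (DP short
 * pulse) interrupt sources up to the DM handlers above.
 */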
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
			i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

static struct dm_atomic_state *
dm_atomic_get_old_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *old_obj_state;
	int i;

	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(old_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

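	/*
	 * Deep-copy the DC context so atomic check can build the new global
	 * state without mutating the context currently applied to the
	 * hardware.
	 */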
	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev->ddev,
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	r = amdgpu_dm_audio_init(adev);
	if (r)
		return r;

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
{
	bool rc;

	if (!link)
		return 1;

	rc = dc_link_set_backlight_level_nits(link, true, brightness,
					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);

	return rc ? 0 : 1;
}

static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
			      const uint32_t user_brightness)
{
	u32 min, max, conversion_pace;
	u32 brightness = user_brightness;

	if (!caps)
		goto out;

	if (!caps->aux_support) {
		max = caps->max_input_signal;
		min = caps->min_input_signal;
		/*
		 * The brightness input is in the range 0-255.
		 * It needs to be rescaled to be between the
		 * requested min and max input signal.
		 * It also needs to be scaled up by 0x101 to
		 * match the DC interface which has a range of
		 * 0 to 0xffff.
		 */
		conversion_pace = 0x101;
		brightness =
			user_brightness
			* conversion_pace
			* (max - min)
			/ AMDGPU_MAX_BL_LEVEL
			+ min * conversion_pace;
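		/*
		 * With the default caps (min = 12, max = 255), user input 255
		 * maps to 255 * 257 * 243 / 255 + 12 * 257 = 0xffff, and
		 * user input 0 maps to 12 * 257 = 3084.
		 */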
	} else {
		/*
		 * TODO: We are doing a linear interpolation here, which is OK
		 * but does not provide the optimal result. We probably want
		 * something close to the Perceptual Quantizer (PQ) curve.
		 */
		max = caps->aux_max_input_signal;
		min = caps->aux_min_input_signal;

		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
				+ user_brightness * max;
		// Multiply the value by 1000 since we use millinits
		brightness *= 1000;
		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
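		/*
		 * Endpoints of the interpolation: user input 0 yields
		 * min * 1000 millinits, user input AMDGPU_MAX_BL_LEVEL (255)
		 * yields max * 1000 millinits.
		 */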
	}

out:
	return brightness;
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = NULL;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	link = (struct dc_link *)dm->backlight_link;

	brightness = convert_brightness(&caps, bd->props.brightness);
	// Change brightness based on AUX property
	if (caps.aux_support)
		return set_backlight_via_aux(link, brightness);

	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);

	return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int ret = dc_link_get_backlight_level(dm->backlight_link);

	if (ret == DC_ERROR_UNEXPECTED)
		return bd->props.brightness;
	return ret;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm);

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}


static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}


/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
7578ecda 3069static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
3070{
3071 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 3072 int32_t i;
c84dec2f 3073 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 3074 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 3075 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 3076 uint32_t link_cnt;
cc1fec57 3077 int32_t primary_planes;
fbbdadf2 3078 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 3079 const struct dc_plane_cap *plane;
4562236b
HW
3080
3081 link_cnt = dm->dc->caps.max_links;
4562236b
HW
3082 if (amdgpu_dm_mode_config_init(dm->adev)) {
3083 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 3084 return -EINVAL;
4562236b
HW
3085 }
3086
b2fddb13
NK
3087 /* There is one primary plane per CRTC */
3088 primary_planes = dm->dc->caps.max_streams;
54087768 3089 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 3090
b2fddb13
NK
3091 /*
3092 * Initialize primary planes, implicit planes for legacy IOCTLS.
3093 * Order is reversed to match iteration order in atomic check.
3094 */
3095 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
3096 plane = &dm->dc->caps.planes[i];
3097
b2fddb13 3098 if (initialize_plane(dm, mode_info, i,
cc1fec57 3099 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 3100 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 3101 goto fail;
d4e13b0d 3102 }
df534fff 3103 }
92f3ac40 3104
0d579c7e
NK
3105 /*
3106 * Initialize overlay planes, index starting after primary planes.
3107 * These planes have a higher DRM index than the primary planes since
3108 * they should be considered as having a higher z-order.
3109 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
3110 *
3111 * Only support DCN for now, and only expose one so we don't encourage
3112 * userspace to use up all the pipes.
0d579c7e 3113 */
cc1fec57
NK
3114 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3115 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3116
3117 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3118 continue;
3119
3120 if (!plane->blends_with_above || !plane->blends_with_below)
3121 continue;
3122
ea36ad34 3123 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
3124 continue;
3125
54087768 3126 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 3127 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 3128 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 3129 goto fail;
d4e13b0d 3130 }
cc1fec57
NK
3131
3132 /* Only create one overlay plane. */
3133 break;
d4e13b0d 3134 }
4562236b 3135
d4e13b0d 3136 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 3137 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 3138 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 3139 goto fail;
4562236b 3140 }
4562236b 3141
ab2541b6 3142 dm->display_indexes_num = dm->dc->caps.max_streams;
4562236b
HW
3143
3144 /* loops over all connectors on the board */
3145 for (i = 0; i < link_cnt; i++) {
89fc8d4e 3146 struct dc_link *link = NULL;
4562236b
HW
3147
3148 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3149 DRM_ERROR(
3150 "KMS: Cannot support more than %d display indexes\n",
3151 AMDGPU_DM_MAX_DISPLAY_INDEX);
3152 continue;
3153 }
3154
3155 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3156 if (!aconnector)
cd8a2ae8 3157 goto fail;
4562236b
HW
3158
3159 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 3160 if (!aencoder)
cd8a2ae8 3161 goto fail;
4562236b
HW
3162
3163 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3164 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 3165 goto fail;
4562236b
HW
3166 }
3167
3168 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3169 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 3170 goto fail;
4562236b
HW
3171 }
3172
89fc8d4e
HW
3173 link = dc_get_link_at_index(dm->dc, i);
3174
fbbdadf2
BL
3175 if (!dc_link_detect_sink(link, &new_connection_type))
3176 DRM_ERROR("KMS: Failed to detect connector\n");
3177
3178 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3179 emulated_link_detect(link);
3180 amdgpu_dm_update_connector_after_detect(aconnector);
3181
3182 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 3183 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 3184 register_backlight_device(dm, link);
397a9bc5
RL
3185 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3186 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
3187 }
3188
3189
4562236b
HW
3190 }
3191
3192 /* Software is initialized. Now we can register interrupt handlers. */
3193 switch (adev->asic_type) {
3194 case CHIP_BONAIRE:
3195 case CHIP_HAWAII:
cd4b356f
AD
3196 case CHIP_KAVERI:
3197 case CHIP_KABINI:
3198 case CHIP_MULLINS:
4562236b
HW
3199 case CHIP_TONGA:
3200 case CHIP_FIJI:
3201 case CHIP_CARRIZO:
3202 case CHIP_STONEY:
3203 case CHIP_POLARIS11:
3204 case CHIP_POLARIS10:
b264d345 3205 case CHIP_POLARIS12:
7737de91 3206 case CHIP_VEGAM:
2c8ad2d5 3207 case CHIP_VEGA10:
2325ff30 3208 case CHIP_VEGA12:
1fe6bf2f 3209 case CHIP_VEGA20:
4562236b
HW
3210 if (dce110_register_irq_handlers(dm->adev)) {
3211 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3212 goto fail;
4562236b
HW
3213 }
3214 break;
b86a1aa3 3215#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3216 case CHIP_RAVEN:
fbd2afe5 3217 case CHIP_NAVI12:
476e955d 3218 case CHIP_NAVI10:
fce651e3 3219 case CHIP_NAVI14:
30221ad8 3220 case CHIP_RENOIR:
79037324
BL
3221#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3222 case CHIP_SIENNA_CICHLID:
3223#endif
ff5ef992
AD
3224 if (dcn10_register_irq_handlers(dm->adev)) {
3225 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3226 goto fail;
ff5ef992
AD
3227 }
3228 break;
3229#endif
4562236b 3230 default:
e63f8673 3231 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 3232 goto fail;
4562236b
HW
3233 }
3234
2d673560
NK
3235 /* No userspace support. */
3236 dm->dc->debug.disable_tri_buf = true;
3237
4562236b 3238 return 0;
cd8a2ae8 3239fail:
4562236b 3240 kfree(aencoder);
4562236b 3241 kfree(aconnector);
54087768 3242
59d0f396 3243 return -EINVAL;
4562236b
HW
3244}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
	case CHIP_NAVI10:
	case CHIP_NAVI12:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
#endif
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_RENOIR:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
3418
static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	if (!crtc_state->enable)
		return false;

	return crtc_state->active;
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	return !crtc_state->enable || !crtc_state->active;
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is in 16.16 fixed point; ignore the fractional part for now. */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* TODO: Validate scaling per-format with DC plane caps */
	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < 250 || scale_w > 16000)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < 250 || scale_h > 16000)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
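
/*
 * Illustrative sketch (not part of the driver): the 16.16 fixed-point
 * conversion and per-mille scale check used above, as a standalone helper.
 * The 250..16000 window corresponds to the 0.25x..16x scaling range accepted
 * here. The helper name is hypothetical.
 */
static int example_scale_permille(unsigned int src_w_16_16, unsigned int dst_w)
{
	unsigned int src_w = src_w_16_16 >> 16;	/* drop the fractional bits */
	int scale;

	if (src_w == 0)
		return -1;

	scale = dst_w * 1000 / src_w;	/* e.g. 1920 -> 3840 gives 2000 (2.0x) */

	return (scale >= 250 && scale <= 16000) ? scale : -1;
}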

static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
		       uint64_t *tiling_flags, bool *tmz_surface)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	int r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	if (tmz_surface)
		*tmz_surface = amdgpu_bo_encrypted(rbo);

	amdgpu_bo_unreserve(rbo);

	return r;
}

static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
{
	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);

	return offset ? (address + offset * 256) : 0;
}
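
/*
 * Illustrative sketch (not part of the driver): the DCC metadata offset is
 * stored in the tiling flags in 256-byte units, so a raw offset field of
 * 0x40 on a surface at 0x100000 yields 0x100000 + 0x40 * 256 = 0x104000.
 * Hypothetical standalone equivalent:
 */
static unsigned long long example_dcc_address(unsigned long long base,
					      unsigned int offset_256b)
{
	/* offset 0 means "no DCC metadata", not "metadata at base" */
	return offset_256b ? base + (unsigned long long)offset_256b * 256 : 0;
}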

static int
fill_plane_dcc_attributes(struct amdgpu_device *adev,
			  const struct amdgpu_framebuffer *afb,
			  const enum surface_pixel_format format,
			  const enum dc_rotation_angle rotation,
			  const struct plane_size *plane_size,
			  const union dc_tiling_info *tiling_info,
			  const uint64_t info,
			  struct dc_plane_dcc_param *dcc,
			  struct dc_plane_address *address,
			  bool force_disable_dcc)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;
	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
	uint64_t dcc_address;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (force_disable_dcc)
		return 0;

	if (!offset)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	if (!dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	dcc->enable = 1;
	dcc->meta_pitch =
		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
	dcc->independent_64b_blks = i64b;

	dcc_address = get_dcc_address(afb->address, info);
	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);

	return 0;
}

static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(afb->address);
		address->grph.addr.high_part = upper_32_bits(afb->address);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(afb->address);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(afb->address);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_NAVI10 ||
	    adev->asic_type == CHIP_NAVI14 ||
	    adev->asic_type == CHIP_NAVI12 ||
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	    adev->asic_type == CHIP_SIENNA_CICHLID ||
#endif
	    adev->asic_type == CHIP_RENOIR ||
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		tiling_info->gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		tiling_info->gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		tiling_info->gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		tiling_info->gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		tiling_info->gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		tiling_info->gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		tiling_info->gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		tiling_info->gfx9.shaderEnable = 1;

#ifdef CONFIG_DRM_AMD_DC_DCN3_0
		if (adev->asic_type == CHIP_SIENNA_CICHLID)
			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;

#endif
		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
						plane_size, tiling_info,
						tiling_flags, dcc, address,
						force_disable_dcc);
		if (ret)
			return ret;
	}

	return 0;
}
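
/*
 * Illustrative sketch (not part of the driver): AMDGPU_TILING_GET() above is
 * a shift-and-mask bitfield extractor over the 64-bit tiling flags. A
 * hypothetical standalone equivalent for one made-up 5-bit field:
 */
#define EXAMPLE_FIELD_SHIFT 0
#define EXAMPLE_FIELD_MASK  0x1fULL	/* 5-bit field at bit 0 */

static unsigned int example_tiling_get(unsigned long long tiling_flags)
{
	return (tiling_flags >> EXAMPLE_FIELD_SHIFT) & EXAMPLE_FIELD_MASK;
}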

static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	const struct amdgpu_framebuffer *amdgpu_fb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	uint64_t tiling_flags;
	int ret;
	bool tmz_surface = false;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
	if (ret)
		return ret;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* No mode: nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			 dst.x, dst.y, dst.width, dst.height);

}
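
/*
 * Illustrative sketch (not part of the driver): the RMX_ASPECT branch above
 * shrinks one destination axis to preserve the source aspect ratio, then
 * centers the result. For a 1280x1024 (5:4) source on a 1920x1080 (16:9)
 * stream, 1280*1080 < 1024*1920, so dst.width = 1280 * 1080 / 1024 = 1350
 * and the image is pillarboxed at x = (1920 - 1350) / 2 = 285. Hypothetical
 * standalone version:
 */
static void example_aspect_fit(int src_w, int src_h, int *dst_w, int *dst_h,
			       int *dst_x, int *dst_y)
{
	int out_w = *dst_w, out_h = *dst_h;

	if (src_w * out_h < src_h * out_w)
		out_w = src_w * out_h / src_h;	/* pillarbox */
	else
		out_h = src_h * out_w / src_w;	/* letterbox */

	*dst_x = (*dst_w - out_w) / 2;
	*dst_y = (*dst_h - out_h) / 2;
	*dst_w = out_w;
	*dst_h = out_h;
}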

static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}
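
/*
 * Illustrative sketch (not part of the driver): capping bpc to the user
 * request and rounding down to an even value, as done above. bpc - (bpc & 1)
 * clears bit 0, so a capped value of 11 becomes 10 and 9 becomes 8.
 * Hypothetical standalone version:
 */
static unsigned char example_cap_bpc(unsigned char display_bpc,
				     unsigned char requested_bpc)
{
	unsigned char bpc = display_bpc;

	if (requested_bpc > 0) {
		if (requested_bpc < bpc)
			bpc = requested_bpc;
		bpc = bpc - (bpc & 1);	/* round down to even */
	}

	return bpc;
}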

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27.03 MHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
		 * respectively.
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjust pixel clock per HDMI spec, based on colour depth. */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
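
/*
 * Illustrative sketch (not part of the driver): the loop above scales the
 * pixel clock by bpp/24 and steps the colour depth down until it fits the
 * sink's max TMDS clock. For a 594000 kHz 4K@60 RGB clock and a 600000 kHz
 * TMDS limit, 12-bit needs 594000 * 36 / 24 = 891000 kHz (too high), while
 * 8-bit needs 594000 kHz and fits. Hypothetical standalone check:
 */
static int example_clk_fits_tmds(int pix_clk_khz, int bits_per_component,
				 int max_tmds_clock_khz)
{
	/* HDMI deep colour scales the TMDS clock by (3 * bpc) / 24 */
	int tmds_khz = pix_clk_khz * (bits_per_component * 3) / 24;

	return tmds_khz <= max_tmds_clock_khz;
}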

static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
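
/*
 * Illustrative sketch (not part of the driver): porch and sync widths fall
 * out of the DRM mode's crtc_* fields as above. For CEA 1920x1080@60
 * (crtc_clock 148500, hsync_start 2008, hsync_end 2052, htotal 2200),
 * h_front_porch = 2008 - 1920 = 88, h_sync_width = 2052 - 2008 = 44, and
 * pix_clk_100hz = 148500 * 10 = 1485000. Hypothetical standalone form:
 */
struct example_h_timing {
	int addressable, front_porch, sync_width, total;
};

static struct example_h_timing
example_h_timing_from_mode(int hdisplay, int hsync_start, int hsync_end,
			   int htotal)
{
	struct example_h_timing t = {
		.addressable = hdisplay,
		.front_porch = hsync_start - hdisplay,
		.sync_width  = hsync_end - hsync_start,
		.total       = htotal,
	};

	return t;
}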

static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
				(enum audio_format_code)
				(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
				edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
				edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
				edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
			native_mode->htotal == drm_mode->htotal &&
			native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* No scaling and no amdgpu-inserted mode: no need to patch. */
	}
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}
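
/*
 * Illustrative sketch (not part of the driver): set_master_stream() derives
 * the refresh rate as pixel clock / (h_total * v_total). With the clock kept
 * in 100 Hz units, multiplying by 100 converts back to Hz first, e.g.
 * 1485000 * 100 / (2200 * 1125) = 60 Hz for 1080p60. Hypothetical standalone
 * form:
 */
static int example_refresh_rate_hz(unsigned int pix_clk_100hz,
				   unsigned int h_total, unsigned int v_total)
{
	if (h_total == 0 || v_total == 0)
		return 0;

	return (pix_clk_100hz * 100) / (h_total * v_total);
}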

static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;

	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error; the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in yet.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream, requested_bpc);

	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
	if (stream->link->psr_settings.psr_feature_enabled) {
		struct dc *core_dc = stream->link->ctx->dc;

		if (dc_is_dmcu_initialized(core_dc)) {
			/*
			 * Decide whether the stream supports VSC SDP
			 * colorimetry before building the VSC info packet.
			 */
			stream->use_vsc_sdp_for_colorimetry = false;
			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
				stream->use_vsc_sdp_for_colorimetry =
					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
			} else {
				if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
					stream->use_vsc_sdp_for_colorimetry = true;
			}
			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
		}
	}
finish:
	dc_sink_release(sink);

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	crtc->state = &state->base;
	crtc->state->crtc = crtc;

}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	/* Check the current state before deriving anything from it. */
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->interrupts_enabled = cur->interrupts_enabled;
	state->vrr_params = cur->vrr_params;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;

	/* TODO: Duplicate dc_stream once the stream object is flattened */

	return &state->base;
}

static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int rc;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
			 acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}

static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}

/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl, which
	 *    makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
#if defined(CONFIG_DEBUG_FS)
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}
5024
c84dec2f 5025static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5026{
5027 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5028
1f6010a9
DF
5029 /*
5030 * In case of headless boot with force on for DP managed connector
e7b07cee
HW
5031 * Those settings have to be != 0 to get initial modeset
5032 */
5033 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5034 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5035 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5036 }
5037
5038
5039 aconnector->base.override_edid = true;
5040 create_eml_sink(aconnector);
5041}
5042
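
/*
 * Descriptive note on the helper below: it tries validating a stream at
 * the connector's requested colour depth first and, on DC validation
 * failure, lowers the request by 2 bpc and retries (e.g. 10 -> 8 -> 6),
 * giving up once the request would fall below 6 bpc.
 */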
static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	return stream;
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}
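
/*
 * Descriptive note on the helper below: it packs the connector's HDR
 * output metadata into a DC info packet. The HDMI Dynamic Range and
 * Mastering infoframe is a fixed 26-byte payload behind a 4-byte header;
 * for DP/eDP the same payload is carried in an SDP wrapper instead.
 */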
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}

static bool
is_hdr_metadata_different(const struct drm_connector_state *old_state,
			  const struct drm_connector_state *new_state)
{
	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;

	if (old_blob != new_blob) {
		if (old_blob && new_blob &&
		    old_blob->length == new_blob->length)
			return memcmp(old_blob->data, new_blob->data,
				      old_blob->length);

		return true;
	}

	return false;
}

static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * When hotplugging a second, bigger display in FB Con mode, the
	 * bigger resolution modes will be filtered by drm_mode_validate_size()
	 * and would be missing after the user starts lightdm. So we need to
	 * renew the modes list in the get_modes callback, not just return
	 * the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = new_crtc_state->crtc->dev;
	struct drm_plane *plane;

	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			return true;
	}

	return false;
}

static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}

/*
 * Sets whether interrupts should be enabled on a specific CRTC.
 * We require that the stream be enabled and that there exist active
 * DC planes on the stream.
 */
static void
dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
			       struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;
	dm_new_crtc_state->interrupts_enabled = false;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);

	dm_new_crtc_state->interrupts_enabled =
		dm_new_crtc_state->active_planes > 0;
}

static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	/*
	 * Update interrupt state for the CRTC. This needs to happen whenever
	 * the CRTC has changed or whenever any of its planes have changed.
	 * Atomic check satisfies both of these requirements since the CRTC
	 * is added to the state by DRM during drm_atomic_helper_check_planes.
	 */
	dm_update_crtc_interrupt_state(crtc, state);

	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	/*
	 * We want at least one hardware plane enabled to use
	 * the stream with a cursor enabled.
	 */
	if (state->enable && state->active &&
	    does_crtc_have_active_cursor(state) &&
	    dm_crtc_state->active_planes == 0)
		return -EINVAL;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}

static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}

static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
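
/*
 * Descriptive note on the check below: for an MST connector it
 * (re)computes the payload bandwidth number (PBN) from the adjusted mode
 * clock and the effective bits per pixel, then asks the MST manager for
 * a matching VCPI slot allocation in the atomic state.
 */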
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
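
/*
 * Descriptive note on the helper below: for streams that negotiated DSC,
 * the PBN computed from the uncompressed bpp no longer applies, so it is
 * recomputed from the DSC target bits_per_pixel and the pixel clock, and
 * DSC is flagged on the MST port in the atomic state.
 */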
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif

static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	return &dm_plane_state->base;
}

static void dm_drm_plane_destroy_state(struct drm_plane *plane,
				       struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
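
/*
 * Descriptive note on the helper below: it pins the incoming
 * framebuffer's buffer object (VRAM for cursor planes, any
 * display-supported domain otherwise), binds it into GART, and caches
 * the resulting GPU address and tiling flags so the DC plane state can
 * be filled in before the commit touches hardware.
 */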
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint64_t tiling_flags;
	uint32_t domain;
	int r;
	bool tmz_surface = false;
	bool force_disable_dcc = false;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);

	tmz_surface = amdgpu_bo_encrypted(rbo);

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			tiling_flags, &plane_state->tiling_info,
			&plane_state->plane_size, &plane_state->dcc,
			&plane_state->address, tmz_surface,
			force_disable_dcc);
	}

	return 0;
}

static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	int max_downscale = 0;
	int max_upscale = INT_MAX;

	/* TODO: These should be checked against DC plane caps */
	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, max_downscale, max_upscale, true, true);
}

static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	dm_plane_state = to_dm_plane_state(state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(state, new_crtc_state);
	if (ret)
		return ret;

	ret = fill_dc_scaling_info(state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_plane_state *new_plane_state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       NULL, plane->type, NULL);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the plane state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}


static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;

}
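
/*
 * Descriptive note on the helper below: it seeds the probed-mode list
 * with common resolutions derived from the panel's native mode, skipping
 * anything larger than the native mode, the native mode itself, and any
 * mode the EDID already provided.
 */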
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode. Modes that are later in the
		 * probed mode list could be of higher and preferred
		 * resolution, for example a 3840x2160 preferred timing in
		 * the base EDID and a 4096x2160 preferred resolution in a
		 * DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!edid || !drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. The connector->polled default
	 * value of 0 means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				   dm->ddev->mode_config.scaling_mode_property,
				   DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_property,
				   UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_hborder_property,
				   0);
	drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_vborder_property,
				   0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    dc_is_dmcu_initialized(adev->dm.dc)) {
		drm_object_attach_property(&aconnector->base.base,
					   adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
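
/*
 * Descriptive note on the helper below: it routes kernel i2c transfers
 * through DC. Each struct i2c_msg becomes one i2c_payload in a single
 * i2c_command submitted on the link's DDC channel.
 */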
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}


/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
		&aconnector->base,
		&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}
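
/* Encoder possible_crtcs bitmask: one bit per CRTC, e.g. 4 CRTCs -> 0xf. */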
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = dev->dev_private;

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * This is not a correct translation, but it works as long as the
	 * VBLANK constant is the same as PFLIP.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {

		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Check that something is connected and enabled; otherwise we would
	 * start HDCP with nothing connected or enabled (hot-plug, headless
	 * S3, dpms).
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
	    aconnector->dc_sink != NULL)
		return true;

	if (old_state->content_protection == state->content_protection)
		return false;

	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}

#endif
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
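
/*
 * Descriptive note on the helper below: it clamps the cursor position
 * into the visible area for DC. A cursor hanging off the top/left edge
 * (negative crtc_x/y) is expressed as a zero position plus a hotspot
 * offset, e.g. crtc_x = -10 becomes x = 0, x_hotspot = 10.
 */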
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part = lower_32_bits(address);
	attributes.width = plane->state->crtc_w;
	attributes.height = plane->state->crtc_h;
	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
			 acrtc->crtc_id);
}
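
/*
 * Descriptive note on the helper below: it rebuilds the VRR (freesync)
 * state for a stream around a page flip: the flip timestamp is fed to
 * the freesync module, the VRR infopacket is regenerated, and flags
 * record whether the timing adjustment or infopacket contents changed so
 * later commit code knows to push updates to DC.
 */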
bb47de73
NK
6675static void update_freesync_state_on_stream(
6676 struct amdgpu_display_manager *dm,
6677 struct dm_crtc_state *new_crtc_state,
180db303
NK
6678 struct dc_stream_state *new_stream,
6679 struct dc_plane_state *surface,
6680 u32 flip_timestamp_in_us)
bb47de73 6681{
09aef2c4 6682 struct mod_vrr_params vrr_params;
bb47de73 6683 struct dc_info_packet vrr_infopacket = {0};
09aef2c4
MK
6684 struct amdgpu_device *adev = dm->adev;
6685 unsigned long flags;
bb47de73
NK
6686
6687 if (!new_stream)
6688 return;
6689
6690 /*
6691 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6692 * For now it's sufficient to just guard against these conditions.
6693 */
6694
6695 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6696 return;
6697
09aef2c4
MK
6698 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6699 vrr_params = new_crtc_state->vrr_params;
6700
180db303
NK
6701 if (surface) {
6702 mod_freesync_handle_preflip(
6703 dm->freesync_module,
6704 surface,
6705 new_stream,
6706 flip_timestamp_in_us,
6707 &vrr_params);
09aef2c4
MK
6708
6709 if (adev->family < AMDGPU_FAMILY_AI &&
6710 amdgpu_dm_vrr_active(new_crtc_state)) {
6711 mod_freesync_handle_v_update(dm->freesync_module,
6712 new_stream, &vrr_params);
e63e2491
EB
6713
6714 /* Need to call this before the frame ends. */
6715 dc_stream_adjust_vmin_vmax(dm->dc,
6716 new_crtc_state->stream,
6717 &vrr_params.adjust);
09aef2c4 6718 }
180db303 6719 }
bb47de73
NK
6720
6721 mod_freesync_build_vrr_infopacket(
6722 dm->freesync_module,
6723 new_stream,
180db303 6724 &vrr_params,
ecd0136b
HT
6725 PACKET_TYPE_VRR,
6726 TRANSFER_FUNC_UNKNOWN,
bb47de73
NK
6727 &vrr_infopacket);
6728
8a48b44c 6729 new_crtc_state->freesync_timing_changed |=
180db303
NK
6730 (memcmp(&new_crtc_state->vrr_params.adjust,
6731 &vrr_params.adjust,
6732 sizeof(vrr_params.adjust)) != 0);
bb47de73 6733
8a48b44c 6734 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
6735 (memcmp(&new_crtc_state->vrr_infopacket,
6736 &vrr_infopacket,
6737 sizeof(vrr_infopacket)) != 0);
6738
180db303 6739 new_crtc_state->vrr_params = vrr_params;
bb47de73
NK
6740 new_crtc_state->vrr_infopacket = vrr_infopacket;
6741
180db303 6742 new_stream->adjust = new_crtc_state->vrr_params.adjust;
bb47de73
NK
6743 new_stream->vrr_infopacket = vrr_infopacket;
6744
6745 if (new_crtc_state->freesync_vrr_info_changed)
6746 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6747 new_crtc_state->base.crtc->base.id,
6748 (int)new_crtc_state->base.vrr_enabled,
180db303 6749 (int)vrr_params.state);
09aef2c4
MK
6750
6751 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
bb47de73
NK
6752}
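/*
 * Editor's sketch (not driver code, stand-in types): the freesync flags
 * above are accumulated with "|=" plus a whole-struct memcmp(), so a
 * change seen once during the commit is not cleared by later iterations
 * that observe no change:
 */
#include <stdbool.h>
#include <string.h>

struct toy_vrr_adjust { int v_total_min, v_total_max; };

static void toy_note_change(bool *changed, const struct toy_vrr_adjust *old_a,
                            const struct toy_vrr_adjust *new_a)
{
    *changed |= (memcmp(old_a, new_a, sizeof(*old_a)) != 0);
}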
6753
e854194c
MK
6754static void pre_update_freesync_state_on_stream(
6755 struct amdgpu_display_manager *dm,
6756 struct dm_crtc_state *new_crtc_state)
6757{
6758 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 6759 struct mod_vrr_params vrr_params;
e854194c 6760 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4
MK
6761 struct amdgpu_device *adev = dm->adev;
6762 unsigned long flags;
e854194c
MK
6763
6764 if (!new_stream)
6765 return;
6766
6767 /*
6768 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6769 * For now it's sufficient to just guard against these conditions.
6770 */
6771 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6772 return;
6773
09aef2c4
MK
6774 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6775 vrr_params = new_crtc_state->vrr_params;
6776
e854194c
MK
6777 if (new_crtc_state->vrr_supported &&
6778 config.min_refresh_in_uhz &&
6779 config.max_refresh_in_uhz) {
6780 config.state = new_crtc_state->base.vrr_enabled ?
6781 VRR_STATE_ACTIVE_VARIABLE :
6782 VRR_STATE_INACTIVE;
6783 } else {
6784 config.state = VRR_STATE_UNSUPPORTED;
6785 }
6786
6787 mod_freesync_build_vrr_params(dm->freesync_module,
6788 new_stream,
6789 &config, &vrr_params);
6790
6791 new_crtc_state->freesync_timing_changed |=
6792 (memcmp(&new_crtc_state->vrr_params.adjust,
6793 &vrr_params.adjust,
6794 sizeof(vrr_params.adjust)) != 0);
6795
6796 new_crtc_state->vrr_params = vrr_params;
09aef2c4 6797 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
e854194c
MK
6798}
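/*
 * Editor's sketch (not driver code): the state selection above, reduced
 * to a pure function. The three states mirror the mod_freesync values
 * used in this file; the zero checks match the config guard above:
 */
#include <stdbool.h>

enum toy_vrr_state { TOY_VRR_UNSUPPORTED, TOY_VRR_INACTIVE, TOY_VRR_ACTIVE_VARIABLE };

static enum toy_vrr_state toy_pick_vrr_state(bool supported, bool user_enabled,
                                             unsigned int min_uhz, unsigned int max_uhz)
{
    if (!supported || !min_uhz || !max_uhz)
        return TOY_VRR_UNSUPPORTED;
    return user_enabled ? TOY_VRR_ACTIVE_VARIABLE : TOY_VRR_INACTIVE;
}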
6799
66b0c973
MK
6800static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6801 struct dm_crtc_state *new_state)
6802{
6803 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6804 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6805
6806 if (!old_vrr_active && new_vrr_active) {
6807 /* Transition VRR inactive -> active:
6808 * While VRR is active, we must not disable vblank irq, as a
6809 * reenable after disable would compute bogus vblank/pflip
6810 * timestamps if it happened inside the display front-porch.
d2574c33
MK
6811 *
6812 * We also need vupdate irq for the actual core vblank handling
6813 * at end of vblank.
66b0c973 6814 */
d2574c33 6815 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
6816 drm_crtc_vblank_get(new_state->base.crtc);
6817 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6818 __func__, new_state->base.crtc->base.id);
6819 } else if (old_vrr_active && !new_vrr_active) {
6820 /* Transition VRR active -> inactive:
6821 * Allow vblank irq disable again for fixed refresh rate.
6822 */
d2574c33 6823 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
6824 drm_crtc_vblank_put(new_state->base.crtc);
6825 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6826 __func__, new_state->base.crtc->base.id);
6827 }
6828}
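/*
 * Editor's sketch (not driver code): the get/put above is edge-triggered,
 * so exactly one vblank reference is held for as long as VRR stays active
 * and none once it is off. The same invariant on a plain counter:
 */
#include <stdbool.h>

static void toy_track_vrr_ref(int *refcount, bool was_active, bool is_active)
{
    if (!was_active && is_active)
        (*refcount)++; /* off->on edge: take the reference once */
    else if (was_active && !is_active)
        (*refcount)--; /* on->off edge: drop that same reference */
}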
6829
8ad27806
NK
6830static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6831{
6832 struct drm_plane *plane;
6833 struct drm_plane_state *old_plane_state, *new_plane_state;
6834 int i;
6835
6836 /*
6837 * TODO: Make this per-stream so we don't issue redundant updates for
6838 * commits with multiple streams.
6839 */
6840 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6841 new_plane_state, i)
6842 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6843 handle_cursor_update(plane, old_plane_state);
6844}
6845
3be5262e 6846static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 6847 struct dc_state *dc_state,
3ee6b26b
AD
6848 struct drm_device *dev,
6849 struct amdgpu_display_manager *dm,
6850 struct drm_crtc *pcrtc,
420cd472 6851 bool wait_for_vblank)
e7b07cee 6852{
570c91d5 6853 uint32_t i;
8a48b44c 6854 uint64_t timestamp_ns;
e7b07cee 6855 struct drm_plane *plane;
0bc9706d 6856 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 6857 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
6858 struct drm_crtc_state *new_pcrtc_state =
6859 drm_atomic_get_new_crtc_state(state, pcrtc);
6860 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
6861 struct dm_crtc_state *dm_old_crtc_state =
6862 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 6863 int planes_count = 0, vpos, hpos;
570c91d5 6864 long r;
e7b07cee 6865 unsigned long flags;
8a48b44c 6866 struct amdgpu_bo *abo;
09e5665a 6867 uint64_t tiling_flags;
5888f07a 6868 bool tmz_surface = false;
fdd1fe57
MK
6869 uint32_t target_vblank, last_flip_vblank;
6870 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 6871 bool pflip_present = false;
bc7f670e
DF
6872 struct {
6873 struct dc_surface_update surface_updates[MAX_SURFACES];
6874 struct dc_plane_info plane_infos[MAX_SURFACES];
6875 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 6876 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 6877 struct dc_stream_update stream_update;
74aa7bd4 6878 } *bundle;
bc7f670e 6879
74aa7bd4 6880 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 6881
74aa7bd4
DF
6882 if (!bundle) {
6883 dm_error("Failed to allocate update bundle\n");
4b510503
NK
6884 goto cleanup;
6885 }
e7b07cee 6886
8ad27806
NK
6887 /*
6888 * Disable the cursor first if we're disabling all the planes.
6889 * It'll remain on the screen after the planes are re-enabled
6890 * if we don't.
6891 */
6892 if (acrtc_state->active_planes == 0)
6893 amdgpu_dm_commit_cursors(state);
6894
e7b07cee 6895 /* update planes when needed */
0bc9706d
LSL
6896 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6897 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 6898 struct drm_crtc_state *new_crtc_state;
0bc9706d 6899 struct drm_framebuffer *fb = new_plane_state->fb;
34bafd27 6900 bool plane_needs_flip;
c7af5f77 6901 struct dc_plane_state *dc_plane;
54d76575 6902 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 6903
80c218d5
NK
6904 /* Cursor plane is handled after stream updates */
6905 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 6906 continue;
e7b07cee 6907
f5ba60fe
DD
6908 if (!fb || !crtc || pcrtc != crtc)
6909 continue;
6910
6911 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6912 if (!new_crtc_state->active)
e7b07cee
HW
6913 continue;
6914
bc7f670e 6915 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 6916
74aa7bd4 6917 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 6918 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
6919 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6920 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 6921 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 6922 }
8a48b44c 6923
695af5f9
NK
6924 fill_dc_scaling_info(new_plane_state,
6925 &bundle->scaling_infos[planes_count]);
8a48b44c 6926
695af5f9
NK
6927 bundle->surface_updates[planes_count].scaling_info =
6928 &bundle->scaling_infos[planes_count];
8a48b44c 6929
f5031000 6930 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 6931
f5031000 6932 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 6933
f5031000
DF
6934 if (!plane_needs_flip) {
6935 planes_count += 1;
6936 continue;
6937 }
8a48b44c 6938
2fac0f53
CK
6939 abo = gem_to_amdgpu_bo(fb->obj[0]);
6940
f8308898
AG
6941 /*
6942 * Wait for all fences on this FB. Do a limited wait to avoid
6943 * deadlock during GPU reset, when this fence will not signal
6944 * but we hold the reservation lock for the BO.
6945 */
52791eee 6946 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 6947 false,
f8308898
AG
6948 msecs_to_jiffies(5000));
6949 if (unlikely(r <= 0))
ed8a5fb2 6950 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 6951
f5031000
DF
6952 /*
6953 * TODO: This might fail and hence is better not used; wait
6954 * explicitly on fences instead,
6955 * and in general this should be called for a
6956 * blocking commit, as per the framework helpers
6957 */
f5031000 6958 r = amdgpu_bo_reserve(abo, true);
f8308898 6959 if (unlikely(r != 0))
f5031000 6960 DRM_ERROR("failed to reserve buffer before flip\n");
8a48b44c 6961
f5031000 6962 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
8a48b44c 6963
5888f07a
HW
6964 tmz_surface = amdgpu_bo_encrypted(abo);
6965
f5031000 6966 amdgpu_bo_unreserve(abo);
8a48b44c 6967
695af5f9
NK
6968 fill_dc_plane_info_and_addr(
6969 dm->adev, new_plane_state, tiling_flags,
6970 &bundle->plane_infos[planes_count],
af031f07 6971 &bundle->flip_addrs[planes_count].address,
5888f07a 6972 tmz_surface,
af031f07
RS
6973 false);
6974
6975 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6976 new_plane_state->plane->index,
6977 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
6978
6979 bundle->surface_updates[planes_count].plane_info =
6980 &bundle->plane_infos[planes_count];
8a48b44c 6981
caff0e66
NK
6982 /*
6983 * Only allow immediate flips for fast updates that don't
6984 * change FB pitch, DCC state, rotation or mirroring.
6985 */
f5031000 6986 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 6987 crtc->state->async_flip &&
caff0e66 6988 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 6989
f5031000
DF
6990 timestamp_ns = ktime_get_ns();
6991 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6992 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6993 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 6994
f5031000
DF
6995 if (!bundle->surface_updates[planes_count].surface) {
6996 DRM_ERROR("No surface for CRTC: id=%d\n",
6997 acrtc_attach->crtc_id);
6998 continue;
bc7f670e
DF
6999 }
7000
f5031000
DF
7001 if (plane == pcrtc->primary)
7002 update_freesync_state_on_stream(
7003 dm,
7004 acrtc_state,
7005 acrtc_state->stream,
7006 dc_plane,
7007 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 7008
f5031000
DF
7009 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7010 __func__,
7011 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7012 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
7013
7014 planes_count += 1;
7015
8a48b44c
DF
7016 }
7017
74aa7bd4 7018 if (pflip_present) {
634092b1
MK
7019 if (!vrr_active) {
7020 /* Use old throttling in non-vrr fixed refresh rate mode
7021 * to keep flip scheduling based on target vblank counts
7022 * working in a backwards compatible way, e.g., for
7023 * clients using the GLX_OML_sync_control extension or
7024 * DRI3/Present extension with defined target_msc.
7025 */
e3eff4b5 7026 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
7027 }
7028 else {
7029 /* For variable refresh rate mode only:
7030 * Get vblank of last completed flip to avoid > 1 vrr
7031 * flips per video frame by use of throttling, but allow
7032 * flip programming anywhere in the possibly large
7033 * variable vrr vblank interval for fine-grained flip
7034 * timing control and more opportunity to avoid stutter
7035 * on late submission of flips.
7036 */
7037 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7038 last_flip_vblank = acrtc_attach->last_flip_vblank;
7039 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7040 }
7041
fdd1fe57 7042 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
7043
7044 /*
7045 * Wait until we're out of the vertical blank period before the one
7046 * targeted by the flip
7047 */
7048 while ((acrtc_attach->enabled &&
7049 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7050 0, &vpos, &hpos, NULL,
7051 NULL, &pcrtc->hwmode)
7052 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7053 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7054 (int)(target_vblank -
e3eff4b5 7055 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
7056 usleep_range(1000, 1100);
7057 }
7058
7059 if (acrtc_attach->base.state->event) {
7060 drm_crtc_vblank_get(pcrtc);
7061
7062 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7063
7064 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7065 prepare_flip_isr(acrtc_attach);
7066
7067 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7068 }
7069
7070 if (acrtc_state->stream) {
8a48b44c 7071 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 7072 bundle->stream_update.vrr_infopacket =
8a48b44c 7073 &acrtc_state->stream->vrr_infopacket;
e7b07cee 7074 }
e7b07cee
HW
7075 }
7076
bc92c065 7077 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
7078 if ((planes_count || acrtc_state->active_planes == 0) &&
7079 acrtc_state->stream) {
b6e881c9 7080 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 7081 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
7082 bundle->stream_update.src = acrtc_state->stream->src;
7083 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
7084 }
7085
cf020d49
NK
7086 if (new_pcrtc_state->color_mgmt_changed) {
7087 /*
7088 * TODO: This isn't fully correct since we've actually
7089 * already modified the stream in place.
7090 */
7091 bundle->stream_update.gamut_remap =
7092 &acrtc_state->stream->gamut_remap_matrix;
7093 bundle->stream_update.output_csc_transform =
7094 &acrtc_state->stream->csc_color_matrix;
7095 bundle->stream_update.out_transfer_func =
7096 acrtc_state->stream->out_transfer_func;
7097 }
bc7f670e 7098
8a48b44c 7099 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 7100 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 7101 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 7102
e63e2491
EB
7103 /*
7104 * If FreeSync state on the stream has changed then we need to
7105 * re-adjust the min/max bounds now that DC doesn't handle this
7106 * as part of commit.
7107 */
7108 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7109 amdgpu_dm_vrr_active(acrtc_state)) {
7110 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7111 dc_stream_adjust_vmin_vmax(
7112 dm->dc, acrtc_state->stream,
7113 &acrtc_state->vrr_params.adjust);
7114 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7115 }
bc7f670e 7116 mutex_lock(&dm->dc_lock);
8c322309 7117 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 7118 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
7119 amdgpu_dm_psr_disable(acrtc_state->stream);
7120
bc7f670e 7121 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 7122 bundle->surface_updates,
bc7f670e
DF
7123 planes_count,
7124 acrtc_state->stream,
74aa7bd4 7125 &bundle->stream_update,
bc7f670e 7126 dc_state);
8c322309
RL
7127
7128 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 7129 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 7130 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309
RL
7131 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7132 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
d1ebfdd8
WW
7133 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7134 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8c322309
RL
7135 amdgpu_dm_psr_enable(acrtc_state->stream);
7136 }
7137
bc7f670e 7138 mutex_unlock(&dm->dc_lock);
e7b07cee 7139 }
4b510503 7140
8ad27806
NK
7141 /*
7142 * Update cursor state *after* programming all the planes.
7143 * This avoids redundant programming in the case where we're going
7144 * to be disabling a single plane - those pipes are being disabled.
7145 */
7146 if (acrtc_state->active_planes)
7147 amdgpu_dm_commit_cursors(state);
80c218d5 7148
4b510503 7149cleanup:
74aa7bd4 7150 kfree(bundle);
e7b07cee
HW
7151}
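/*
 * Editor's sketch (not driver code): the throttling loop above tests
 * "(int)(target_vblank - current) > 0". Vblank counters are 32-bit and
 * may wrap, so subtracting first and then interpreting the delta as
 * signed stays correct across the wrap, unlike "current < target":
 */
#include <stdint.h>
#include <stdbool.h>

static bool toy_before_target_vblank(uint32_t counter, uint32_t target)
{
    return (int32_t)(target - counter) > 0;
}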
7152
6ce8f316
NK
7153static void amdgpu_dm_commit_audio(struct drm_device *dev,
7154 struct drm_atomic_state *state)
7155{
7156 struct amdgpu_device *adev = dev->dev_private;
7157 struct amdgpu_dm_connector *aconnector;
7158 struct drm_connector *connector;
7159 struct drm_connector_state *old_con_state, *new_con_state;
7160 struct drm_crtc_state *new_crtc_state;
7161 struct dm_crtc_state *new_dm_crtc_state;
7162 const struct dc_stream_status *status;
7163 int i, inst;
7164
7165 /* Notify device removals. */
7166 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7167 if (old_con_state->crtc != new_con_state->crtc) {
7168 /* CRTC changes require notification. */
7169 goto notify;
7170 }
7171
7172 if (!new_con_state->crtc)
7173 continue;
7174
7175 new_crtc_state = drm_atomic_get_new_crtc_state(
7176 state, new_con_state->crtc);
7177
7178 if (!new_crtc_state)
7179 continue;
7180
7181 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7182 continue;
7183
7184 notify:
7185 aconnector = to_amdgpu_dm_connector(connector);
7186
7187 mutex_lock(&adev->dm.audio_lock);
7188 inst = aconnector->audio_inst;
7189 aconnector->audio_inst = -1;
7190 mutex_unlock(&adev->dm.audio_lock);
7191
7192 amdgpu_dm_audio_eld_notify(adev, inst);
7193 }
7194
7195 /* Notify audio device additions. */
7196 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7197 if (!new_con_state->crtc)
7198 continue;
7199
7200 new_crtc_state = drm_atomic_get_new_crtc_state(
7201 state, new_con_state->crtc);
7202
7203 if (!new_crtc_state)
7204 continue;
7205
7206 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7207 continue;
7208
7209 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7210 if (!new_dm_crtc_state->stream)
7211 continue;
7212
7213 status = dc_stream_get_status(new_dm_crtc_state->stream);
7214 if (!status)
7215 continue;
7216
7217 aconnector = to_amdgpu_dm_connector(connector);
7218
7219 mutex_lock(&adev->dm.audio_lock);
7220 inst = status->audio_inst;
7221 aconnector->audio_inst = inst;
7222 mutex_unlock(&adev->dm.audio_lock);
7223
7224 amdgpu_dm_audio_eld_notify(adev, inst);
7225 }
7226}
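/*
 * Editor's sketch (not driver code): on removal the loop above publishes
 * the sentinel instance -1 under audio_lock before notifying, so readers
 * never observe a stale instance for an unplugged connector:
 */
static int toy_clear_audio_inst(int *inst_slot)
{
    int old = *inst_slot;

    *inst_slot = -1; /* -1 == "no audio instance", as in the removal loop */
    return old;
}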
7227
b5e83f6f
NK
7228/*
7229 * Enable interrupts on CRTCs that are newly active, have undergone
7230 * a modeset, or have active planes again.
7231 *
7232 * Done in two passes, based on the for_modeset flag:
7233 * Pass 1: For CRTCs going through modeset
7234 * Pass 2: For CRTCs going from 0 to n active planes
7235 *
7236 * Interrupts can only be enabled after the planes are programmed,
7237 * so this requires a two-pass approach since we don't want to
7238 * just defer the interrupts until after commit planes every time.
7239 */
7240static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
7241 struct drm_atomic_state *state,
7242 bool for_modeset)
7243{
7244 struct amdgpu_device *adev = dev->dev_private;
7245 struct drm_crtc *crtc;
7246 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7247 int i;
148d31e3 7248#ifdef CONFIG_DEBUG_FS
14b25846 7249 enum amdgpu_dm_pipe_crc_source source;
148d31e3 7250#endif
b5e83f6f
NK
7251
7252 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7253 new_crtc_state, i) {
7254 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7255 struct dm_crtc_state *dm_new_crtc_state =
7256 to_dm_crtc_state(new_crtc_state);
7257 struct dm_crtc_state *dm_old_crtc_state =
7258 to_dm_crtc_state(old_crtc_state);
7259 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
7260 bool run_pass;
7261
7262 run_pass = (for_modeset && modeset) ||
7263 (!for_modeset && !modeset &&
7264 !dm_old_crtc_state->interrupts_enabled);
7265
7266 if (!run_pass)
7267 continue;
7268
b5e83f6f
NK
7269 if (!dm_new_crtc_state->interrupts_enabled)
7270 continue;
7271
7272 manage_dm_interrupts(adev, acrtc, true);
7273
7274#ifdef CONFIG_DEBUG_FS
7275 /* The stream has changed so CRC capture needs to be re-enabled. */
14b25846
DZ
7276 source = dm_new_crtc_state->crc_src;
7277 if (amdgpu_dm_is_valid_crc_source(source)) {
57638021
NK
7278 amdgpu_dm_crtc_configure_crc_source(
7279 crtc, dm_new_crtc_state,
7280 dm_new_crtc_state->crc_src);
b5e83f6f
NK
7281 }
7282#endif
7283 }
7284}
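/*
 * Editor's sketch (not driver code): the run_pass predicate above,
 * isolated. Pass 1 (for_modeset == true) selects CRTCs undergoing a
 * modeset; pass 2 selects non-modeset CRTCs whose interrupts were
 * previously disabled (0 -> n active planes):
 */
#include <stdbool.h>

static bool toy_crtc_runs_in_pass(bool for_modeset, bool modeset, bool old_irq_enabled)
{
    return (for_modeset && modeset) ||
           (!for_modeset && !modeset && !old_irq_enabled);
}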
7285
1f6010a9 7286/*
27b3f4fc
LSL
7287 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7288 * @crtc_state: the DRM CRTC state
7289 * @stream_state: the DC stream state.
7290 *
7291 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7292 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7293 */
7294static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7295 struct dc_stream_state *stream_state)
7296{
b9952f93 7297 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 7298}
e7b07cee 7299
7578ecda
AD
7300static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7301 struct drm_atomic_state *state,
7302 bool nonblock)
e7b07cee
HW
7303{
7304 struct drm_crtc *crtc;
c2cea706 7305 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
7306 struct amdgpu_device *adev = dev->dev_private;
7307 int i;
7308
7309 /*
d6ef9b41
NK
7310 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7311 * a modeset, being disabled, or have no active planes.
7312 *
7313 * It's done in atomic commit rather than commit tail for now since
7314 * some of these interrupt handlers access the current CRTC state and
7315 * potentially the stream pointer itself.
7316 *
7317 * Since the atomic state is swapped within atomic commit and not within
7318 * commit tail, this would lead to the new state (that hasn't been committed yet)
7319 * being accessed from within the handlers.
7320 *
7321 * TODO: Fix this so we can do this in commit tail and not have to block
7322 * in atomic check.
e7b07cee 7323 */
c2cea706 7324 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
54d76575 7325 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
428da2bd 7326 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee
HW
7327 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7328
d6ef9b41
NK
7329 if (dm_old_crtc_state->interrupts_enabled &&
7330 (!dm_new_crtc_state->interrupts_enabled ||
57638021 7331 drm_atomic_crtc_needs_modeset(new_crtc_state)))
e7b07cee
HW
7332 manage_dm_interrupts(adev, acrtc, false);
7333 }
1f6010a9
DF
7334 /*
7335 * Add check here for SoC's that support hardware cursor plane, to
7336 * unset legacy_cursor_update
7337 */
e7b07cee
HW
7338
7339 return drm_atomic_helper_commit(dev, state, nonblock);
7340
7341 /*TODO Handle EINTR, reenable IRQ*/
7342}
7343
b8592b48
LL
7344/**
7345 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7346 * @state: The atomic state to commit
7347 *
7348 * This will tell DC to commit the constructed DC state from atomic_check,
7349 * programming the hardware. Any failures here implies a hardware failure, since
7350 * atomic check should have filtered anything non-kosher.
7351 */
7578ecda 7352static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
7353{
7354 struct drm_device *dev = state->dev;
7355 struct amdgpu_device *adev = dev->dev_private;
7356 struct amdgpu_display_manager *dm = &adev->dm;
7357 struct dm_atomic_state *dm_state;
eb3dc897 7358 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 7359 uint32_t i, j;
5cc6dcbd 7360 struct drm_crtc *crtc;
0bc9706d 7361 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
7362 unsigned long flags;
7363 bool wait_for_vblank = true;
7364 struct drm_connector *connector;
c2cea706 7365 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 7366 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 7367 int crtc_disable_count = 0;
e7b07cee
HW
7368
7369 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7370
eb3dc897
NK
7371 dm_state = dm_atomic_get_new_state(state);
7372 if (dm_state && dm_state->context) {
7373 dc_state = dm_state->context;
7374 } else {
7375 /* No state changes, retain current state. */
813d20dc 7376 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
7377 ASSERT(dc_state_temp);
7378 dc_state = dc_state_temp;
7379 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7380 }
e7b07cee
HW
7381
7382 /* update changed items */
0bc9706d 7383 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 7384 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 7385
54d76575
LSL
7386 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7387 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 7388
f1ad2f5e 7389 DRM_DEBUG_DRIVER(
e7b07cee
HW
7390 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7391 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7392 "connectors_changed:%d\n",
7393 acrtc->crtc_id,
0bc9706d
LSL
7394 new_crtc_state->enable,
7395 new_crtc_state->active,
7396 new_crtc_state->planes_changed,
7397 new_crtc_state->mode_changed,
7398 new_crtc_state->active_changed,
7399 new_crtc_state->connectors_changed);
e7b07cee 7400
27b3f4fc
LSL
7401 /* Copy all transient state flags into dc state */
7402 if (dm_new_crtc_state->stream) {
7403 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7404 dm_new_crtc_state->stream);
7405 }
7406
e7b07cee
HW
7407 /* handles headless hotplug case, updating new_state and
7408 * aconnector as needed
7409 */
7410
54d76575 7411 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 7412
f1ad2f5e 7413 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 7414
54d76575 7415 if (!dm_new_crtc_state->stream) {
e7b07cee 7416 /*
b830ebc9
HW
7417 * this could happen because of issues with
7418 * userspace notifications delivery.
7419 * In this case userspace tries to set a mode on a
1f6010a9
DF
7420 * display which is in fact disconnected.
7421 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
7422 * We expect reset mode will come soon.
7423 *
7424 * This can also happen when unplug is done
7425 * during the resume sequence
7426 *
7427 * In this case, we want to pretend we still
7428 * have a sink to keep the pipe running so that
7429 * hw state is consistent with the sw state
7430 */
f1ad2f5e 7431 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
7432 __func__, acrtc->base.base.id);
7433 continue;
7434 }
7435
54d76575
LSL
7436 if (dm_old_crtc_state->stream)
7437 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 7438
97028037
LP
7439 pm_runtime_get_noresume(dev->dev);
7440
e7b07cee 7441 acrtc->enabled = true;
0bc9706d
LSL
7442 acrtc->hw_mode = new_crtc_state->mode;
7443 crtc->hwmode = new_crtc_state->mode;
7444 } else if (modereset_required(new_crtc_state)) {
f1ad2f5e 7445 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 7446 /* i.e. reset mode */
8c322309 7447 if (dm_old_crtc_state->stream) {
d1ebfdd8 7448 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
7449 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7450
54d76575 7451 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8c322309 7452 }
e7b07cee
HW
7453 }
7454 } /* for_each_crtc_in_state() */
7455
eb3dc897
NK
7456 if (dc_state) {
7457 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 7458 mutex_lock(&dm->dc_lock);
eb3dc897 7459 WARN_ON(!dc_commit_state(dm->dc, dc_state));
674e78ac 7460 mutex_unlock(&dm->dc_lock);
fa2123db 7461 }
e7b07cee 7462
0bc9706d 7463 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 7464 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 7465
54d76575 7466 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 7467
54d76575 7468 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 7469 const struct dc_stream_status *status =
54d76575 7470 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 7471
eb3dc897 7472 if (!status)
09f609c3
LL
7473 status = dc_stream_get_status_from_state(dc_state,
7474 dm_new_crtc_state->stream);
eb3dc897 7475
e7b07cee 7476 if (!status)
54d76575 7477 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
7478 else
7479 acrtc->otg_inst = status->primary_otg_inst;
7480 }
7481 }
0c8620d6
BL
7482#ifdef CONFIG_DRM_AMD_DC_HDCP
7483 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7484 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7485 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7486 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7487
7488 new_crtc_state = NULL;
7489
7490 if (acrtc)
7491 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7492
7493 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7494
7495 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7496 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7497 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7498 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7499 continue;
7500 }
7501
7502 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
7503 hdcp_update_display(
7504 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 7505 new_con_state->hdcp_content_type,
b1abe558
BL
7506 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7507 : false);
0c8620d6
BL
7508 }
7509#endif
e7b07cee 7510
02d6a6fc 7511 /* Handle connector state changes */
c2cea706 7512 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
7513 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7514 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7515 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
19afd799
NC
7516 struct dc_surface_update dummy_updates[MAX_SURFACES];
7517 struct dc_stream_update stream_update;
b232d4ed 7518 struct dc_info_packet hdr_packet;
e7b07cee 7519 struct dc_stream_status *status = NULL;
b232d4ed 7520 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 7521
19afd799
NC
7522 memset(&dummy_updates, 0, sizeof(dummy_updates));
7523 memset(&stream_update, 0, sizeof(stream_update));
7524
44d09c6a 7525 if (acrtc) {
0bc9706d 7526 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
7527 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7528 }
0bc9706d 7529
e7b07cee 7530 /* Skip any modesets/resets */
0bc9706d 7531 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
7532 continue;
7533
54d76575 7534 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
7535 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7536
b232d4ed
NK
7537 scaling_changed = is_scaling_state_different(dm_new_con_state,
7538 dm_old_con_state);
7539
7540 abm_changed = dm_new_crtc_state->abm_level !=
7541 dm_old_crtc_state->abm_level;
7542
7543 hdr_changed =
7544 is_hdr_metadata_different(old_con_state, new_con_state);
7545
7546 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 7547 continue;
e7b07cee 7548
b6e881c9 7549 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 7550 if (scaling_changed) {
02d6a6fc 7551 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 7552 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 7553
02d6a6fc
DF
7554 stream_update.src = dm_new_crtc_state->stream->src;
7555 stream_update.dst = dm_new_crtc_state->stream->dst;
7556 }
7557
b232d4ed 7558 if (abm_changed) {
02d6a6fc
DF
7559 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7560
7561 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7562 }
70e8ffc5 7563
b232d4ed
NK
7564 if (hdr_changed) {
7565 fill_hdr_info_packet(new_con_state, &hdr_packet);
7566 stream_update.hdr_static_metadata = &hdr_packet;
7567 }
7568
54d76575 7569 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 7570 WARN_ON(!status);
3be5262e 7571 WARN_ON(!status->plane_count);
e7b07cee 7572
02d6a6fc
DF
7573 /*
7574 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7575 * Here we create an empty update on each plane.
7576 * To fix this, DC should permit updating only stream properties.
7577 */
7578 for (j = 0; j < status->plane_count; j++)
7579 dummy_updates[j].surface = status->plane_states[0];
7580
7581
7582 mutex_lock(&dm->dc_lock);
7583 dc_commit_updates_for_stream(dm->dc,
7584 dummy_updates,
7585 status->plane_count,
7586 dm_new_crtc_state->stream,
7587 &stream_update,
7588 dc_state);
7589 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
7590 }
7591
b5e83f6f 7592 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 7593 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 7594 new_crtc_state, i) {
fe2a1965
LP
7595 if (old_crtc_state->active && !new_crtc_state->active)
7596 crtc_disable_count++;
7597
54d76575 7598 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 7599 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 7600
057be086
NK
7601 /* Update freesync active state. */
7602 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7603
66b0c973
MK
7604 /* Handle vrr on->off / off->on transitions */
7605 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7606 dm_new_crtc_state);
e7b07cee
HW
7607 }
7608
b5e83f6f
NK
7609 /* Enable interrupts for CRTCs going through a modeset. */
7610 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
e7b07cee 7611
420cd472 7612 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 7613 if (new_crtc_state->async_flip)
420cd472
DF
7614 wait_for_vblank = false;
7615
e7b07cee 7616 /* update planes when needed per crtc*/
5cc6dcbd 7617 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 7618 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 7619
54d76575 7620 if (dm_new_crtc_state->stream)
eb3dc897 7621 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 7622 dm, crtc, wait_for_vblank);
e7b07cee
HW
7623 }
7624
b5e83f6f
NK
7625 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7626 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
e7b07cee 7627
6ce8f316
NK
7628 /* Update audio instances for each connector. */
7629 amdgpu_dm_commit_audio(dev, state);
7630
e7b07cee
HW
7631 /*
7632 * Send a vblank event for all events not handled in the flip path, and
7633 * mark the consumed events for drm_atomic_helper_commit_hw_done
7634 */
7635 spin_lock_irqsave(&adev->ddev->event_lock, flags);
0bc9706d 7636 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 7637
0bc9706d
LSL
7638 if (new_crtc_state->event)
7639 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 7640
0bc9706d 7641 new_crtc_state->event = NULL;
e7b07cee
HW
7642 }
7643 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7644
29c8f234
LL
7645 /* Signal HW programming completion */
7646 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
7647
7648 if (wait_for_vblank)
320a1274 7649 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
7650
7651 drm_atomic_helper_cleanup_planes(dev, state);
97028037 7652
1f6010a9
DF
7653 /*
7654 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
7655 * so we can put the GPU into runtime suspend if we're not driving any
7656 * displays anymore
7657 */
fe2a1965
LP
7658 for (i = 0; i < crtc_disable_count; i++)
7659 pm_runtime_put_autosuspend(dev->dev);
97028037 7660 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
7661
7662 if (dc_state_temp)
7663 dc_release_state(dc_state_temp);
e7b07cee
HW
7664}
7665
7666
7667static int dm_force_atomic_commit(struct drm_connector *connector)
7668{
7669 int ret = 0;
7670 struct drm_device *ddev = connector->dev;
7671 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7672 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7673 struct drm_plane *plane = disconnected_acrtc->base.primary;
7674 struct drm_connector_state *conn_state;
7675 struct drm_crtc_state *crtc_state;
7676 struct drm_plane_state *plane_state;
7677
7678 if (!state)
7679 return -ENOMEM;
7680
7681 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7682
7683 /* Construct an atomic state to restore previous display setting */
7684
7685 /*
7686 * Attach connectors to drm_atomic_state
7687 */
7688 conn_state = drm_atomic_get_connector_state(state, connector);
7689
7690 ret = PTR_ERR_OR_ZERO(conn_state);
7691 if (ret)
7692 goto err;
7693
7694 /* Attach crtc to drm_atomic_state*/
7695 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7696
7697 ret = PTR_ERR_OR_ZERO(crtc_state);
7698 if (ret)
7699 goto err;
7700
7701 /* force a restore */
7702 crtc_state->mode_changed = true;
7703
7704 /* Attach plane to drm_atomic_state */
7705 plane_state = drm_atomic_get_plane_state(state, plane);
7706
7707 ret = PTR_ERR_OR_ZERO(plane_state);
7708 if (ret)
7709 goto err;
7710
7711
7712 /* Call commit internally with the state we just constructed */
7713 ret = drm_atomic_commit(state);
7714 if (!ret)
7715 return 0;
7716
7717err:
7718 DRM_ERROR("Restoring old state failed with %i\n", ret);
7719 drm_atomic_state_put(state);
7720
7721 return ret;
7722}
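/*
 * Editor's sketch (not driver code): PTR_ERR_OR_ZERO(), used above after
 * each drm_atomic_get_*_state() call, folds the kernel's "pointer or
 * ERR_PTR" convention into an errno. A toy re-implementation of the
 * underlying check (ERR_PTR values live in the top 4095 bytes of the
 * address space):
 */
#include <stdint.h>

#define TOY_MAX_ERRNO 4095

static long toy_ptr_err_or_zero(const void *ptr)
{
    uintptr_t v = (uintptr_t)ptr;

    return v >= (uintptr_t)-TOY_MAX_ERRNO ? (long)v : 0; /* 0 == valid pointer */
}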
7723
7724/*
1f6010a9
DF
7725 * This function handles all cases when set mode does not come upon hotplug.
7726 * This includes when a display is unplugged then plugged back into the
7727 * same port and when running without usermode desktop manager support
e7b07cee 7728 */
3ee6b26b
AD
7729void dm_restore_drm_connector_state(struct drm_device *dev,
7730 struct drm_connector *connector)
e7b07cee 7731{
c84dec2f 7732 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
7733 struct amdgpu_crtc *disconnected_acrtc;
7734 struct dm_crtc_state *acrtc_state;
7735
7736 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7737 return;
7738
7739 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
7740 if (!disconnected_acrtc)
7741 return;
e7b07cee 7742
70e8ffc5
HW
7743 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7744 if (!acrtc_state->stream)
e7b07cee
HW
7745 return;
7746
7747 /*
7748 * If the previous sink is not released and different from the current,
7749 * we deduce we are in a state where we cannot rely on a usermode call
7750 * to turn on the display, so we do it here
7751 */
7752 if (acrtc_state->stream->sink != aconnector->dc_sink)
7753 dm_force_atomic_commit(&aconnector->base);
7754}
7755
1f6010a9 7756/*
e7b07cee
HW
7757 * Grabs all modesetting locks to serialize against any blocking commits,
7758 * and waits for completion of all non-blocking commits.
7759 */
3ee6b26b
AD
7760static int do_aquire_global_lock(struct drm_device *dev,
7761 struct drm_atomic_state *state)
e7b07cee
HW
7762{
7763 struct drm_crtc *crtc;
7764 struct drm_crtc_commit *commit;
7765 long ret;
7766
1f6010a9
DF
7767 /*
7768 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
7769 * ensure that when the framework releases it, the
7770 * extra locks we are taking here will get released too
7771 */
7772 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7773 if (ret)
7774 return ret;
7775
7776 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7777 spin_lock(&crtc->commit_lock);
7778 commit = list_first_entry_or_null(&crtc->commit_list,
7779 struct drm_crtc_commit, commit_entry);
7780 if (commit)
7781 drm_crtc_commit_get(commit);
7782 spin_unlock(&crtc->commit_lock);
7783
7784 if (!commit)
7785 continue;
7786
1f6010a9
DF
7787 /*
7788 * Make sure all pending HW programming completed and
e7b07cee
HW
7789 * page flips done
7790 */
7791 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7792
7793 if (ret > 0)
7794 ret = wait_for_completion_interruptible_timeout(
7795 &commit->flip_done, 10*HZ);
7796
7797 if (ret == 0)
7798 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 7799 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
7800
7801 drm_crtc_commit_put(commit);
7802 }
7803
7804 return ret < 0 ? ret : 0;
7805}
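/*
 * Editor's sketch (not driver code): decoding the return value of
 * wait_for_completion_interruptible_timeout() as the function above
 * does: a negative value means interrupted by a signal, 0 means the
 * 10*HZ timeout expired, and a positive value is the jiffies remaining:
 */
static const char *toy_describe_wait(long ret)
{
    if (ret < 0)
        return "interrupted by a signal";
    if (ret == 0)
        return "timed out";
    return "completed with time to spare";
}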
7806
bb47de73
NK
7807static void get_freesync_config_for_crtc(
7808 struct dm_crtc_state *new_crtc_state,
7809 struct dm_connector_state *new_con_state)
98e6436d
AK
7810{
7811 struct mod_freesync_config config = {0};
98e6436d
AK
7812 struct amdgpu_dm_connector *aconnector =
7813 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 7814 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 7815 int vrefresh = drm_mode_vrefresh(mode);
98e6436d 7816
a057ec46 7817 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
7818 vrefresh >= aconnector->min_vfreq &&
7819 vrefresh <= aconnector->max_vfreq;
bb47de73 7820
a057ec46
IB
7821 if (new_crtc_state->vrr_supported) {
7822 new_crtc_state->stream->ignore_msa_timing_param = true;
bb47de73 7823 config.state = new_crtc_state->base.vrr_enabled ?
98e6436d
AK
7824 VRR_STATE_ACTIVE_VARIABLE :
7825 VRR_STATE_INACTIVE;
7826 config.min_refresh_in_uhz =
7827 aconnector->min_vfreq * 1000000;
7828 config.max_refresh_in_uhz =
7829 aconnector->max_vfreq * 1000000;
69ff8845 7830 config.vsif_supported = true;
180db303 7831 config.btr = true;
98e6436d
AK
7832 }
7833
bb47de73
NK
7834 new_crtc_state->freesync_config = config;
7835}
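/*
 * Editor's sketch (not driver code): the refresh bounds above are kept
 * in micro-hertz, hence the "* 1000000". A 48-144 Hz panel therefore
 * yields the range 48000000..144000000 uHz:
 */
static unsigned int toy_hz_to_uhz(unsigned int hz)
{
    return hz * 1000000u; /* fits in 32 bits for any plausible refresh rate */
}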
98e6436d 7836
bb47de73
NK
7837static void reset_freesync_config_for_crtc(
7838 struct dm_crtc_state *new_crtc_state)
7839{
7840 new_crtc_state->vrr_supported = false;
98e6436d 7841
180db303
NK
7842 memset(&new_crtc_state->vrr_params, 0,
7843 sizeof(new_crtc_state->vrr_params));
bb47de73
NK
7844 memset(&new_crtc_state->vrr_infopacket, 0,
7845 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
7846}
7847
4b9674e5
LL
7848static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7849 struct drm_atomic_state *state,
7850 struct drm_crtc *crtc,
7851 struct drm_crtc_state *old_crtc_state,
7852 struct drm_crtc_state *new_crtc_state,
7853 bool enable,
7854 bool *lock_and_validation_needed)
e7b07cee 7855{
eb3dc897 7856 struct dm_atomic_state *dm_state = NULL;
54d76575 7857 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 7858 struct dc_stream_state *new_stream;
62f55537 7859 int ret = 0;
d4d4a645 7860
1f6010a9
DF
7861 /*
7862 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7863 * update changed items
7864 */
4b9674e5
LL
7865 struct amdgpu_crtc *acrtc = NULL;
7866 struct amdgpu_dm_connector *aconnector = NULL;
7867 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7868 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 7869
4b9674e5 7870 new_stream = NULL;
9635b754 7871
4b9674e5
LL
7872 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7873 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7874 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 7875 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 7876
4b9674e5
LL
7877 /* TODO This hack should go away */
7878 if (aconnector && enable) {
7879 /* Make sure fake sink is created in plug-in scenario */
7880 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7881 &aconnector->base);
7882 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7883 &aconnector->base);
19f89e23 7884
4b9674e5
LL
7885 if (IS_ERR(drm_new_conn_state)) {
7886 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7887 goto fail;
7888 }
19f89e23 7889
4b9674e5
LL
7890 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7891 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 7892
02d35a67
JFZ
7893 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7894 goto skip_modeset;
7895
cbd14ae7
SW
7896 new_stream = create_validate_stream_for_sink(aconnector,
7897 &new_crtc_state->mode,
7898 dm_new_conn_state,
7899 dm_old_crtc_state->stream);
19f89e23 7900
4b9674e5
LL
7901 /*
7902 * we can have no stream on ACTION_SET if a display
7903 * was disconnected during S3; in this case it is not an
7904 * error, as the OS will be updated after detection and
7905 * will do the right thing on the next atomic commit
7906 */
19f89e23 7907
4b9674e5
LL
7908 if (!new_stream) {
7909 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7910 __func__, acrtc->base.base.id);
7911 ret = -ENOMEM;
7912 goto fail;
7913 }
e7b07cee 7914
4b9674e5 7915 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 7916
88694af9
NK
7917 ret = fill_hdr_info_packet(drm_new_conn_state,
7918 &new_stream->hdr_static_metadata);
7919 if (ret)
7920 goto fail;
7921
7e930949
NK
7922 /*
7923 * If we already removed the old stream from the context
7924 * (and set the new stream to NULL) then we can't reuse
7925 * the old stream even if the stream and scaling are unchanged.
7926 * We'll hit the BUG_ON and black screen.
7927 *
7928 * TODO: Refactor this function to allow this check to work
7929 * in all conditions.
7930 */
7931 if (dm_new_crtc_state->stream &&
7932 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
7933 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7934 new_crtc_state->mode_changed = false;
7935 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7936 new_crtc_state->mode_changed);
62f55537 7937 }
4b9674e5 7938 }
b830ebc9 7939
02d35a67 7940 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
7941 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7942 goto skip_modeset;
e7b07cee 7943
4b9674e5
LL
7944 DRM_DEBUG_DRIVER(
7945 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7946 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7947 "connectors_changed:%d\n",
7948 acrtc->crtc_id,
7949 new_crtc_state->enable,
7950 new_crtc_state->active,
7951 new_crtc_state->planes_changed,
7952 new_crtc_state->mode_changed,
7953 new_crtc_state->active_changed,
7954 new_crtc_state->connectors_changed);
62f55537 7955
4b9674e5
LL
7956 /* Remove stream for any changed/disabled CRTC */
7957 if (!enable) {
62f55537 7958
4b9674e5
LL
7959 if (!dm_old_crtc_state->stream)
7960 goto skip_modeset;
eb3dc897 7961
4b9674e5
LL
7962 ret = dm_atomic_get_state(state, &dm_state);
7963 if (ret)
7964 goto fail;
e7b07cee 7965
4b9674e5
LL
7966 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7967 crtc->base.id);
62f55537 7968
4b9674e5
LL
7969 /* i.e. reset mode */
7970 if (dc_remove_stream_from_ctx(
7971 dm->dc,
7972 dm_state->context,
7973 dm_old_crtc_state->stream) != DC_OK) {
7974 ret = -EINVAL;
7975 goto fail;
7976 }
62f55537 7977
4b9674e5
LL
7978 dc_stream_release(dm_old_crtc_state->stream);
7979 dm_new_crtc_state->stream = NULL;
bb47de73 7980
4b9674e5 7981 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 7982
4b9674e5 7983 *lock_and_validation_needed = true;
62f55537 7984
4b9674e5
LL
7985 } else {/* Add stream for any updated/enabled CRTC */
7986 /*
7987 * Quick fix to prevent NULL pointer on new_stream when
7988 * newly added MST connectors are not found in the existing crtc_state in chained mode
7989 * TODO: need to dig out the root cause of that
7990 */
7991 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7992 goto skip_modeset;
62f55537 7993
4b9674e5
LL
7994 if (modereset_required(new_crtc_state))
7995 goto skip_modeset;
62f55537 7996
4b9674e5
LL
7997 if (modeset_required(new_crtc_state, new_stream,
7998 dm_old_crtc_state->stream)) {
62f55537 7999
4b9674e5 8000 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 8001
4b9674e5
LL
8002 ret = dm_atomic_get_state(state, &dm_state);
8003 if (ret)
8004 goto fail;
27b3f4fc 8005
4b9674e5 8006 dm_new_crtc_state->stream = new_stream;
62f55537 8007
4b9674e5 8008 dc_stream_retain(new_stream);
1dc90497 8009
4b9674e5
LL
8010 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8011 crtc->base.id);
1dc90497 8012
4b9674e5
LL
8013 if (dc_add_stream_to_ctx(
8014 dm->dc,
8015 dm_state->context,
8016 dm_new_crtc_state->stream) != DC_OK) {
8017 ret = -EINVAL;
8018 goto fail;
9b690ef3
BL
8019 }
8020
4b9674e5
LL
8021 *lock_and_validation_needed = true;
8022 }
8023 }
e277adc5 8024
4b9674e5
LL
8025skip_modeset:
8026 /* Release extra reference */
8027 if (new_stream)
8028 dc_stream_release(new_stream);
e277adc5 8029
4b9674e5
LL
8030 /*
8031 * We want to do dc stream updates that do not require a
8032 * full modeset below.
8033 */
8034 if (!(enable && aconnector && new_crtc_state->enable &&
8035 new_crtc_state->active))
8036 return 0;
8037 /*
8038 * Given above conditions, the dc state cannot be NULL because:
8039 * 1. We're in the process of enabling CRTCs (the stream has just been
8040 * added to the dc context, or is already on the context)
8041 * 2. Has a valid connector attached, and
8042 * 3. Is currently active and enabled.
8043 * => The dc stream state currently exists.
8044 */
8045 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 8046
4b9674e5
LL
8047 /* Scaling or underscan settings */
8048 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8049 update_stream_scaling_settings(
8050 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 8051
b05e2c5e
DF
8052 /* ABM settings */
8053 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8054
4b9674e5
LL
8055 /*
8056 * Color management settings. We also update color properties
8057 * when a modeset is needed, to ensure it gets reprogrammed.
8058 */
8059 if (dm_new_crtc_state->base.color_mgmt_changed ||
8060 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 8061 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
8062 if (ret)
8063 goto fail;
62f55537 8064 }
e7b07cee 8065
4b9674e5
LL
8066 /* Update Freesync settings. */
8067 get_freesync_config_for_crtc(dm_new_crtc_state,
8068 dm_new_conn_state);
8069
62f55537 8070 return ret;
9635b754
DS
8071
8072fail:
8073 if (new_stream)
8074 dc_stream_release(new_stream);
8075 return ret;
62f55537 8076}
9b690ef3 8077
f6ff2a08
NK
8078static bool should_reset_plane(struct drm_atomic_state *state,
8079 struct drm_plane *plane,
8080 struct drm_plane_state *old_plane_state,
8081 struct drm_plane_state *new_plane_state)
8082{
8083 struct drm_plane *other;
8084 struct drm_plane_state *old_other_state, *new_other_state;
8085 struct drm_crtc_state *new_crtc_state;
8086 int i;
8087
70a1efac
NK
8088 /*
8089 * TODO: Remove this hack once the checks below are sufficient
8090 * to determine when we need to reset all the planes on
8091 * the stream.
8092 */
8093 if (state->allow_modeset)
8094 return true;
8095
f6ff2a08
NK
8096 /* Exit early if we know that we're adding or removing the plane. */
8097 if (old_plane_state->crtc != new_plane_state->crtc)
8098 return true;
8099
8100 /* old crtc == new_crtc == NULL, plane not in context. */
8101 if (!new_plane_state->crtc)
8102 return false;
8103
8104 new_crtc_state =
8105 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8106
8107 if (!new_crtc_state)
8108 return true;
8109
7316c4ad
NK
8110 /* CRTC Degamma changes currently require us to recreate planes. */
8111 if (new_crtc_state->color_mgmt_changed)
8112 return true;
8113
f6ff2a08
NK
8114 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8115 return true;
8116
8117 /*
8118 * If there are any new primary or overlay planes being added or
8119 * removed then the z-order can potentially change. To ensure
8120 * correct z-order and pipe acquisition the current DC architecture
8121 * requires us to remove and recreate all existing planes.
8122 *
8123 * TODO: Come up with a more elegant solution for this.
8124 */
8125 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8126 if (other->type == DRM_PLANE_TYPE_CURSOR)
8127 continue;
8128
8129 if (old_other_state->crtc != new_plane_state->crtc &&
8130 new_other_state->crtc != new_plane_state->crtc)
8131 continue;
8132
8133 if (old_other_state->crtc != new_other_state->crtc)
8134 return true;
8135
8136 /* TODO: Remove this once we can handle fast format changes. */
8137 if (old_other_state->fb && new_other_state->fb &&
8138 old_other_state->fb->format != new_other_state->fb->format)
8139 return true;
8140 }
8141
8142 return false;
8143}
8144
9e869063
LL
8145static int dm_update_plane_state(struct dc *dc,
8146 struct drm_atomic_state *state,
8147 struct drm_plane *plane,
8148 struct drm_plane_state *old_plane_state,
8149 struct drm_plane_state *new_plane_state,
8150 bool enable,
8151 bool *lock_and_validation_needed)
62f55537 8152{
eb3dc897
NK
8153
8154 struct dm_atomic_state *dm_state = NULL;
62f55537 8155 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 8156 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 8157 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 8158 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
e133020f 8159 struct amdgpu_crtc *new_acrtc;
f6ff2a08 8160 bool needs_reset;
62f55537 8161 int ret = 0;
e7b07cee 8162
9b690ef3 8163
9e869063
LL
8164 new_plane_crtc = new_plane_state->crtc;
8165 old_plane_crtc = old_plane_state->crtc;
8166 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8167 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 8168
e133020f
SS
8169 /*TODO Implement better atomic check for cursor plane */
8170 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8171 if (!enable || !new_plane_crtc ||
8172 drm_atomic_plane_disabling(plane->state, new_plane_state))
8173 return 0;
8174
8175 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8176
8177 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8178 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8179 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8180 new_plane_state->crtc_w, new_plane_state->crtc_h);
8181 return -EINVAL;
8182 }
8183
9e869063 8184 return 0;
e133020f 8185 }
9b690ef3 8186
f6ff2a08
NK
8187 needs_reset = should_reset_plane(state, plane, old_plane_state,
8188 new_plane_state);
8189
9e869063
LL
8190 /* Remove any changed/removed planes */
8191 if (!enable) {
f6ff2a08 8192 if (!needs_reset)
9e869063 8193 return 0;
a7b06724 8194
9e869063
LL
8195 if (!old_plane_crtc)
8196 return 0;
62f55537 8197
9e869063
LL
8198 old_crtc_state = drm_atomic_get_old_crtc_state(
8199 state, old_plane_crtc);
8200 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 8201
9e869063
LL
8202 if (!dm_old_crtc_state->stream)
8203 return 0;
62f55537 8204
9e869063
LL
8205 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8206 plane->base.id, old_plane_crtc->base.id);
9b690ef3 8207
9e869063
LL
8208 ret = dm_atomic_get_state(state, &dm_state);
8209 if (ret)
8210 return ret;
eb3dc897 8211
9e869063
LL
8212 if (!dc_remove_plane_from_context(
8213 dc,
8214 dm_old_crtc_state->stream,
8215 dm_old_plane_state->dc_state,
8216 dm_state->context)) {
62f55537 8217
9e869063
LL
8218 ret = -EINVAL;
8219 return ret;
8220 }
e7b07cee 8221
9b690ef3 8222
9e869063
LL
8223 dc_plane_state_release(dm_old_plane_state->dc_state);
8224 dm_new_plane_state->dc_state = NULL;
1dc90497 8225
9e869063 8226 *lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			new_plane_crtc->dev->dev_private,
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}
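
The error handling above follows a reference-counting discipline: a plane state obtained from dc_create_plane_state() must be released on every failure path until dc_add_plane_to_context() succeeds, after which the DC context owns the reference and atomic-state cleanup drops it. A minimal user-space sketch of that ownership pattern (the struct and helpers here are hypothetical stand-ins, not DC's API):

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-in for a refcounted plane state. */
	struct plane_state {
		int refcount;
	};

	static struct plane_state *plane_state_create(void)
	{
		struct plane_state *ps = calloc(1, sizeof(*ps));

		if (ps)
			ps->refcount = 1;	/* caller owns the initial reference */
		return ps;
	}

	static void plane_state_release(struct plane_state *ps)
	{
		if (ps && --ps->refcount == 0)
			free(ps);
	}

	/* Attaching transfers ownership of the caller's reference to the context. */
	static int context_add_plane(struct plane_state **ctx_slot, struct plane_state *ps)
	{
		if (*ctx_slot)
			return -1;	/* slot occupied: caller must still release */
		*ctx_slot = ps;
		return 0;
	}

	int main(void)
	{
		struct plane_state *ctx_slot = NULL;
		struct plane_state *ps = plane_state_create();

		if (!ps)
			return 1;

		if (context_add_plane(&ctx_slot, ps)) {
			plane_state_release(ps);	/* failure: drop our reference */
			return 1;
		}

		/* Success: the context owns the reference now... */
		plane_state_release(ctx_slot);		/* ...and drops it at cleanup */
		printf("ownership transferred and cleaned up\n");
		return 0;
	}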

static int
dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
				    struct drm_atomic_state *state,
				    enum surface_update_type *out_type)
{
	struct dc *dc = dm->dc;
	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
	int i, j, num_plane, ret = 0;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
	struct drm_crtc *new_plane_crtc;
	struct drm_plane *plane;

	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
	struct dc_stream_status *status = NULL;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	struct surface_info_bundle {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		DRM_ERROR("Failed to allocate update bundle\n");
		/* Set type to FULL to avoid crashing in DC */
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {

		memset(bundle, 0, sizeof(struct surface_info_bundle));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
			uint64_t tiling_flags;
			bool tmz_surface = false;

			new_plane_crtc = new_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			bundle->surface_updates[num_plane].surface =
				new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
				bundle->stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				bundle->surface_updates[num_plane].gamma =
					new_dm_plane_state->dc_state->gamma_correction;
				bundle->surface_updates[num_plane].in_transfer_func =
					new_dm_plane_state->dc_state->in_transfer_func;
				bundle->surface_updates[num_plane].gamut_remap_matrix =
					&new_dm_plane_state->dc_state->gamut_remap_matrix;
				bundle->stream_update.gamut_remap =
					&new_dm_crtc_state->stream->gamut_remap_matrix;
				bundle->stream_update.output_csc_transform =
					&new_dm_crtc_state->stream->csc_color_matrix;
				bundle->stream_update.out_transfer_func =
					new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   scaling_info);
			if (ret)
				goto cleanup;

			bundle->surface_updates[num_plane].scaling_info = scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
				if (ret)
					goto cleanup;

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					plane_info,
					&flip_addr->address, tmz_surface,
					false);
				if (ret)
					goto cleanup;

				bundle->surface_updates[num_plane].plane_info = plane_info;
				bundle->surface_updates[num_plane].flip_addr = flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		bundle->stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(
				dc, bundle->surface_updates, num_plane,
				&bundle->stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(bundle);

	*out_type = update_type;
	return ret;
}
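
The scratch bundle is a single heap allocation rather than a local variable: four MAX_SURFACES-sized arrays of per-plane structs plus a stream update would be far too large for a kernel stack frame. A minimal user-space sketch of the same pattern (the stand-in struct sizes and MAX_SURFACES value here are illustrative assumptions, not DC's real layout):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define MAX_SURFACES 3	/* illustrative; the real value lives in DC headers */

	/* Stand-in payload structs; the real ones are large. */
	struct surface_update { unsigned char data[512]; };
	struct plane_info     { unsigned char data[256]; };

	struct scratch_bundle {
		struct surface_update surface_updates[MAX_SURFACES];
		struct plane_info plane_infos[MAX_SURFACES];
	};

	int main(void)
	{
		/* One zeroed heap allocation, mirroring kzalloc(sizeof(*bundle)). */
		struct scratch_bundle *bundle = calloc(1, sizeof(*bundle));

		if (!bundle)
			return 1;

		/* Reused across loop iterations via memset, as the driver does. */
		memset(bundle, 0, sizeof(*bundle));
		printf("bundle size: %zu bytes\n", sizeof(*bundle));

		free(bundle);
		return 0;
	}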

static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another, acquiring the global lock guarantees that any
 * such full-update commit will wait for completion of any outstanding flip
 * using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, or a negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
	enum dc_status status;
	int ret, i;

	/*
	 * This bool will be set to true for any modeset/reset or plane
	 * update which implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-changes validation was removed because a new stream
	 * cannot be committed into the context without causing a full reset.
	 * Need to decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need
	 * to take the global lock. Leaving it in to check if we broke any
	 * corner cases:
	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL\n");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			/* Fail with a proper error code instead of ret == 0 */
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 */
		struct dm_atomic_state *new_dm_state, *old_dm_state;

		new_dm_state = dm_atomic_get_new_state(state);
		old_dm_state = dm_atomic_get_old_state(state);

		if (new_dm_state && old_dm_state) {
			if (new_dm_state->context)
				dc_release_state(new_dm_state->context);

			new_dm_state->context = old_dm_state->context;

			if (old_dm_state->context)
				dc_retain_state(old_dm_state->context);
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}

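The control flow above reduces to a small decision over the surface_update_type lattice (FAST < MED < FULL): anything above FAST takes the global lock and runs full DC validation, while FAST commits reuse the existing DC context. A standalone sketch of that decision (the enum names mirror the ordering the driver relies on; the helper is illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	/* Ordered lattice the driver relies on: FAST < MED < FULL. */
	enum surface_update_type {
		UPDATE_TYPE_FAST,
		UPDATE_TYPE_MED,
		UPDATE_TYPE_FULL,
	};

	static enum surface_update_type
	overall_type(enum surface_update_type per_commit, bool scaling_changed)
	{
		enum surface_update_type overall = scaling_changed ?
			UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;

		/* Take the maximum of the two contributions. */
		if (overall < per_commit)
			overall = per_commit;
		return overall;
	}

	int main(void)
	{
		/* A MED per-commit type with no scaling change is still above
		 * FAST, so it needs the global lock and full DC validation. */
		enum surface_update_type t = overall_type(UPDATE_TYPE_MED, false);

		printf(t > UPDATE_TYPE_FAST ?
		       "lock + dc_validate_global_state\n" :
		       "fast path: retain existing context\n");
		return 0;
	}
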
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
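
The capability test is a single DPCD bit: register 0x007 (DP_DOWN_STREAM_PORT_COUNT) carries the MSA_TIMING_PAR_IGNORED flag in bit 6, which advertises that the sink can ignore MSA timing parameters, a prerequisite for variable-refresh operation. A standalone sketch of the bit test (the register value read here is a made-up sample):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* DPCD register 0x007 and its bit-6 flag, matching the DRM DP helpers. */
	#define DP_DOWN_STREAM_PORT_COUNT	0x007
	#define DP_MSA_TIMING_PAR_IGNORED	(1 << 6)

	int main(void)
	{
		uint8_t dpcd_data = 0x41;	/* sample read of register 0x007 */
		bool capable = dpcd_data & DP_MSA_TIMING_PAR_IGNORED;

		printf("MSA timing parameters ignored: %s\n", capable ? "yes" : "no");
		return 0;
	}
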
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-NULL, restrict FreeSync to DP and eDP sinks only.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if the monitor has a continuous-frequency mode.
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for the range-limits flag only. If flags == 1,
			 * no additional timing information is provided.
			 * Default GTF, GTF secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

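The capability decision boils down to the EDID monitor-range descriptor: the sink is treated as FreeSync-capable only when its continuous refresh range spans more than 10 Hz. A standalone sketch with sample values (the 48-75 Hz window is an example panel range, not data from the driver):

	#include <stdbool.h>
	#include <stdio.h>

	/* Values as parsed from an EDID monitor-range descriptor (sample data). */
	struct freesync_range {
		unsigned int min_vfreq;	/* Hz */
		unsigned int max_vfreq;	/* Hz */
	};

	static bool freesync_capable(const struct freesync_range *r)
	{
		/* The driver requires a usable window of more than 10 Hz. */
		return r->max_vfreq - r->min_vfreq > 10;
	}

	int main(void)
	{
		struct freesync_range panel = { .min_vfreq = 48, .max_vfreq = 75 };

		/* 75 - 48 = 27 Hz of range, so this sample panel qualifies. */
		printf("freesync capable: %s\n",
		       freesync_capable(&panel) ? "yes" : "no");
		return 0;
	}
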
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

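PSR capability discovery is a one-byte DPCD read: byte 0 of the eDP PSR capability block (DP_PSR_SUPPORT, register 0x070) reports the supported PSR version, with 0 meaning unsupported. A standalone sketch of the same mapping (the sample DPCD value is made up for illustration):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DP_PSR_SUPPORT	0x070	/* first byte of the eDP PSR capability block */

	int main(void)
	{
		uint8_t psr_version = 0x01;	/* sample read: sink reports PSR version 1 */
		bool feature_enabled = psr_version != 0;

		printf("PSR version %u -> feature %s\n", psr_version,
		       feature_enabled ? "enabled" : "unsupported");
		return 0;
	}
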
/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating the
	 * interrupt to enter PSR; start from a fail-safe default of
	 * 2 static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

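Concretely, the refresh rate works out to pix_clk / (v_total * h_total), each frame then lasts 1000000 / vsync_rate_hz microseconds, and the static-frame count is 30000 / frame_time + 1. A standalone sketch of that arithmetic (the 148.5 MHz, 2200 x 1125 timing is a sample 1080p60 mode used only as an example):

	#include <stdio.h>

	int main(void)
	{
		/* Sample 1080p60 timing: 148.5 MHz pixel clock, 2200 x 1125 total. */
		unsigned long long pix_clk_hz = 148500000ULL;
		unsigned int h_total = 2200, v_total = 1125;

		unsigned int vsync_rate_hz = pix_clk_hz / v_total / h_total;	/* 60 */
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;	/* 16666 */

		/* Frames needed so that at least 30 ms of static screen elapse. */
		unsigned int num_frames_static = (30000 / frame_time_microsec) + 1;

		printf("%u Hz -> %u us/frame -> %u static frames\n",
		       vsync_rate_hz, frame_time_microsec, num_frames_static);	/* 2 */
		return 0;
	}
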
/*
 * amdgpu_dm_psr_disable() - disable PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}