drm/amdgpu: Enter low power state if CRTC active.
[linux-2.6-block.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

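/*
 * dm_crtc_get_scanoutpos - return the current scanout position
 *
 * Looks up the dc_stream_state for @crtc and packs the scanout position
 * into the register-style format the base driver expects: *position holds
 * the vertical position in the low word and the horizontal position in the
 * high word, while *vbl packs vblank start (low word) and end (high word).
 */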
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

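/*
 * get_crtc_by_otg_inst - map an OTG (output timing generator) instance back
 * to its amdgpu_crtc. An invalid instance of -1 triggers a warning and falls
 * back to the first CRTC.
 */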
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

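/* True if the CRTC is in a variable or fixed VRR (freesync) active state. */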
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

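/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: interrupt parameters
 *
 * In VRR mode the end of the front-porch is where vblank timestamping gives
 * valid results, so core vblank handling and BTR processing for pre-DCE12
 * ASICs are done here instead of in dm_crtc_high_irq().
 */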
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: ignored
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
 * * We are close enough to VUPDATE - the point of no return for hw
 * * We are in the fixed portion of variable front porch when vrr is enabled
 * * We are before VUPDATE, where double-buffered vrr registers are swapped
 *
 * It is therefore the correct place to signal vblank, send user flip events,
 * and update VRR.
 */
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
			 amdgpu_dm_vrr_active(acrtc_state));

	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
	drm_crtc_handle_vblank(&acrtc->base);

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(
			adev->dm.freesync_module,
			acrtc_state->stream,
			&acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(
			adev->dm.dc,
			acrtc_state->stream,
			&acrtc_state->vrr_params.adjust);
	}

	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
#endif

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

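/*
 * Audio component binding: exposes connector ELDs to the HDA driver through
 * the drm_audio_component interface, under adev->dm.audio_lock.
 */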
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

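/*
 * dm_dmub_hw_init - initialize the DMUB (display microcontroller) hardware
 *
 * Copies the DMUB firmware and VBIOS into the framebuffer regions reserved
 * by dm_dmub_sw_init(), zeroes the mailbox/tracebuffer/fw-state windows,
 * programs the hardware through dmub_srv_hw_init() and waits for the
 * firmware auto-load to finish. Returns 0 on success or when DMUB is
 * unsupported, negative error code otherwise.
 */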
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
	       fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

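/*
 * amdgpu_dm_init - create the display manager
 *
 * Initializes the DM IRQ support, creates the DC instance and brings up the
 * DMUB hardware, then sets up the freesync, color management and (when
 * enabled) HDCP modules before building the DRM device state through
 * amdgpu_dm_initialize_drm_device().
 */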
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

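/*
 * load_dmcu_fw - request the DMCU firmware for ASICs that need it and
 * register it with the PSP loader. ASICs without a separate DMCU image
 * simply return 0.
 */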
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

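/*
 * dm_dmub_sw_init - software-side DMUB setup
 *
 * Requests and validates the DMUB firmware, creates the dmub_srv service,
 * computes the sizes of the firmware regions and reserves a VRAM buffer
 * that dm_dmub_hw_init() later populates.
 */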
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

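/*
 * Start topology management on every connector that currently looks like an
 * MST branch; on failure the link is downgraded to a single display.
 */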
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}

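/*
 * Suspend or resume MST topology management around S3. If a topology fails
 * to resume, it is torn down and a hotplug event is sent so userspace can
 * re-probe.
 */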
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the window driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented different flow for window driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

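/*
 * S3 entry: cache the atomic state, suspend MST topology management and DM
 * interrupts, then put DC into the D3 power state.
 */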
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

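/*
 * emulated_link_detect - fake a detection pass for a forced connector. A sink
 * matching the connector signal type is created and its EDID read locally,
 * without touching the absent or unresponsive physical link.
 */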
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}

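/*
 * S3 exit: rebuild the dc_state, re-initialize DMUB, power DC back up to D0,
 * re-run detection on every connector, then restore the cached atomic state.
 */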
4562236b
HW
1674static int dm_resume(void *handle)
1675{
1676 struct amdgpu_device *adev = handle;
4562236b
HW
1677 struct drm_device *ddev = adev->ddev;
1678 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 1679 struct amdgpu_dm_connector *aconnector;
4562236b 1680 struct drm_connector *connector;
f8d2d39e 1681 struct drm_connector_list_iter iter;
4562236b 1682 struct drm_crtc *crtc;
c2cea706 1683 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
1684 struct dm_crtc_state *dm_new_crtc_state;
1685 struct drm_plane *plane;
1686 struct drm_plane_state *new_plane_state;
1687 struct dm_plane_state *dm_new_plane_state;
113b7a01 1688 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 1689 enum dc_connection_type new_connection_type = dc_connection_none;
8c7aea40 1690 int i, r;
4562236b 1691
113b7a01
LL
1692 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1693 dc_release_state(dm_state->context);
1694 dm_state->context = dc_create_state(dm->dc);
1695 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1696 dc_resource_state_construct(dm->dc, dm_state->context);
1697
8c7aea40
NK
1698 /* Before powering on DC we need to re-initialize DMUB. */
1699 r = dm_dmub_hw_init(adev);
1700 if (r)
1701 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1702
a80aa93d
ML
1703 /* power on hardware */
1704 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1705
4562236b
HW
1706 /* program HPD filter */
1707 dc_resume(dm->dc);
1708
4562236b
HW
1709 /*
1710 * early enable HPD Rx IRQ, should be done before set mode as short
1711 * pulse interrupts are used for MST
1712 */
1713 amdgpu_dm_irq_resume_early(adev);
1714
d20ebea8 1715 /* On resume we need to rewrite the MSTM control bits to enable MST*/
684cd480
LP
1716 s3_handle_mst(ddev, false);
1717
4562236b 1718 /* Do detection*/
f8d2d39e
LP
1719 drm_connector_list_iter_begin(ddev, &iter);
1720 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 1721 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
1722
1723 /*
1724 * this is the case when traversing through already created
1725 * MST connectors, should be skipped
1726 */
1727 if (aconnector->mst_port)
1728 continue;
1729
1730 mutex_lock(&aconnector->hpd_lock);
1731 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1732 DRM_ERROR("KMS: Failed to detect connector\n");
1733
1734 if (aconnector->base.force && new_connection_type == dc_connection_none)
1735 emulated_link_detect(aconnector->dc_link);
1736 else
1737 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1738
1739 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1740 aconnector->fake_enable = false;
1741
1742 if (aconnector->dc_sink)
1743 dc_sink_release(aconnector->dc_sink);
1744 aconnector->dc_sink = NULL;
1745 amdgpu_dm_update_connector_after_detect(aconnector);
1746 mutex_unlock(&aconnector->hpd_lock);
1747 }
1748 drm_connector_list_iter_end(&iter);
1749
1750 /* Force mode set in atomic commit */
1751 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1752 new_crtc_state->active_changed = true;
1753
1754 /*
1755 * atomic_check is expected to create the dc states. We need to release
1756 * them here, since they were duplicated as part of the suspend
1757 * procedure.
1758 */
1759 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1760 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1761 if (dm_new_crtc_state->stream) {
1762 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1763 dc_stream_release(dm_new_crtc_state->stream);
1764 dm_new_crtc_state->stream = NULL;
1765 }
1766 }
1767
1768 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1769 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1770 if (dm_new_plane_state->dc_state) {
1771 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1772 dc_plane_state_release(dm_new_plane_state->dc_state);
1773 dm_new_plane_state->dc_state = NULL;
1774 }
1775 }
1776
1777 drm_atomic_helper_resume(ddev, dm->cached_state);
1778
1779 dm->cached_state = NULL;
1780
1781 amdgpu_dm_irq_resume_late(adev);
1782
1783 amdgpu_dm_smu_write_watermarks_table(adev);
1784
1785 return 0;
1786}
1787
1788/**
1789 * DOC: DM Lifecycle
1790 *
1791 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1792 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1793 * the base driver's device list to be initialized and torn down accordingly.
1794 *
1795 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1796 */
1797
1798static const struct amd_ip_funcs amdgpu_dm_funcs = {
1799 .name = "dm",
1800 .early_init = dm_early_init,
1801 .late_init = dm_late_init,
1802 .sw_init = dm_sw_init,
1803 .sw_fini = dm_sw_fini,
1804 .hw_init = dm_hw_init,
1805 .hw_fini = dm_hw_fini,
1806 .suspend = dm_suspend,
1807 .resume = dm_resume,
1808 .is_idle = dm_is_idle,
1809 .wait_for_idle = dm_wait_for_idle,
1810 .check_soft_reset = dm_check_soft_reset,
1811 .soft_reset = dm_soft_reset,
1812 .set_clockgating_state = dm_set_clockgating_state,
1813 .set_powergating_state = dm_set_powergating_state,
1814};
1815
1816const struct amdgpu_ip_block_version dm_ip_block =
1817{
1818 .type = AMD_IP_BLOCK_TYPE_DCE,
1819 .major = 1,
1820 .minor = 0,
1821 .rev = 0,
1822 .funcs = &amdgpu_dm_funcs,
1823};
1824
1825
1826/**
1827 * DOC: atomic
1828 *
1829 * *WIP*
1830 */
1831
1832static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1833 .fb_create = amdgpu_display_user_framebuffer_create,
1834 .output_poll_changed = drm_fb_helper_output_poll_changed,
1835 .atomic_check = amdgpu_dm_atomic_check,
1836 .atomic_commit = amdgpu_dm_atomic_commit,
1837};
1838
1839static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1840 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1841};
1842
1843static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1844{
1845 u32 max_cll, min_cll, max, min, q, r;
1846 struct amdgpu_dm_backlight_caps *caps;
1847 struct amdgpu_display_manager *dm;
1848 struct drm_connector *conn_base;
1849 struct amdgpu_device *adev;
1850 static const u8 pre_computed_values[] = {
1851 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1852 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1853
1854 if (!aconnector || !aconnector->dc_link)
1855 return;
1856
1857 conn_base = &aconnector->base;
1858 adev = conn_base->dev->dev_private;
1859 dm = &adev->dm;
1860 caps = &dm->backlight_caps;
1861 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1862 caps->aux_support = false;
1863 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1864 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1865
1866 if (caps->ext_caps->bits.oled == 1 ||
1867 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1868 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1869 caps->aux_support = true;
1870
1871 /* From the specification (CTA-861-G), for calculating the maximum
1872 * luminance we need to use:
1873 * Luminance = 50*2**(CV/32)
1874 * Where CV is a one-byte value.
1875 * For calculating this expression we may need floating-point precision;
1876 * to avoid this complexity level, we take advantage that CV is divided
1877 * by a constant. From Euclid's division algorithm, we know that CV
1878 * can be written as: CV = 32*q + r. Next, we replace CV in the
1879 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
1880 * need to pre-compute the value of r/32. For pre-computing the values
1881 * we used the following Ruby line:
1882 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1883 * The results of the above expressions can be verified at
1884 * pre_computed_values.
1885 */
1886 q = max_cll >> 5;
1887 r = max_cll % 32;
1888 max = (1 << q) * pre_computed_values[r];
1889
1890 // min luminance: maxLum * (CV/255)^2 / 100
1891 q = DIV_ROUND_CLOSEST(min_cll, 255);
1892 min = max * DIV_ROUND_CLOSEST((q * q), 100);
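 /*
 * Worked example (illustrative figures, not from the original source):
 * max_cll = 70 gives q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 nits, close
 * to the exact 50*2^(70/32) ~= 227.7. For min_cll = 20,
 * q = DIV_ROUND_CLOSEST(20, 255) = 0 and therefore min = 0; the early
 * integer division loses precision for small min_cll values.
 */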
1893
1894 caps->aux_max_input_signal = max;
1895 caps->aux_min_input_signal = min;
1896}
1897
1898static void
1899amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1900{
1901 struct drm_connector *connector = &aconnector->base;
1902 struct drm_device *dev = connector->dev;
1903 struct dc_sink *sink;
1904
1905 /* MST handled by drm_mst framework */
1906 if (aconnector->mst_mgr.mst_state == true)
1907 return;
1908
1909
1910 sink = aconnector->dc_link->local_sink;
1911 if (sink)
1912 dc_sink_retain(sink);
1913
1914 /*
1915 * Edid mgmt connector gets first update only in mode_valid hook and then
1916 * the connector sink is set to either fake or physical sink depending on link status.
1917 * Skip if already done during boot.
1918 */
1919 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1920 && aconnector->dc_em_sink) {
1921
1922 /*
1923 * For S3 resume with headless use em_sink to fake stream
1924 * because on resume connector->sink is set to NULL
1925 */
1926 mutex_lock(&dev->mode_config.mutex);
1927
1928 if (sink) {
1929 if (aconnector->dc_sink) {
1930 amdgpu_dm_update_freesync_caps(connector, NULL);
1931 /*
1932 * retain and release below are used to
1933 * bump up refcount for sink because the link doesn't point
1934 * to it anymore after disconnect, so on next crtc to connector
1935 * reshuffle by UMD we will get into unwanted dc_sink release
1936 */
1937 dc_sink_release(aconnector->dc_sink);
1938 }
1939 aconnector->dc_sink = sink;
1940 dc_sink_retain(aconnector->dc_sink);
1941 amdgpu_dm_update_freesync_caps(connector,
1942 aconnector->edid);
1943 } else {
1944 amdgpu_dm_update_freesync_caps(connector, NULL);
1945 if (!aconnector->dc_sink) {
1946 aconnector->dc_sink = aconnector->dc_em_sink;
1947 dc_sink_retain(aconnector->dc_sink);
1948 }
1949 }
1950
1951 mutex_unlock(&dev->mode_config.mutex);
1952
1953 if (sink)
1954 dc_sink_release(sink);
1955 return;
1956 }
1957
1958 /*
1959 * TODO: temporary guard to look for proper fix
1960 * if this sink is MST sink, we should not do anything
1961 */
1962 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1963 dc_sink_release(sink);
1964 return;
1965 }
1966
1967 if (aconnector->dc_sink == sink) {
1968 /*
1969 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1970 * Do nothing!!
1971 */
1972 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1973 aconnector->connector_id);
1974 if (sink)
1975 dc_sink_release(sink);
1976 return;
1977 }
1978
1979 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1980 aconnector->connector_id, aconnector->dc_sink, sink);
1981
1982 mutex_lock(&dev->mode_config.mutex);
1983
1984 /*
1985 * 1. Update status of the drm connector
1986 * 2. Send an event and let userspace tell us what to do
1987 */
1988 if (sink) {
1989 /*
1990 * TODO: check if we still need the S3 mode update workaround.
1991 * If yes, put it here.
1992 */
1993 if (aconnector->dc_sink)
1994 amdgpu_dm_update_freesync_caps(connector, NULL);
1995
1996 aconnector->dc_sink = sink;
1997 dc_sink_retain(aconnector->dc_sink);
1998 if (sink->dc_edid.length == 0) {
1999 aconnector->edid = NULL;
2000 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2001 } else {
2002 aconnector->edid =
2003 (struct edid *) sink->dc_edid.raw_edid;
2004
2005
2006 drm_connector_update_edid_property(connector,
2007 aconnector->edid);
2008 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2009 aconnector->edid);
2010 }
2011 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2012 update_connector_ext_caps(aconnector);
2013 } else {
2014 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2015 amdgpu_dm_update_freesync_caps(connector, NULL);
2016 drm_connector_update_edid_property(connector, NULL);
2017 aconnector->num_modes = 0;
2018 dc_sink_release(aconnector->dc_sink);
2019 aconnector->dc_sink = NULL;
2020 aconnector->edid = NULL;
2021#ifdef CONFIG_DRM_AMD_DC_HDCP
2022 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2023 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2024 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2025#endif
2026 }
2027
2028 mutex_unlock(&dev->mode_config.mutex);
2029
2030 if (sink)
2031 dc_sink_release(sink);
2032}
2033
2034static void handle_hpd_irq(void *param)
2035{
2036 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2037 struct drm_connector *connector = &aconnector->base;
2038 struct drm_device *dev = connector->dev;
2039 enum dc_connection_type new_connection_type = dc_connection_none;
2040#ifdef CONFIG_DRM_AMD_DC_HDCP
2041 struct amdgpu_device *adev = dev->dev_private;
2042#endif
2043
2044 /*
2045 * In case of failure or MST no need to update connector status or notify the OS
2046 * since (for MST case) MST does this in its own context.
2047 */
2048 mutex_lock(&aconnector->hpd_lock);
2049
2050#ifdef CONFIG_DRM_AMD_DC_HDCP
2051 if (adev->dm.hdcp_workqueue)
2052 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2053#endif
2054 if (aconnector->fake_enable)
2055 aconnector->fake_enable = false;
2056
2057 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2058 DRM_ERROR("KMS: Failed to detect connector\n");
2059
2060 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2061 emulated_link_detect(aconnector->dc_link);
2062
2063
2064 drm_modeset_lock_all(dev);
2065 dm_restore_drm_connector_state(dev, connector);
2066 drm_modeset_unlock_all(dev);
2067
2068 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2069 drm_kms_helper_hotplug_event(dev);
2070
2071 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2072 amdgpu_dm_update_connector_after_detect(aconnector);
2073
2074
2075 drm_modeset_lock_all(dev);
2076 dm_restore_drm_connector_state(dev, connector);
2077 drm_modeset_unlock_all(dev);
2078
2079 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2080 drm_kms_helper_hotplug_event(dev);
2081 }
2082 mutex_unlock(&aconnector->hpd_lock);
2083
2084}
2085
2086static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2087{
2088 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2089 uint8_t dret;
2090 bool new_irq_handled = false;
2091 int dpcd_addr;
2092 int dpcd_bytes_to_read;
2093
2094 const int max_process_count = 30;
2095 int process_count = 0;
2096
2097 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2098
2099 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2100 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2101 /* DPCD 0x200 - 0x201 for downstream IRQ */
2102 dpcd_addr = DP_SINK_COUNT;
2103 } else {
2104 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2105 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2106 dpcd_addr = DP_SINK_COUNT_ESI;
2107 }
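 /*
 * Illustrative note (not from the original source): with the DPCD
 * constants from <drm/drm_dp_helper.h>, the pre-1.2 path reads
 * DP_LANE0_1_STATUS - DP_SINK_COUNT = 0x202 - 0x200 = 2 bytes, while
 * the ESI path reads DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI =
 * 0x2006 - 0x2002 = 4 bytes, which is also what sizes esi[] above.
 */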
2108
2109 dret = drm_dp_dpcd_read(
2110 &aconnector->dm_dp_aux.aux,
2111 dpcd_addr,
2112 esi,
2113 dpcd_bytes_to_read);
2114
2115 while (dret == dpcd_bytes_to_read &&
2116 process_count < max_process_count) {
2117 uint8_t retry;
2118 dret = 0;
2119
2120 process_count++;
2121
2122 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2123 /* handle HPD short pulse irq */
2124 if (aconnector->mst_mgr.mst_state)
2125 drm_dp_mst_hpd_irq(
2126 &aconnector->mst_mgr,
2127 esi,
2128 &new_irq_handled);
2129
2130 if (new_irq_handled) {
2131 /* ACK at DPCD to notify down stream */
2132 const int ack_dpcd_bytes_to_write =
2133 dpcd_bytes_to_read - 1;
2134
2135 for (retry = 0; retry < 3; retry++) {
2136 uint8_t wret;
2137
2138 wret = drm_dp_dpcd_write(
2139 &aconnector->dm_dp_aux.aux,
2140 dpcd_addr + 1,
2141 &esi[1],
2142 ack_dpcd_bytes_to_write);
2143 if (wret == ack_dpcd_bytes_to_write)
2144 break;
2145 }
2146
2147 /* check if there is new irq to be handled */
2148 dret = drm_dp_dpcd_read(
2149 &aconnector->dm_dp_aux.aux,
2150 dpcd_addr,
2151 esi,
2152 dpcd_bytes_to_read);
2153
2154 new_irq_handled = false;
2155 } else {
2156 break;
2157 }
2158 }
2159
2160 if (process_count == max_process_count)
2161 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2162}
2163
2164static void handle_hpd_rx_irq(void *param)
2165{
2166 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2167 struct drm_connector *connector = &aconnector->base;
2168 struct drm_device *dev = connector->dev;
2169 struct dc_link *dc_link = aconnector->dc_link;
2170 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2171 enum dc_connection_type new_connection_type = dc_connection_none;
2172#ifdef CONFIG_DRM_AMD_DC_HDCP
2173 union hpd_irq_data hpd_irq_data;
2174 struct amdgpu_device *adev = dev->dev_private;
2175
2176 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2177#endif
2178
2179 /*
2180 * TODO: Temporary mutex to keep the HPD interrupt from hitting a GPIO
2181 * conflict; once an i2c helper is implemented, this mutex should be
2182 * retired.
2183 */
2184 if (dc_link->type != dc_connection_mst_branch)
2185 mutex_lock(&aconnector->hpd_lock);
2186
2187
2188#ifdef CONFIG_DRM_AMD_DC_HDCP
2189 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2190#else
2191 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2192#endif
2193 !is_mst_root_connector) {
2194 /* Downstream Port status changed. */
2195 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2196 DRM_ERROR("KMS: Failed to detect connector\n");
2197
2198 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2199 emulated_link_detect(dc_link);
2200
2201 if (aconnector->fake_enable)
2202 aconnector->fake_enable = false;
2203
2204 amdgpu_dm_update_connector_after_detect(aconnector);
2205
2206
2207 drm_modeset_lock_all(dev);
2208 dm_restore_drm_connector_state(dev, connector);
2209 drm_modeset_unlock_all(dev);
2210
2211 drm_kms_helper_hotplug_event(dev);
2212 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2213
2214 if (aconnector->fake_enable)
2215 aconnector->fake_enable = false;
2216
2217 amdgpu_dm_update_connector_after_detect(aconnector);
2218
2219
2220 drm_modeset_lock_all(dev);
2221 dm_restore_drm_connector_state(dev, connector);
2222 drm_modeset_unlock_all(dev);
2223
2224 drm_kms_helper_hotplug_event(dev);
2225 }
2226 }
2227#ifdef CONFIG_DRM_AMD_DC_HDCP
2228 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2229 if (adev->dm.hdcp_workqueue)
2230 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2231 }
2232#endif
2233 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2234 (dc_link->type == dc_connection_mst_branch))
2235 dm_handle_hpd_rx_irq(aconnector);
2236
2237 if (dc_link->type != dc_connection_mst_branch) {
2238 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2239 mutex_unlock(&aconnector->hpd_lock);
2240 }
2241}
2242
2243static void register_hpd_handlers(struct amdgpu_device *adev)
2244{
2245 struct drm_device *dev = adev->ddev;
2246 struct drm_connector *connector;
2247 struct amdgpu_dm_connector *aconnector;
2248 const struct dc_link *dc_link;
2249 struct dc_interrupt_params int_params = {0};
2250
2251 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2252 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2253
2254 list_for_each_entry(connector,
2255 &dev->mode_config.connector_list, head) {
2256
2257 aconnector = to_amdgpu_dm_connector(connector);
2258 dc_link = aconnector->dc_link;
2259
2260 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2261 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2262 int_params.irq_source = dc_link->irq_source_hpd;
2263
2264 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2265 handle_hpd_irq,
2266 (void *) aconnector);
2267 }
2268
2269 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2270
2271 /* Also register for DP short pulse (hpd_rx). */
2272 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2273 int_params.irq_source = dc_link->irq_source_hpd_rx;
2274
2275 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2276 handle_hpd_rx_irq,
2277 (void *) aconnector);
2278 }
2279 }
2280}
2281
2282/* Register IRQ sources and initialize IRQ callbacks */
2283static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2284{
2285 struct dc *dc = adev->dm.dc;
2286 struct common_irq_params *c_irq_params;
2287 struct dc_interrupt_params int_params = {0};
2288 int r;
2289 int i;
2290 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2291
2292 if (adev->asic_type >= CHIP_VEGA10)
2293 client_id = SOC15_IH_CLIENTID_DCE;
2294
2295 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2296 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2297
2298 /*
2299 * Actions of amdgpu_irq_add_id():
2300 * 1. Register a set() function with base driver.
2301 * Base driver will call set() function to enable/disable an
2302 * interrupt in DC hardware.
2303 * 2. Register amdgpu_dm_irq_handler().
2304 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2305 * coming from DC hardware.
2306 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2307 * for acknowledging and handling. */
2308
2309 /* Use VBLANK interrupt */
2310 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2311 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2312 if (r) {
2313 DRM_ERROR("Failed to add crtc irq id!\n");
2314 return r;
2315 }
2316
2317 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2318 int_params.irq_source =
3d761e79 2319 dc_interrupt_to_irq_source(dc, i, 0);
2320
2321 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2322
2323 c_irq_params->adev = adev;
2324 c_irq_params->irq_src = int_params.irq_source;
2325
2326 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2327 dm_crtc_high_irq, c_irq_params);
2328 }
2329
2330 /* Use VUPDATE interrupt */
2331 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2332 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2333 if (r) {
2334 DRM_ERROR("Failed to add vupdate irq id!\n");
2335 return r;
2336 }
2337
2338 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2339 int_params.irq_source =
2340 dc_interrupt_to_irq_source(dc, i, 0);
2341
2342 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2343
2344 c_irq_params->adev = adev;
2345 c_irq_params->irq_src = int_params.irq_source;
2346
2347 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2348 dm_vupdate_high_irq, c_irq_params);
2349 }
2350
2351 /* Use GRPH_PFLIP interrupt */
2352 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2353 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2354 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2355 if (r) {
2356 DRM_ERROR("Failed to add page flip irq id!\n");
2357 return r;
2358 }
2359
2360 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2361 int_params.irq_source =
2362 dc_interrupt_to_irq_source(dc, i, 0);
2363
2364 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2365
2366 c_irq_params->adev = adev;
2367 c_irq_params->irq_src = int_params.irq_source;
2368
2369 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2370 dm_pflip_high_irq, c_irq_params);
2371
2372 }
2373
2374 /* HPD */
2375 r = amdgpu_irq_add_id(adev, client_id,
2376 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2377 if (r) {
2378 DRM_ERROR("Failed to add hpd irq id!\n");
2379 return r;
2380 }
2381
2382 register_hpd_handlers(adev);
2383
2384 return 0;
2385}
2386
2387#if defined(CONFIG_DRM_AMD_DC_DCN)
2388/* Register IRQ sources and initialize IRQ callbacks */
2389static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2390{
2391 struct dc *dc = adev->dm.dc;
2392 struct common_irq_params *c_irq_params;
2393 struct dc_interrupt_params int_params = {0};
2394 int r;
2395 int i;
2396
2397 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2398 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2399
2400 /*
2401 * Actions of amdgpu_irq_add_id():
2402 * 1. Register a set() function with base driver.
2403 * Base driver will call set() function to enable/disable an
2404 * interrupt in DC hardware.
2405 * 2. Register amdgpu_dm_irq_handler().
2406 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2407 * coming from DC hardware.
2408 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2409 * for acknowledging and handling.
2410 */
2411
2412 /* Use VSTARTUP interrupt */
2413 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2414 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2415 i++) {
2416 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2417
2418 if (r) {
2419 DRM_ERROR("Failed to add crtc irq id!\n");
2420 return r;
2421 }
2422
2423 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2424 int_params.irq_source =
2425 dc_interrupt_to_irq_source(dc, i, 0);
2426
2427 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2428
2429 c_irq_params->adev = adev;
2430 c_irq_params->irq_src = int_params.irq_source;
2431
2432 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2433 dm_dcn_crtc_high_irq, c_irq_params);
2434 }
2435
2436 /* Use GRPH_PFLIP interrupt */
2437 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2438 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2439 i++) {
2440 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2441 if (r) {
2442 DRM_ERROR("Failed to add page flip irq id!\n");
2443 return r;
2444 }
2445
2446 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2447 int_params.irq_source =
2448 dc_interrupt_to_irq_source(dc, i, 0);
2449
2450 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2451
2452 c_irq_params->adev = adev;
2453 c_irq_params->irq_src = int_params.irq_source;
2454
2455 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2456 dm_pflip_high_irq, c_irq_params);
2457
2458 }
2459
2460 /* HPD */
2461 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2462 &adev->hpd_irq);
2463 if (r) {
2464 DRM_ERROR("Failed to add hpd irq id!\n");
2465 return r;
2466 }
2467
2468 register_hpd_handlers(adev);
2469
2470 return 0;
2471}
2472#endif
2473
2474/*
2475 * Acquires the lock for the atomic state object and returns
2476 * the new atomic state.
2477 *
2478 * This should only be called during atomic check.
2479 */
2480static int dm_atomic_get_state(struct drm_atomic_state *state,
2481 struct dm_atomic_state **dm_state)
2482{
2483 struct drm_device *dev = state->dev;
2484 struct amdgpu_device *adev = dev->dev_private;
2485 struct amdgpu_display_manager *dm = &adev->dm;
2486 struct drm_private_state *priv_state;
2487
2488 if (*dm_state)
2489 return 0;
2490
2491 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2492 if (IS_ERR(priv_state))
2493 return PTR_ERR(priv_state);
2494
2495 *dm_state = to_dm_atomic_state(priv_state);
2496
2497 return 0;
2498}
2499
2500struct dm_atomic_state *
2501dm_atomic_get_new_state(struct drm_atomic_state *state)
2502{
2503 struct drm_device *dev = state->dev;
2504 struct amdgpu_device *adev = dev->dev_private;
2505 struct amdgpu_display_manager *dm = &adev->dm;
2506 struct drm_private_obj *obj;
2507 struct drm_private_state *new_obj_state;
2508 int i;
2509
2510 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2511 if (obj->funcs == dm->atomic_obj.funcs)
2512 return to_dm_atomic_state(new_obj_state);
2513 }
2514
2515 return NULL;
2516}
2517
2518struct dm_atomic_state *
2519dm_atomic_get_old_state(struct drm_atomic_state *state)
2520{
2521 struct drm_device *dev = state->dev;
2522 struct amdgpu_device *adev = dev->dev_private;
2523 struct amdgpu_display_manager *dm = &adev->dm;
2524 struct drm_private_obj *obj;
2525 struct drm_private_state *old_obj_state;
2526 int i;
2527
2528 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2529 if (obj->funcs == dm->atomic_obj.funcs)
2530 return to_dm_atomic_state(old_obj_state);
2531 }
2532
2533 return NULL;
2534}
2535
2536static struct drm_private_state *
2537dm_atomic_duplicate_state(struct drm_private_obj *obj)
2538{
2539 struct dm_atomic_state *old_state, *new_state;
2540
2541 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2542 if (!new_state)
2543 return NULL;
2544
2545 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2546
2547 old_state = to_dm_atomic_state(obj->state);
2548
2549 if (old_state && old_state->context)
2550 new_state->context = dc_copy_state(old_state->context);
2551
2552 if (!new_state->context) {
2553 kfree(new_state);
2554 return NULL;
2555 }
2556
2557 return &new_state->base;
2558}
2559
2560static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2561 struct drm_private_state *state)
2562{
2563 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2564
2565 if (dm_state && dm_state->context)
2566 dc_release_state(dm_state->context);
2567
2568 kfree(dm_state);
2569}
2570
2571static struct drm_private_state_funcs dm_atomic_state_funcs = {
2572 .atomic_duplicate_state = dm_atomic_duplicate_state,
2573 .atomic_destroy_state = dm_atomic_destroy_state,
2574};
2575
2576static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2577{
2578 struct dm_atomic_state *state;
2579 int r;
2580
2581 adev->mode_info.mode_config_initialized = true;
2582
2583 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2584 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2585
2586 adev->ddev->mode_config.max_width = 16384;
2587 adev->ddev->mode_config.max_height = 16384;
2588
2589 adev->ddev->mode_config.preferred_depth = 24;
2590 adev->ddev->mode_config.prefer_shadow = 1;
2591 /* indicates support for immediate flip */
2592 adev->ddev->mode_config.async_page_flip = true;
2593
2594 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2595
2596 state = kzalloc(sizeof(*state), GFP_KERNEL);
2597 if (!state)
2598 return -ENOMEM;
2599
2600 state->context = dc_create_state(adev->dm.dc);
2601 if (!state->context) {
2602 kfree(state);
2603 return -ENOMEM;
2604 }
2605
2606 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2607
2608 drm_atomic_private_obj_init(adev->ddev,
2609 &adev->dm.atomic_obj,
2610 &state->base,
2611 &dm_atomic_state_funcs);
2612
2613 r = amdgpu_display_modeset_create_props(adev);
2614 if (r)
2615 return r;
2616
2617 r = amdgpu_dm_audio_init(adev);
2618 if (r)
2619 return r;
2620
2621 return 0;
2622}
2623
2624#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2625#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2626#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2627
2628#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2629 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2630
2631static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2632{
2633#if defined(CONFIG_ACPI)
2634 struct amdgpu_dm_backlight_caps caps;
2635
2636 if (dm->backlight_caps.caps_valid)
2637 return;
2638
2639 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2640 if (caps.caps_valid) {
2641 dm->backlight_caps.caps_valid = true;
2642 if (caps.aux_support)
2643 return;
2644 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2645 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2646 } else {
2647 dm->backlight_caps.min_input_signal =
2648 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2649 dm->backlight_caps.max_input_signal =
2650 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2651 }
2652#else
2653 if (dm->backlight_caps.aux_support)
2654 return;
2655
2656 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2657 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2658#endif
2659}
2660
2661static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2662{
2663 bool rc;
2664
2665 if (!link)
2666 return 1;
2667
2668 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2669 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2670
2671 return rc ? 0 : 1;
2672}
2673
2674static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2675 const uint32_t user_brightness)
2676{
2677 u32 min, max, conversion_pace;
2678 u32 brightness = user_brightness;
2679
2680 if (!caps)
2681 goto out;
2682
2683 if (!caps->aux_support) {
2684 max = caps->max_input_signal;
2685 min = caps->min_input_signal;
2686 /*
2687 * The brightness input is in the range 0-255
2688 * It needs to be rescaled to be between the
2689 * requested min and max input signal
2690 * It also needs to be scaled up by 0x101 to
2691 * match the DC interface which has a range of
2692 * 0 to 0xffff
2693 */
2694 conversion_pace = 0x101;
2695 brightness =
2696 user_brightness
2697 * conversion_pace
2698 * (max - min)
2699 / AMDGPU_MAX_BL_LEVEL
2700 + min * conversion_pace;
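 /*
 * Worked example (illustrative, using the AMDGPU_DM_DEFAULT_*
 * backlight limits of 12 and 255): user_brightness = 128 gives
 * 128 * 0x101 * (255 - 12) / 255 + 12 * 0x101
 * = 31347 + 3084 = 34431, roughly mid-scale in the 0-0xffff
 * range that the DC interface expects.
 */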
2701 } else {
2702 /* TODO
2703 * We are doing a linear interpolation here, which is OK but
2704 * does not provide the optimal result. We probably want
2705 * something close to the Perceptual Quantizer (PQ) curve.
2706 */
2707 max = caps->aux_max_input_signal;
2708 min = caps->aux_min_input_signal;
2709
2710 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2711 + user_brightness * max;
2712 // Multiply the value by 1000 since we use millinits
2713 brightness *= 1000;
2714 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2715 }
2716
2717out:
2718 return brightness;
2719}
2720
2721static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2722{
2723 struct amdgpu_display_manager *dm = bl_get_data(bd);
2724 struct amdgpu_dm_backlight_caps caps;
2725 struct dc_link *link = NULL;
2726 u32 brightness;
2727 bool rc;
2728
2729 amdgpu_dm_update_backlight_caps(dm);
2730 caps = dm->backlight_caps;
2731
2732 link = (struct dc_link *)dm->backlight_link;
2733
2734 brightness = convert_brightness(&caps, bd->props.brightness);
2735 // Change brightness based on AUX property
2736 if (caps.aux_support)
2737 return set_backlight_via_aux(link, brightness);
2738
2739 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2740
2741 return rc ? 0 : 1;
2742}
2743
2744static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2745{
2746 struct amdgpu_display_manager *dm = bl_get_data(bd);
2747 int ret = dc_link_get_backlight_level(dm->backlight_link);
2748
2749 if (ret == DC_ERROR_UNEXPECTED)
2750 return bd->props.brightness;
2751 return ret;
2752}
2753
2754static const struct backlight_ops amdgpu_dm_backlight_ops = {
2755 .options = BL_CORE_SUSPENDRESUME,
2756 .get_brightness = amdgpu_dm_backlight_get_brightness,
2757 .update_status = amdgpu_dm_backlight_update_status,
2758};
2759
2760static void
2761amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2762{
2763 char bl_name[16];
2764 struct backlight_properties props = { 0 };
2765
2766 amdgpu_dm_update_backlight_caps(dm);
2767
2768 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2769 props.brightness = AMDGPU_MAX_BL_LEVEL;
2770 props.type = BACKLIGHT_RAW;
2771
2772 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2773 dm->adev->ddev->primary->index);
2774
2775 dm->backlight_dev = backlight_device_register(bl_name,
2776 dm->adev->ddev->dev,
2777 dm,
2778 &amdgpu_dm_backlight_ops,
2779 &props);
2780
2781 if (IS_ERR(dm->backlight_dev))
2782 DRM_ERROR("DM: Backlight registration failed!\n");
2783 else
2784 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2785}
2786
2787#endif
2788
2789static int initialize_plane(struct amdgpu_display_manager *dm,
2790 struct amdgpu_mode_info *mode_info, int plane_id,
2791 enum drm_plane_type plane_type,
2792 const struct dc_plane_cap *plane_cap)
2793{
2794 struct drm_plane *plane;
2795 unsigned long possible_crtcs;
2796 int ret = 0;
2797
2798 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2799 if (!plane) {
2800 DRM_ERROR("KMS: Failed to allocate plane\n");
2801 return -ENOMEM;
2802 }
2803 plane->type = plane_type;
2804
2805 /*
2806 * HACK: IGT tests expect that the primary plane for a CRTC
2807 * can only have one possible CRTC. Only expose support for
2808 * any CRTC if they're not going to be used as a primary plane
2809 * for a CRTC - like overlay or underlay planes.
2810 */
2811 possible_crtcs = 1 << plane_id;
2812 if (plane_id >= dm->dc->caps.max_streams)
2813 possible_crtcs = 0xff;
2814
2815 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2816
2817 if (ret) {
2818 DRM_ERROR("KMS: Failed to initialize plane\n");
2819 kfree(plane);
2820 return ret;
2821 }
2822
2823 if (mode_info)
2824 mode_info->planes[plane_id] = plane;
2825
2826 return ret;
2827}
2828
2829
2830static void register_backlight_device(struct amdgpu_display_manager *dm,
2831 struct dc_link *link)
2832{
2833#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2834 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2835
2836 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2837 link->type != dc_connection_none) {
2838 /*
2839 * Even if registration failed, we should continue with
2840 * DM initialization because not having a backlight control
2841 * is better than a black screen.
2842 */
2843 amdgpu_dm_register_backlight_device(dm);
2844
2845 if (dm->backlight_dev)
2846 dm->backlight_link = link;
2847 }
2848#endif
2849}
2850
2851
2852/*
2853 * In this architecture, the association
2854 * connector -> encoder -> crtc
2855 * is not really required. The crtc and connector will hold the
2856 * display_index as an abstraction to use with DAL component
2857 *
2858 * Returns 0 on success
2859 */
2860static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2861{
2862 struct amdgpu_display_manager *dm = &adev->dm;
2863 int32_t i;
2864 struct amdgpu_dm_connector *aconnector = NULL;
2865 struct amdgpu_encoder *aencoder = NULL;
2866 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2867 uint32_t link_cnt;
2868 int32_t primary_planes;
2869 enum dc_connection_type new_connection_type = dc_connection_none;
2870 const struct dc_plane_cap *plane;
2871
2872 link_cnt = dm->dc->caps.max_links;
2873 if (amdgpu_dm_mode_config_init(dm->adev)) {
2874 DRM_ERROR("DM: Failed to initialize mode config\n");
2875 return -EINVAL;
2876 }
2877
2878 /* There is one primary plane per CRTC */
2879 primary_planes = dm->dc->caps.max_streams;
2880 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2881
2882 /*
2883 * Initialize primary planes, implicit planes for legacy IOCTLS.
2884 * Order is reversed to match iteration order in atomic check.
2885 */
2886 for (i = (primary_planes - 1); i >= 0; i--) {
2887 plane = &dm->dc->caps.planes[i];
2888
2889 if (initialize_plane(dm, mode_info, i,
2890 DRM_PLANE_TYPE_PRIMARY, plane)) {
2891 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2892 goto fail;
2893 }
2894 }
2895
2896 /*
2897 * Initialize overlay planes, index starting after primary planes.
2898 * These planes have a higher DRM index than the primary planes since
2899 * they should be considered as having a higher z-order.
2900 * Order is reversed to match iteration order in atomic check.
2901 *
2902 * Only support DCN for now, and only expose one so we don't encourage
2903 * userspace to use up all the pipes.
2904 */
2905 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2906 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2907
2908 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2909 continue;
2910
2911 if (!plane->blends_with_above || !plane->blends_with_below)
2912 continue;
2913
2914 if (!plane->pixel_format_support.argb8888)
2915 continue;
2916
2917 if (initialize_plane(dm, NULL, primary_planes + i,
2918 DRM_PLANE_TYPE_OVERLAY, plane)) {
2919 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2920 goto fail;
2921 }
2922
2923 /* Only create one overlay plane. */
2924 break;
2925 }
2926
2927 for (i = 0; i < dm->dc->caps.max_streams; i++)
2928 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2929 DRM_ERROR("KMS: Failed to initialize crtc\n");
2930 goto fail;
2931 }
2932
2933 dm->display_indexes_num = dm->dc->caps.max_streams;
2934
2935 /* loops over all connectors on the board */
2936 for (i = 0; i < link_cnt; i++) {
2937 struct dc_link *link = NULL;
2938
2939 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2940 DRM_ERROR(
2941 "KMS: Cannot support more than %d display indexes\n",
2942 AMDGPU_DM_MAX_DISPLAY_INDEX);
2943 continue;
2944 }
2945
2946 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2947 if (!aconnector)
2948 goto fail;
2949
2950 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2951 if (!aencoder)
2952 goto fail;
2953
2954 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2955 DRM_ERROR("KMS: Failed to initialize encoder\n");
2956 goto fail;
2957 }
2958
2959 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2960 DRM_ERROR("KMS: Failed to initialize connector\n");
2961 goto fail;
2962 }
2963
2964 link = dc_get_link_at_index(dm->dc, i);
2965
2966 if (!dc_link_detect_sink(link, &new_connection_type))
2967 DRM_ERROR("KMS: Failed to detect connector\n");
2968
2969 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2970 emulated_link_detect(link);
2971 amdgpu_dm_update_connector_after_detect(aconnector);
2972
2973 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2974 amdgpu_dm_update_connector_after_detect(aconnector);
2975 register_backlight_device(dm, link);
2976 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2977 amdgpu_dm_set_psr_caps(link);
2978 }
2979
2980
2981 }
2982
2983 /* Software is initialized. Now we can register interrupt handlers. */
2984 switch (adev->asic_type) {
2985 case CHIP_BONAIRE:
2986 case CHIP_HAWAII:
2987 case CHIP_KAVERI:
2988 case CHIP_KABINI:
2989 case CHIP_MULLINS:
2990 case CHIP_TONGA:
2991 case CHIP_FIJI:
2992 case CHIP_CARRIZO:
2993 case CHIP_STONEY:
2994 case CHIP_POLARIS11:
2995 case CHIP_POLARIS10:
2996 case CHIP_POLARIS12:
2997 case CHIP_VEGAM:
2998 case CHIP_VEGA10:
2999 case CHIP_VEGA12:
3000 case CHIP_VEGA20:
3001 if (dce110_register_irq_handlers(dm->adev)) {
3002 DRM_ERROR("DM: Failed to initialize IRQ\n");
3003 goto fail;
3004 }
3005 break;
3006#if defined(CONFIG_DRM_AMD_DC_DCN)
3007 case CHIP_RAVEN:
3008 case CHIP_NAVI12:
3009 case CHIP_NAVI10:
3010 case CHIP_NAVI14:
3011 case CHIP_RENOIR:
3012 if (dcn10_register_irq_handlers(dm->adev)) {
3013 DRM_ERROR("DM: Failed to initialize IRQ\n");
3014 goto fail;
3015 }
3016 break;
3017#endif
3018 default:
3019 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3020 goto fail;
3021 }
3022
3023 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3024 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3025
3026 return 0;
3027fail:
3028 kfree(aencoder);
3029 kfree(aconnector);
3030
3031 return -EINVAL;
3032}
3033
3034static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3035{
3036 drm_mode_config_cleanup(dm->ddev);
3037 drm_atomic_private_obj_fini(&dm->atomic_obj);
3038 return;
3039}
3040
3041/******************************************************************************
3042 * amdgpu_display_funcs functions
3043 *****************************************************************************/
3044
3045/*
3046 * dm_bandwidth_update - program display watermarks
3047 *
3048 * @adev: amdgpu_device pointer
3049 *
3050 * Calculate and program the display watermarks and line buffer allocation.
3051 */
3052static void dm_bandwidth_update(struct amdgpu_device *adev)
3053{
3054 /* TODO: implement later */
3055}
3056
3057static const struct amdgpu_display_funcs dm_display_funcs = {
3058 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3059 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3060 .backlight_set_level = NULL, /* never called for DC */
3061 .backlight_get_level = NULL, /* never called for DC */
3062 .hpd_sense = NULL,/* called unconditionally */
3063 .hpd_set_polarity = NULL, /* called unconditionally */
3064 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3065 .page_flip_get_scanoutpos =
3066 dm_crtc_get_scanoutpos,/* called unconditionally */
3067 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3068 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3069};
3070
3071#if defined(CONFIG_DEBUG_KERNEL_DC)
3072
3073static ssize_t s3_debug_store(struct device *device,
3074 struct device_attribute *attr,
3075 const char *buf,
3076 size_t count)
3077{
3078 int ret;
3079 int s3_state;
3080 struct drm_device *drm_dev = dev_get_drvdata(device);
3081 struct amdgpu_device *adev = drm_dev->dev_private;
3082
3083 ret = kstrtoint(buf, 0, &s3_state);
3084
3085 if (ret == 0) {
3086 if (s3_state) {
3087 dm_resume(adev);
3088 drm_kms_helper_hotplug_event(adev->ddev);
3089 } else
3090 dm_suspend(adev);
3091 }
3092
3093 return ret == 0 ? count : 0;
3094}
3095
3096DEVICE_ATTR_WO(s3_debug);
3097
3098#endif
3099
3100static int dm_early_init(void *handle)
3101{
3102 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3103
3104 switch (adev->asic_type) {
3105 case CHIP_BONAIRE:
3106 case CHIP_HAWAII:
3107 adev->mode_info.num_crtc = 6;
3108 adev->mode_info.num_hpd = 6;
3109 adev->mode_info.num_dig = 6;
3110 break;
3111 case CHIP_KAVERI:
3112 adev->mode_info.num_crtc = 4;
3113 adev->mode_info.num_hpd = 6;
3114 adev->mode_info.num_dig = 7;
3115 break;
3116 case CHIP_KABINI:
3117 case CHIP_MULLINS:
3118 adev->mode_info.num_crtc = 2;
3119 adev->mode_info.num_hpd = 6;
3120 adev->mode_info.num_dig = 6;
3121 break;
3122 case CHIP_FIJI:
3123 case CHIP_TONGA:
3124 adev->mode_info.num_crtc = 6;
3125 adev->mode_info.num_hpd = 6;
3126 adev->mode_info.num_dig = 7;
3127 break;
3128 case CHIP_CARRIZO:
3129 adev->mode_info.num_crtc = 3;
3130 adev->mode_info.num_hpd = 6;
3131 adev->mode_info.num_dig = 9;
3132 break;
3133 case CHIP_STONEY:
3134 adev->mode_info.num_crtc = 2;
3135 adev->mode_info.num_hpd = 6;
3136 adev->mode_info.num_dig = 9;
3137 break;
3138 case CHIP_POLARIS11:
3139 case CHIP_POLARIS12:
3140 adev->mode_info.num_crtc = 5;
3141 adev->mode_info.num_hpd = 5;
3142 adev->mode_info.num_dig = 5;
3143 break;
3144 case CHIP_POLARIS10:
3145 case CHIP_VEGAM:
3146 adev->mode_info.num_crtc = 6;
3147 adev->mode_info.num_hpd = 6;
3148 adev->mode_info.num_dig = 6;
3149 break;
3150 case CHIP_VEGA10:
3151 case CHIP_VEGA12:
3152 case CHIP_VEGA20:
3153 adev->mode_info.num_crtc = 6;
3154 adev->mode_info.num_hpd = 6;
3155 adev->mode_info.num_dig = 6;
3156 break;
3157#if defined(CONFIG_DRM_AMD_DC_DCN)
3158 case CHIP_RAVEN:
3159 adev->mode_info.num_crtc = 4;
3160 adev->mode_info.num_hpd = 4;
3161 adev->mode_info.num_dig = 4;
3162 break;
3163#endif
3164 case CHIP_NAVI10:
3165 case CHIP_NAVI12:
3166 adev->mode_info.num_crtc = 6;
3167 adev->mode_info.num_hpd = 6;
3168 adev->mode_info.num_dig = 6;
3169 break;
3170 case CHIP_NAVI14:
3171 adev->mode_info.num_crtc = 5;
3172 adev->mode_info.num_hpd = 5;
3173 adev->mode_info.num_dig = 5;
3174 break;
3175 case CHIP_RENOIR:
3176 adev->mode_info.num_crtc = 4;
3177 adev->mode_info.num_hpd = 4;
3178 adev->mode_info.num_dig = 4;
3179 break;
3180 default:
3181 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3182 return -EINVAL;
3183 }
3184
3185 amdgpu_dm_set_irq_funcs(adev);
3186
3187 if (adev->mode_info.funcs == NULL)
3188 adev->mode_info.funcs = &dm_display_funcs;
3189
3190 /*
3191 * Note: Do NOT change adev->audio_endpt_rreg and
3192 * adev->audio_endpt_wreg because they are initialised in
3193 * amdgpu_device_init()
3194 */
3195#if defined(CONFIG_DEBUG_KERNEL_DC)
3196 device_create_file(
3197 adev->ddev->dev,
3198 &dev_attr_s3_debug);
3199#endif
3200
3201 return 0;
3202}
3203
3204static bool modeset_required(struct drm_crtc_state *crtc_state,
3205 struct dc_stream_state *new_stream,
3206 struct dc_stream_state *old_stream)
3207{
3208 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3209 return false;
3210
3211 if (!crtc_state->enable)
3212 return false;
3213
3214 return crtc_state->active;
3215}
3216
3217static bool modereset_required(struct drm_crtc_state *crtc_state)
3218{
3219 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3220 return false;
3221
3222 return !crtc_state->enable || !crtc_state->active;
3223}
3224
3225static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3226{
3227 drm_encoder_cleanup(encoder);
3228 kfree(encoder);
3229}
3230
3231static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3232 .destroy = amdgpu_dm_encoder_destroy,
3233};
3234
3235
3236static int fill_dc_scaling_info(const struct drm_plane_state *state,
3237 struct dc_scaling_info *scaling_info)
3238{
3239 int scale_w, scale_h;
3240
3241 memset(scaling_info, 0, sizeof(*scaling_info));
3242
3243 /* Source is fixed 16.16 but we ignore mantissa for now... */
3244 scaling_info->src_rect.x = state->src_x >> 16;
3245 scaling_info->src_rect.y = state->src_y >> 16;
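 /*
 * Illustrative note (not from the original source): DRM plane source
 * coordinates are 16.16 fixed point, so a src_w of 1920 << 16
 * (0x07800000) shifts down to 1920 whole pixels here.
 */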
3246
3247 scaling_info->src_rect.width = state->src_w >> 16;
3248 if (scaling_info->src_rect.width == 0)
3249 return -EINVAL;
3250
3251 scaling_info->src_rect.height = state->src_h >> 16;
3252 if (scaling_info->src_rect.height == 0)
3253 return -EINVAL;
3254
3255 scaling_info->dst_rect.x = state->crtc_x;
3256 scaling_info->dst_rect.y = state->crtc_y;
3257
3258 if (state->crtc_w == 0)
3259 return -EINVAL;
3260
3261 scaling_info->dst_rect.width = state->crtc_w;
3262
3263 if (state->crtc_h == 0)
3264 return -EINVAL;
3265
3266 scaling_info->dst_rect.height = state->crtc_h;
3267
3268 /* DRM doesn't specify clipping on destination output. */
3269 scaling_info->clip_rect = scaling_info->dst_rect;
3270
3271 /* TODO: Validate scaling per-format with DC plane caps */
3272 scale_w = scaling_info->dst_rect.width * 1000 /
3273 scaling_info->src_rect.width;
3274
3275 if (scale_w < 250 || scale_w > 16000)
3276 return -EINVAL;
3277
3278 scale_h = scaling_info->dst_rect.height * 1000 /
3279 scaling_info->src_rect.height;
3280
3281 if (scale_h < 250 || scale_h > 16000)
3282 return -EINVAL;
3283
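 /*
 * Illustrative note (not from the original source): the 250..16000
 * bounds express scale factors in units of 1/1000, i.e. anything from
 * 0.25x up to 16x is accepted; a 1920-wide source shrunk to a 480-wide
 * destination is exactly scale_w = 250 and still passes.
 */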
3284 /*
3285 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3286 * assume reasonable defaults based on the format.
3287 */
3288
3289 return 0;
3290}
3291
3292static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3293 uint64_t *tiling_flags)
3294{
3295 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3296 int r = amdgpu_bo_reserve(rbo, false);
3297
3298 if (unlikely(r)) {
3299 /* Don't show error message when returning -ERESTARTSYS */
3300 if (r != -ERESTARTSYS)
3301 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3302 return r;
3303 }
3304
3305 if (tiling_flags)
3306 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3307
3308 amdgpu_bo_unreserve(rbo);
3309
3310 return r;
3311}
3312
3313static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3314{
3315 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3316
3317 return offset ? (address + offset * 256) : 0;
3318}
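/*
 * Illustrative note (not from the original source): DCC_OFFSET_256B is
 * stored in 256-byte units, so an encoded offset of 4 places the DCC
 * metadata at address + 4 * 256 = address + 1024; offset 0 means the
 * buffer carries no DCC surface at all.
 */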
3319
3320static int
3321fill_plane_dcc_attributes(struct amdgpu_device *adev,
3322 const struct amdgpu_framebuffer *afb,
3323 const enum surface_pixel_format format,
3324 const enum dc_rotation_angle rotation,
3325 const struct plane_size *plane_size,
3326 const union dc_tiling_info *tiling_info,
3327 const uint64_t info,
3328 struct dc_plane_dcc_param *dcc,
3329 struct dc_plane_address *address)
3330{
3331 struct dc *dc = adev->dm.dc;
3332 struct dc_dcc_surface_param input;
3333 struct dc_surface_dcc_cap output;
3334 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3335 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3336 uint64_t dcc_address;
3337
3338 memset(&input, 0, sizeof(input));
3339 memset(&output, 0, sizeof(output));
3340
3341 if (!offset)
3342 return 0;
3343
3344 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3345 return 0;
3346
3347 if (!dc->cap_funcs.get_dcc_compression_cap)
3348 return -EINVAL;
3349
3350 input.format = format;
3351 input.surface_size.width = plane_size->surface_size.width;
3352 input.surface_size.height = plane_size->surface_size.height;
3353 input.swizzle_mode = tiling_info->gfx9.swizzle;
3354
3355 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3356 input.scan = SCAN_DIRECTION_HORIZONTAL;
3357 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3358 input.scan = SCAN_DIRECTION_VERTICAL;
3359
3360 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3361 return -EINVAL;
3362
3363 if (!output.capable)
3364 return -EINVAL;
3365
3366 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3367 return -EINVAL;
3368
3369 dcc->enable = 1;
3370 dcc->meta_pitch =
3371 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3372 dcc->independent_64b_blks = i64b;
3373
3374 dcc_address = get_dcc_address(afb->address, info);
3375 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3376 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3377
3378 return 0;
3379}
3380
3381static int
3382fill_plane_buffer_attributes(struct amdgpu_device *adev,
3383 const struct amdgpu_framebuffer *afb,
3384 const enum surface_pixel_format format,
3385 const enum dc_rotation_angle rotation,
3386 const uint64_t tiling_flags,
3387 union dc_tiling_info *tiling_info,
3388 struct plane_size *plane_size,
3389 struct dc_plane_dcc_param *dcc,
3390 struct dc_plane_address *address)
3391{
3392 const struct drm_framebuffer *fb = &afb->base;
3393 int ret;
3394
3395 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 3396 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 3397 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
3398 memset(address, 0, sizeof(*address));
3399
695af5f9 3400 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
12e2b2d4
DL
3401 plane_size->surface_size.x = 0;
3402 plane_size->surface_size.y = 0;
3403 plane_size->surface_size.width = fb->width;
3404 plane_size->surface_size.height = fb->height;
3405 plane_size->surface_pitch =
320932bf
NK
3406 fb->pitches[0] / fb->format->cpp[0];
3407
e0634e8d
NK
3408 address->type = PLN_ADDR_TYPE_GRAPHICS;
3409 address->grph.addr.low_part = lower_32_bits(afb->address);
3410 address->grph.addr.high_part = upper_32_bits(afb->address);
1894478a 3411 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
1791e54f 3412 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 3413
12e2b2d4
DL
3414 plane_size->surface_size.x = 0;
3415 plane_size->surface_size.y = 0;
3416 plane_size->surface_size.width = fb->width;
3417 plane_size->surface_size.height = fb->height;
3418 plane_size->surface_pitch =
320932bf
NK
3419 fb->pitches[0] / fb->format->cpp[0];
3420
12e2b2d4
DL
3421 plane_size->chroma_size.x = 0;
3422 plane_size->chroma_size.y = 0;
320932bf 3423 /* TODO: set these based on surface format */
12e2b2d4
DL
3424 plane_size->chroma_size.width = fb->width / 2;
3425 plane_size->chroma_size.height = fb->height / 2;
320932bf 3426
12e2b2d4 3427 plane_size->chroma_pitch =
320932bf
NK
3428 fb->pitches[1] / fb->format->cpp[1];
3429
e0634e8d
NK
3430 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3431 address->video_progressive.luma_addr.low_part =
3432 lower_32_bits(afb->address);
3433 address->video_progressive.luma_addr.high_part =
3434 upper_32_bits(afb->address);
3435 address->video_progressive.chroma_addr.low_part =
3436 lower_32_bits(chroma_addr);
3437 address->video_progressive.chroma_addr.high_part =
3438 upper_32_bits(chroma_addr);
3439 }
09e5665a
NK
3440
3441 /* Fill GFX8 params */
3442 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3443 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3444
3445 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3446 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3447 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3448 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3449 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3450
3451 /* XXX fix me for VI */
3452 tiling_info->gfx8.num_banks = num_banks;
3453 tiling_info->gfx8.array_mode =
3454 DC_ARRAY_2D_TILED_THIN1;
3455 tiling_info->gfx8.tile_split = tile_split;
3456 tiling_info->gfx8.bank_width = bankw;
3457 tiling_info->gfx8.bank_height = bankh;
3458 tiling_info->gfx8.tile_aspect = mtaspect;
3459 tiling_info->gfx8.tile_mode =
3460 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3461 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3462 == DC_ARRAY_1D_TILED_THIN1) {
3463 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3464 }
3465
3466 tiling_info->gfx8.pipe_config =
3467 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3468
3469 if (adev->asic_type == CHIP_VEGA10 ||
3470 adev->asic_type == CHIP_VEGA12 ||
3471 adev->asic_type == CHIP_VEGA20 ||
476e955d 3472 adev->asic_type == CHIP_NAVI10 ||
fce651e3 3473 adev->asic_type == CHIP_NAVI14 ||
fbd2afe5 3474 adev->asic_type == CHIP_NAVI12 ||
30221ad8 3475 adev->asic_type == CHIP_RENOIR ||
09e5665a
NK
3476 adev->asic_type == CHIP_RAVEN) {
3477 /* Fill GFX9 params */
3478 tiling_info->gfx9.num_pipes =
3479 adev->gfx.config.gb_addr_config_fields.num_pipes;
3480 tiling_info->gfx9.num_banks =
3481 adev->gfx.config.gb_addr_config_fields.num_banks;
3482 tiling_info->gfx9.pipe_interleave =
3483 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3484 tiling_info->gfx9.num_shader_engines =
3485 adev->gfx.config.gb_addr_config_fields.num_se;
3486 tiling_info->gfx9.max_compressed_frags =
3487 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3488 tiling_info->gfx9.num_rb_per_se =
3489 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3490 tiling_info->gfx9.swizzle =
3491 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3492 tiling_info->gfx9.shaderEnable = 1;
3493
695af5f9
NK
3494 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3495 plane_size, tiling_info,
3496 tiling_flags, dcc, address);
09e5665a
NK
3497 if (ret)
3498 return ret;
3499 }
3500
3501 return 0;
7df7e505
NK
3502}
3503
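/*
 * Derive DC blending settings from the DRM plane state: per-pixel alpha
 * is enabled for premultiplied alpha formats on overlay planes, and a
 * plane-wide global alpha factor is taken from the 16-bit alpha
 * property, scaled down to 8 bits.
 */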
static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

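/*
 * Map the DRM color encoding and range plane properties onto a DC color
 * space. RGB formats always use sRGB; YCbCr formats select BT.601, 709
 * or 2020 in full or limited range, rejecting unsupported combinations.
 */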
static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

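/*
 * Build the DC plane_info (format, rotation, tiling, sizing, blending)
 * and the surface address for a DRM plane state, rejecting framebuffer
 * formats the display hardware cannot scan out.
 */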
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

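/*
 * Populate a dc_plane_state from DRM plane and CRTC state: scaling
 * rects, surface address, buffer/tiling attributes, blending, and the
 * input transfer function for color management.
 */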
static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	const struct amdgpu_framebuffer *amdgpu_fb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	uint64_t tiling_flags;
	int ret;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	ret = get_fb_info(amdgpu_fb, &tiling_flags);
	if (ret)
		return ret;

	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
					  &plane_info,
					  &dc_plane_state->address);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

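/*
 * Compute the stream src (viewport) and dst (addressable area) rects
 * from the requested scaling mode (full, aspect, center) and apply any
 * underscan borders from the connector state.
 */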
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			 dst.x, dst.y, dst.width, dst.height);

}

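/*
 * Pick the stream color depth from the EDID capabilities (including the
 * HDMI 2.0 HF-VSDB deep-color bits for YCbCr 4:2:0), then clamp it to
 * the user-requested max_requested_bpc and round down to an even value.
 */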
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      const struct drm_connector_state *state,
				      bool is_y420)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (!state)
		state = connector->state;

	if (state) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min(bpc, state->max_requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary work around: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27.03 MHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; use YCbCr709 above it and
		 * YCbCr601 below it.
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

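/*
 * Step the display color depth down until the normalized pixel clock
 * fits within the sink's max_tmds_clock. Returns true when a depth that
 * fits was found and applied, false otherwise.
 */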
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjust pixel clock per the HDMI spec, based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}

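/*
 * Translate a DRM display mode into DC stream timing: pixel encoding,
 * color depth, sync polarities, VIC codes and active/porch geometry.
 * When old_stream is given, its VIC and sync polarities are reused
 * instead of being rederived from the mode.
 */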
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector, connector_state,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}

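/*
 * Copy the sink's EDID audio capabilities (modes, sample rates/sizes,
 * speaker allocation and latencies) into the DC audio_info structure.
 */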
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}

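/*
 * Create a virtual DC sink on the connector's link so a stream can
 * still be constructed when no physical sink has been detected, e.g.
 * for headless or forced-on connectors.
 */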
static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

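/*
 * Per-frame CRTC master sync: the stream with the highest refresh rate
 * is chosen as the master, and every stream with triggered CRTC reset
 * enabled resynchronizes its timing on the master's rising VSYNC.
 */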
static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}

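/*
 * Build a dc_stream_state for the connector's sink from the requested
 * mode: choose the timing source (preferred mode or old stream), then
 * apply scaling, DSC, audio, PSR and infoframe settings.
 */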
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;

	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not have been filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream);

	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);

	if (stream->link->psr_feature_enabled) {
		struct dc *core_dc = stream->link->ctx->dc;

		if (dc_is_dmcu_initialized(core_dc)) {
			struct dmcu *dmcu = core_dc->res_pool->dmcu;

			stream->psr_version = dmcu->dmcu_version.psr_version;
			mod_build_vsc_infopacket(stream,
						 &stream->vsc_infopacket,
						 &stream->use_vsc_sdp_for_colorimetry);
		}
	}
finish:
	dc_sink_release(sink);

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: Destroy the dc_stream here once stream objects are flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	crtc->state = &state->base;
	crtc->state->crtc = crtc;
}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->interrupts_enabled = cur->interrupts_enabled;
	state->vrr_params = cur->vrr_params;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;

	/* TODO: Duplicate the dc_stream here once stream objects are flattened */

	return &state->base;
}

4402
d2574c33
MK
4403static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4404{
4405 enum dc_irq_source irq_source;
4406 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4407 struct amdgpu_device *adev = crtc->dev->dev_private;
4408 int rc;
4409
3a2ce8d6
LL
4410 /* Do not set vupdate for DCN hardware */
4411 if (adev->family > AMDGPU_FAMILY_AI)
4412 return 0;
4413
d2574c33
MK
4414 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4415
4416 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4417
4418 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4419 acrtc->crtc_id, enable ? "en" : "dis", rc);
4420 return rc;
4421}
589d2739
HW
4422
4423static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4424{
4425 enum dc_irq_source irq_source;
4426 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4427 struct amdgpu_device *adev = crtc->dev->dev_private;
d2574c33
MK
4428 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4429 int rc = 0;
4430
4431 if (enable) {
4432 /* vblank irq on -> Only need vupdate irq in vrr mode */
4433 if (amdgpu_dm_vrr_active(acrtc_state))
4434 rc = dm_set_vupdate_irq(crtc, true);
4435 } else {
4436 /* vblank irq off -> vupdate irq off */
4437 rc = dm_set_vupdate_irq(crtc, false);
4438 }
4439
4440 if (rc)
4441 return rc;
589d2739
HW
4442
4443 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
a0e30392 4444 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
589d2739
HW
4445}
4446
4447static int dm_enable_vblank(struct drm_crtc *crtc)
4448{
4449 return dm_set_vblank(crtc, true);
4450}
4451
4452static void dm_disable_vblank(struct drm_crtc *crtc)
4453{
4454 dm_set_vblank(crtc, false);
4455}
4456
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}

	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of headless boot with force on for DP managed connector
	 * those settings have to be != 0 to get initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	enum dc_status dc_result = DC_OK;

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto fail;
	}

	dc_result = dc_validate_stream(adev->dm.dc, stream);

	if (dc_result == DC_OK)
		result = MODE_OK;
	else
		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
			      mode->hdisplay,
			      mode->vdisplay,
			      mode->clock,
			      dc_result);

	dc_stream_release(stream);

fail:
	/* TODO: error handling */
	return result;
}

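/*
 * Pack the connector's HDR output metadata into the DC info packet
 * layout: a Dynamic Range and Mastering infoframe for HDMI, or an SDP
 * for DP/eDP. The payload is a fixed 26 bytes plus a 4-byte header.
 */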
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}

static bool
is_hdr_metadata_different(const struct drm_connector_state *old_state,
			  const struct drm_connector_state *new_state)
{
	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;

	if (old_blob != new_blob) {
		if (old_blob && new_blob &&
		    old_blob->length == new_blob->length)
			return memcmp(old_blob->data, new_blob->data,
				      old_blob->length);

		return true;
	}

	return false;
}

4908
4909static int
4910amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 4911 struct drm_atomic_state *state)
88694af9 4912{
51e857af
SP
4913 struct drm_connector_state *new_con_state =
4914 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
4915 struct drm_connector_state *old_con_state =
4916 drm_atomic_get_old_connector_state(state, conn);
4917 struct drm_crtc *crtc = new_con_state->crtc;
4918 struct drm_crtc_state *new_crtc_state;
4919 int ret;
4920
4921 if (!crtc)
4922 return 0;
4923
4924 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4925 struct dc_info_packet hdr_infopacket;
4926
4927 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4928 if (ret)
4929 return ret;
4930
4931 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4932 if (IS_ERR(new_crtc_state))
4933 return PTR_ERR(new_crtc_state);
4934
4935 /*
4936 * DC considers the stream backends changed if the
4937 * static metadata changes. Forcing the modeset also
4938 * gives a simple way for userspace to switch from
b232d4ed
NK
4939 * 8bpc to 10bpc when setting the metadata to enter
4940 * or exit HDR.
4941 *
4942 * Changing the static metadata after it's been
4943 * set is permissible, however. So only force a
4944 * modeset if we're entering or exiting HDR.
88694af9 4945 */
b232d4ed
NK
4946 new_crtc_state->mode_changed =
4947 !old_con_state->hdr_output_metadata ||
4948 !new_con_state->hdr_output_metadata;
88694af9
NK
4949 }
4950
4951 return 0;
4952}
4953
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger
	 * resolution modes will be filtered by drm_mode_validate_size(),
	 * and those modes are missing after the user starts lightdm. So we
	 * need to renew the modes list in the get_modes callback, not just
	 * return the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = new_crtc_state->crtc->dev;
	struct drm_plane *plane;

	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			return true;
	}

	return false;
}

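/*
 * Count the non-cursor planes that will be enabled on the CRTC after
 * this commit. Planes without a new state are assumed to stay enabled;
 * planes with one count only if they have a framebuffer attached.
 */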
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}

/*
 * Sets whether interrupts should be enabled on a specific CRTC.
 * We require that the stream be enabled and that there exist active
 * DC planes on the stream.
 */
static void
dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
			       struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;
	dm_new_crtc_state->interrupts_enabled = false;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);

	dm_new_crtc_state->interrupts_enabled =
		dm_new_crtc_state->active_planes > 0;
}

3ee6b26b
AD
5041static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5042 struct drm_crtc_state *state)
e7b07cee
HW
5043{
5044 struct amdgpu_device *adev = crtc->dev->dev_private;
5045 struct dc *dc = adev->dm.dc;
5046 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5047 int ret = -EINVAL;
5048
d6ef9b41
NK
5049 /*
5050 * Update interrupt state for the CRTC. This needs to happen whenever
5051 * the CRTC has changed or whenever any of its planes have changed.
5052 * Atomic check satisfies both of these requirements since the CRTC
5053 * is added to the state by DRM during drm_atomic_helper_check_planes.
5054 */
5055 dm_update_crtc_interrupt_state(crtc, state);
5056
9b690ef3
BL
5057 if (unlikely(!dm_crtc_state->stream &&
5058 modeset_required(state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
5059 WARN_ON(1);
5060 return ret;
5061 }
5062
1f6010a9 5063 /* In some use cases, like reset, no stream is attached */
e7b07cee
HW
5064 if (!dm_crtc_state->stream)
5065 return 0;
5066
bc92c065
NK
5067 /*
5068 * We require at least one non-cursor hardware plane to be enabled
5069 * in order to use the stream with a cursor enabled.
5070 */
c14a005c 5071 if (state->enable && state->active &&
bc92c065 5072 does_crtc_have_active_cursor(state) &&
d6ef9b41 5073 dm_crtc_state->active_planes == 0)
c14a005c
NK
5074 return -EINVAL;
5075
62c933f9 5076 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
5077 return 0;
5078
5079 return ret;
5080}
5081
3ee6b26b
AD
5082static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5083 const struct drm_display_mode *mode,
5084 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
5085{
5086 return true;
5087}
5088
5089static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5090 .disable = dm_crtc_helper_disable,
5091 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
5092 .mode_fixup = dm_crtc_helper_mode_fixup,
5093 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
5094};
5095
5096static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5097{
5098
5099}
5100
3261e013
ML
5101static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5102{
5103 switch (display_color_depth) {
5104 case COLOR_DEPTH_666:
5105 return 6;
5106 case COLOR_DEPTH_888:
5107 return 8;
5108 case COLOR_DEPTH_101010:
5109 return 10;
5110 case COLOR_DEPTH_121212:
5111 return 12;
5112 case COLOR_DEPTH_141414:
5113 return 14;
5114 case COLOR_DEPTH_161616:
5115 return 16;
5116 default:
5117 break;
5118 }
5119 return 0;
5120}
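
/*
 * Usage note (illustrative): callers such as dm_encoder_helper_atomic_check()
 * below multiply the returned bpc by 3 to get an RGB bpp, e.g.
 * COLOR_DEPTH_101010 -> 10 bpc -> 30 bpp.
 */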
5121
3ee6b26b
AD
5122static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5123 struct drm_crtc_state *crtc_state,
5124 struct drm_connector_state *conn_state)
e7b07cee 5125{
3261e013
ML
5126 struct drm_atomic_state *state = crtc_state->state;
5127 struct drm_connector *connector = conn_state->connector;
5128 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5129 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5130 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5131 struct drm_dp_mst_topology_mgr *mst_mgr;
5132 struct drm_dp_mst_port *mst_port;
5133 enum dc_color_depth color_depth;
5134 int clock, bpp = 0;
1bc22f20 5135 bool is_y420 = false;
3261e013
ML
5136
5137 if (!aconnector->port || !aconnector->dc_sink)
5138 return 0;
5139
5140 mst_port = aconnector->port;
5141 mst_mgr = &aconnector->mst_port->mst_mgr;
5142
5143 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5144 return 0;
5145
5146 if (!state->duplicated) {
1bc22f20
SW
5147 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5148 aconnector->force_yuv420_output;
5149 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5150 is_y420);
3261e013
ML
5151 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5152 clock = adjusted_mode->clock;
dc48529f 5153 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
5154 }
5155 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5156 mst_mgr,
5157 mst_port,
1c6c1cb5
ML
5158 dm_new_connector_state->pbn,
5159 0);
3261e013
ML
5160 if (dm_new_connector_state->vcpi_slots < 0) {
5161 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5162 return dm_new_connector_state->vcpi_slots;
5163 }
e7b07cee
HW
5164 return 0;
5165}
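
/*
 * Worked example (illustrative, assuming the drm_dp_calc_pbn_mode() formula
 * pbn = ceil(clock_khz * bpp * 64 * 1006 / (8 * 54 * 10^6))): for a
 * 1920x1080@60 mode (clock = 148500 kHz) at 8 bpc RGB (bpp = 8 * 3 = 24),
 * this yields about 532 PBN, which drm_dp_atomic_find_vcpi_slots() then
 * converts into VCPI time slots against the MST link budget.
 */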
5166
5167const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5168 .disable = dm_encoder_helper_disable,
5169 .atomic_check = dm_encoder_helper_atomic_check
5170};
5171
d9fe1a4c 5172#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
5173static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5174 struct dc_state *dc_state)
5175{
5176 struct dc_stream_state *stream = NULL;
5177 struct drm_connector *connector;
5178 struct drm_connector_state *new_con_state, *old_con_state;
5179 struct amdgpu_dm_connector *aconnector;
5180 struct dm_connector_state *dm_conn_state;
5181 int i, j, clock, bpp;
5182 int vcpi, pbn_div, pbn = 0;
5183
5184 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5185
5186 aconnector = to_amdgpu_dm_connector(connector);
5187
5188 if (!aconnector->port)
5189 continue;
5190
5191 if (!new_con_state || !new_con_state->crtc)
5192 continue;
5193
5194 dm_conn_state = to_dm_connector_state(new_con_state);
5195
5196 for (j = 0; j < dc_state->stream_count; j++) {
5197 stream = dc_state->streams[j];
5198 if (!stream)
5199 continue;
5200
5201 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5202 break;
5203
5204 stream = NULL;
5205 }
5206
5207 if (!stream)
5208 continue;
5209
5210 if (stream->timing.flags.DSC != 1) {
5211 drm_dp_mst_atomic_enable_dsc(state,
5212 aconnector->port,
5213 dm_conn_state->pbn,
5214 0,
5215 false);
5216 continue;
5217 }
5218
5219 pbn_div = dm_mst_get_pbn_divider(stream->link);
5220 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5221 clock = stream->timing.pix_clk_100hz / 10;
5222 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5223 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5224 aconnector->port,
5225 pbn, pbn_div,
5226 true);
5227 if (vcpi < 0)
5228 return vcpi;
5229
5230 dm_conn_state->pbn = pbn;
5231 dm_conn_state->vcpi_slots = vcpi;
5232 }
5233 return 0;
5234}
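
/*
 * Note (illustrative assumption based on the drm_dp_calc_pbn_mode(..., true)
 * call above): for DSC streams, dsc_cfg.bits_per_pixel is expressed in units
 * of 1/16 bpp, so a 12 bpp target is stored as 192 and divided back out
 * inside the PBN calculation.
 */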
d9fe1a4c 5235#endif
29b9ba74 5236
e7b07cee
HW
5237static void dm_drm_plane_reset(struct drm_plane *plane)
5238{
5239 struct dm_plane_state *amdgpu_state = NULL;
5240
5241 if (plane->state)
5242 plane->funcs->atomic_destroy_state(plane, plane->state);
5243
5244 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 5245 WARN_ON(amdgpu_state == NULL);
1f6010a9 5246
7ddaef96
NK
5247 if (amdgpu_state)
5248 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
5249}
5250
5251static struct drm_plane_state *
5252dm_drm_plane_duplicate_state(struct drm_plane *plane)
5253{
5254 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5255
5256 old_dm_plane_state = to_dm_plane_state(plane->state);
5257 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5258 if (!dm_plane_state)
5259 return NULL;
5260
5261 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5262
3be5262e
HW
5263 if (old_dm_plane_state->dc_state) {
5264 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5265 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
5266 }
5267
5268 return &dm_plane_state->base;
5269}
5270
5271void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 5272 struct drm_plane_state *state)
e7b07cee
HW
5273{
5274 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5275
3be5262e
HW
5276 if (dm_plane_state->dc_state)
5277 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 5278
0627bbd3 5279 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
5280}
5281
5282static const struct drm_plane_funcs dm_plane_funcs = {
5283 .update_plane = drm_atomic_helper_update_plane,
5284 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 5285 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
5286 .reset = dm_drm_plane_reset,
5287 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5288 .atomic_destroy_state = dm_drm_plane_destroy_state,
5289};
5290
3ee6b26b
AD
5291static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5292 struct drm_plane_state *new_state)
e7b07cee
HW
5293{
5294 struct amdgpu_framebuffer *afb;
5295 struct drm_gem_object *obj;
5d43be0c 5296 struct amdgpu_device *adev;
e7b07cee 5297 struct amdgpu_bo *rbo;
e7b07cee 5298 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
5299 struct list_head list;
5300 struct ttm_validate_buffer tv;
5301 struct ww_acquire_ctx ticket;
e0634e8d 5302 uint64_t tiling_flags;
5d43be0c
CK
5303 uint32_t domain;
5304 int r;
e7b07cee
HW
5305
5306 dm_plane_state_old = to_dm_plane_state(plane->state);
5307 dm_plane_state_new = to_dm_plane_state(new_state);
5308
5309 if (!new_state->fb) {
f1ad2f5e 5310 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
5311 return 0;
5312 }
5313
5314 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 5315 obj = new_state->fb->obj[0];
e7b07cee 5316 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 5317 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
5318 INIT_LIST_HEAD(&list);
5319
5320 tv.bo = &rbo->tbo;
5321 tv.num_shared = 1;
5322 list_add(&tv.head, &list);
5323
9165fb87 5324 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
5325 if (r) {
5326 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 5327 return r;
0f257b09 5328 }
e7b07cee 5329
5d43be0c 5330 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 5331 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
5332 else
5333 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 5334
7b7c6c81 5335 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 5336 if (unlikely(r != 0)) {
30b7c614
HW
5337 if (r != -ERESTARTSYS)
5338 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 5339 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
5340 return r;
5341 }
5342
bb812f1e
JZ
5343 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5344 if (unlikely(r != 0)) {
5345 amdgpu_bo_unpin(rbo);
0f257b09 5346 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5347 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
5348 return r;
5349 }
7df7e505
NK
5350
5351 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5352
0f257b09 5353 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5354
7b7c6c81 5355 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
5356
5357 amdgpu_bo_ref(rbo);
5358
3be5262e
HW
5359 if (dm_plane_state_new->dc_state &&
5360 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5361 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
e7b07cee 5362
320932bf 5363 fill_plane_buffer_attributes(
695af5f9
NK
5364 adev, afb, plane_state->format, plane_state->rotation,
5365 tiling_flags, &plane_state->tiling_info,
320932bf 5366 &plane_state->plane_size, &plane_state->dcc,
695af5f9 5367 &plane_state->address);
e7b07cee
HW
5368 }
5369
e7b07cee
HW
5370 return 0;
5371}
5372
3ee6b26b
AD
5373static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5374 struct drm_plane_state *old_state)
e7b07cee
HW
5375{
5376 struct amdgpu_bo *rbo;
e7b07cee
HW
5377 int r;
5378
5379 if (!old_state->fb)
5380 return;
5381
e68d14dd 5382 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
5383 r = amdgpu_bo_reserve(rbo, false);
5384 if (unlikely(r)) {
5385 DRM_ERROR("failed to reserve rbo before unpin\n");
5386 return;
b830ebc9
HW
5387 }
5388
5389 amdgpu_bo_unpin(rbo);
5390 amdgpu_bo_unreserve(rbo);
5391 amdgpu_bo_unref(&rbo);
e7b07cee
HW
5392}
5393
7578ecda
AD
5394static int dm_plane_atomic_check(struct drm_plane *plane,
5395 struct drm_plane_state *state)
cbd19488
AG
5396{
5397 struct amdgpu_device *adev = plane->dev->dev_private;
5398 struct dc *dc = adev->dm.dc;
78171832 5399 struct dm_plane_state *dm_plane_state;
695af5f9
NK
5400 struct dc_scaling_info scaling_info;
5401 int ret;
78171832
NK
5402
5403 dm_plane_state = to_dm_plane_state(state);
cbd19488 5404
3be5262e 5405 if (!dm_plane_state->dc_state)
9a3329b1 5406 return 0;
cbd19488 5407
695af5f9
NK
5408 ret = fill_dc_scaling_info(state, &scaling_info);
5409 if (ret)
5410 return ret;
a05bcff1 5411
62c933f9 5412 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
5413 return 0;
5414
5415 return -EINVAL;
5416}
5417
674e78ac
NK
5418static int dm_plane_atomic_async_check(struct drm_plane *plane,
5419 struct drm_plane_state *new_plane_state)
5420{
5421 /* Only support async updates on cursor planes. */
5422 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5423 return -EINVAL;
5424
5425 return 0;
5426}
5427
5428static void dm_plane_atomic_async_update(struct drm_plane *plane,
5429 struct drm_plane_state *new_state)
5430{
5431 struct drm_plane_state *old_state =
5432 drm_atomic_get_old_plane_state(new_state->state, plane);
5433
332af874 5434 swap(plane->state->fb, new_state->fb);
674e78ac
NK
5435
5436 plane->state->src_x = new_state->src_x;
5437 plane->state->src_y = new_state->src_y;
5438 plane->state->src_w = new_state->src_w;
5439 plane->state->src_h = new_state->src_h;
5440 plane->state->crtc_x = new_state->crtc_x;
5441 plane->state->crtc_y = new_state->crtc_y;
5442 plane->state->crtc_w = new_state->crtc_w;
5443 plane->state->crtc_h = new_state->crtc_h;
5444
5445 handle_cursor_update(plane, old_state);
5446}
5447
e7b07cee
HW
5448static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5449 .prepare_fb = dm_plane_helper_prepare_fb,
5450 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 5451 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
5452 .atomic_async_check = dm_plane_atomic_async_check,
5453 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
5454};
5455
5456/*
5457 * TODO: these are currently initialized to rgb formats only.
5458 * For future use cases we should either initialize them dynamically based on
5459 * plane capabilities, or initialize this array to all formats, so internal drm
1f6010a9 5460 * check will succeed, and let DC implement proper check
e7b07cee 5461 */
d90371b0 5462static const uint32_t rgb_formats[] = {
e7b07cee
HW
5463 DRM_FORMAT_XRGB8888,
5464 DRM_FORMAT_ARGB8888,
5465 DRM_FORMAT_RGBA8888,
5466 DRM_FORMAT_XRGB2101010,
5467 DRM_FORMAT_XBGR2101010,
5468 DRM_FORMAT_ARGB2101010,
5469 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
5470 DRM_FORMAT_XBGR8888,
5471 DRM_FORMAT_ABGR8888,
46dd9ff7 5472 DRM_FORMAT_RGB565,
e7b07cee
HW
5473};
5474
0d579c7e
NK
5475static const uint32_t overlay_formats[] = {
5476 DRM_FORMAT_XRGB8888,
5477 DRM_FORMAT_ARGB8888,
5478 DRM_FORMAT_RGBA8888,
5479 DRM_FORMAT_XBGR8888,
5480 DRM_FORMAT_ABGR8888,
7267a1a9 5481 DRM_FORMAT_RGB565
e7b07cee
HW
5482};
5483
5484static const u32 cursor_formats[] = {
5485 DRM_FORMAT_ARGB8888
5486};
5487
37c6a93b
NK
5488static int get_plane_formats(const struct drm_plane *plane,
5489 const struct dc_plane_cap *plane_cap,
5490 uint32_t *formats, int max_formats)
e7b07cee 5491{
37c6a93b
NK
5492 int i, num_formats = 0;
5493
5494 /*
5495 * TODO: Query support for each group of formats directly from
5496 * DC plane caps. This will require adding more formats to the
5497 * caps list.
5498 */
e7b07cee 5499
f180b4bc 5500 switch (plane->type) {
e7b07cee 5501 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
5502 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5503 if (num_formats >= max_formats)
5504 break;
5505
5506 formats[num_formats++] = rgb_formats[i];
5507 }
5508
ea36ad34 5509 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 5510 formats[num_formats++] = DRM_FORMAT_NV12;
e7b07cee 5511 break;
37c6a93b 5512
e7b07cee 5513 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
5514 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5515 if (num_formats >= max_formats)
5516 break;
5517
5518 formats[num_formats++] = overlay_formats[i];
5519 }
e7b07cee 5520 break;
37c6a93b 5521
e7b07cee 5522 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
5523 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5524 if (num_formats >= max_formats)
5525 break;
5526
5527 formats[num_formats++] = cursor_formats[i];
5528 }
e7b07cee
HW
5529 break;
5530 }
5531
37c6a93b
NK
5532 return num_formats;
5533}
5534
5535static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5536 struct drm_plane *plane,
5537 unsigned long possible_crtcs,
5538 const struct dc_plane_cap *plane_cap)
5539{
5540 uint32_t formats[32];
5541 int num_formats;
5542 int res = -EPERM;
5543
5544 num_formats = get_plane_formats(plane, plane_cap, formats,
5545 ARRAY_SIZE(formats));
5546
5547 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5548 &dm_plane_funcs, formats, num_formats,
5549 NULL, plane->type, NULL);
5550 if (res)
5551 return res;
5552
cc1fec57
NK
5553 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5554 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
5555 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5556 BIT(DRM_MODE_BLEND_PREMULTI);
5557
5558 drm_plane_create_alpha_property(plane);
5559 drm_plane_create_blend_mode_property(plane, blend_caps);
5560 }
5561
fc8e5230 5562 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
ea36ad34 5563 plane_cap && plane_cap->pixel_format_support.nv12) {
fc8e5230
NK
5564 /* This only affects YUV formats. */
5565 drm_plane_create_color_properties(
5566 plane,
5567 BIT(DRM_COLOR_YCBCR_BT601) |
5568 BIT(DRM_COLOR_YCBCR_BT709),
5569 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5570 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5571 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5572 }
5573
f180b4bc 5574 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 5575
96719c54 5576 /* Create (reset) the plane state */
f180b4bc
HW
5577 if (plane->funcs->reset)
5578 plane->funcs->reset(plane);
96719c54 5579
37c6a93b 5580 return 0;
e7b07cee
HW
5581}
5582
7578ecda
AD
5583static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5584 struct drm_plane *plane,
5585 uint32_t crtc_index)
e7b07cee
HW
5586{
5587 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 5588 struct drm_plane *cursor_plane;
e7b07cee
HW
5589
5590 int res = -ENOMEM;
5591
5592 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5593 if (!cursor_plane)
5594 goto fail;
5595
f180b4bc 5596 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 5597 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
5598
5599 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5600 if (!acrtc)
5601 goto fail;
5602
5603 res = drm_crtc_init_with_planes(
5604 dm->ddev,
5605 &acrtc->base,
5606 plane,
f180b4bc 5607 cursor_plane,
e7b07cee
HW
5608 &amdgpu_dm_crtc_funcs, NULL);
5609
5610 if (res)
5611 goto fail;
5612
5613 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5614
96719c54
HW
5615 /* Create (reset) the plane state */
5616 if (acrtc->base.funcs->reset)
5617 acrtc->base.funcs->reset(&acrtc->base);
5618
e7b07cee
HW
5619 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5620 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5621
5622 acrtc->crtc_id = crtc_index;
5623 acrtc->base.enabled = false;
c37e2d29 5624 acrtc->otg_inst = -1;
e7b07cee
HW
5625
5626 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
5627 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5628 true, MAX_COLOR_LUT_ENTRIES);
086247a4 5629 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e7b07cee
HW
5630
5631 return 0;
5632
5633fail:
b830ebc9
HW
5634 kfree(acrtc);
5635 kfree(cursor_plane);
e7b07cee
HW
5636 return res;
5637}
5638
5639
5640static int to_drm_connector_type(enum signal_type st)
5641{
5642 switch (st) {
5643 case SIGNAL_TYPE_HDMI_TYPE_A:
5644 return DRM_MODE_CONNECTOR_HDMIA;
5645 case SIGNAL_TYPE_EDP:
5646 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
5647 case SIGNAL_TYPE_LVDS:
5648 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
5649 case SIGNAL_TYPE_RGB:
5650 return DRM_MODE_CONNECTOR_VGA;
5651 case SIGNAL_TYPE_DISPLAY_PORT:
5652 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5653 return DRM_MODE_CONNECTOR_DisplayPort;
5654 case SIGNAL_TYPE_DVI_DUAL_LINK:
5655 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5656 return DRM_MODE_CONNECTOR_DVID;
5657 case SIGNAL_TYPE_VIRTUAL:
5658 return DRM_MODE_CONNECTOR_VIRTUAL;
5659
5660 default:
5661 return DRM_MODE_CONNECTOR_Unknown;
5662 }
5663}
5664
2b4c1c05
DV
5665static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5666{
62afb4ad
JRS
5667 struct drm_encoder *encoder;
5668
5669 /* There is only one encoder per connector */
5670 drm_connector_for_each_possible_encoder(connector, encoder)
5671 return encoder;
5672
5673 return NULL;
2b4c1c05
DV
5674}
5675
e7b07cee
HW
5676static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5677{
e7b07cee
HW
5678 struct drm_encoder *encoder;
5679 struct amdgpu_encoder *amdgpu_encoder;
5680
2b4c1c05 5681 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
5682
5683 if (encoder == NULL)
5684 return;
5685
5686 amdgpu_encoder = to_amdgpu_encoder(encoder);
5687
5688 amdgpu_encoder->native_mode.clock = 0;
5689
5690 if (!list_empty(&connector->probed_modes)) {
5691 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 5692
e7b07cee 5693 list_for_each_entry(preferred_mode,
b830ebc9
HW
5694 &connector->probed_modes,
5695 head) {
5696 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5697 amdgpu_encoder->native_mode = *preferred_mode;
5698
e7b07cee
HW
5699 break;
5700 }
5701
5702 }
5703}
5704
3ee6b26b
AD
5705static struct drm_display_mode *
5706amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5707 char *name,
5708 int hdisplay, int vdisplay)
e7b07cee
HW
5709{
5710 struct drm_device *dev = encoder->dev;
5711 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5712 struct drm_display_mode *mode = NULL;
5713 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5714
5715 mode = drm_mode_duplicate(dev, native_mode);
5716
b830ebc9 5717 if (mode == NULL)
e7b07cee
HW
5718 return NULL;
5719
5720 mode->hdisplay = hdisplay;
5721 mode->vdisplay = vdisplay;
5722 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 5723 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
5724
5725 return mode;
5726
5727}
5728
5729static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 5730 struct drm_connector *connector)
e7b07cee
HW
5731{
5732 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5733 struct drm_display_mode *mode = NULL;
5734 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
5735 struct amdgpu_dm_connector *amdgpu_dm_connector =
5736 to_amdgpu_dm_connector(connector);
e7b07cee
HW
5737 int i;
5738 int n;
5739 struct mode_size {
5740 char name[DRM_DISPLAY_MODE_LEN];
5741 int w;
5742 int h;
b830ebc9 5743 } common_modes[] = {
e7b07cee
HW
5744 { "640x480", 640, 480},
5745 { "800x600", 800, 600},
5746 { "1024x768", 1024, 768},
5747 { "1280x720", 1280, 720},
5748 { "1280x800", 1280, 800},
5749 {"1280x1024", 1280, 1024},
5750 { "1440x900", 1440, 900},
5751 {"1680x1050", 1680, 1050},
5752 {"1600x1200", 1600, 1200},
5753 {"1920x1080", 1920, 1080},
5754 {"1920x1200", 1920, 1200}
5755 };
5756
b830ebc9 5757 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
5758
5759 for (i = 0; i < n; i++) {
5760 struct drm_display_mode *curmode = NULL;
5761 bool mode_existed = false;
5762
5763 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
5764 common_modes[i].h > native_mode->vdisplay ||
5765 (common_modes[i].w == native_mode->hdisplay &&
5766 common_modes[i].h == native_mode->vdisplay))
5767 continue;
e7b07cee
HW
5768
5769 list_for_each_entry(curmode, &connector->probed_modes, head) {
5770 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 5771 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
5772 mode_existed = true;
5773 break;
5774 }
5775 }
5776
5777 if (mode_existed)
5778 continue;
5779
5780 mode = amdgpu_dm_create_common_mode(encoder,
5781 common_modes[i].name, common_modes[i].w,
5782 common_modes[i].h);
5783 drm_mode_probed_add(connector, mode);
c84dec2f 5784 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
5785 }
5786}
5787
3ee6b26b
AD
5788static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5789 struct edid *edid)
e7b07cee 5790{
c84dec2f
HW
5791 struct amdgpu_dm_connector *amdgpu_dm_connector =
5792 to_amdgpu_dm_connector(connector);
e7b07cee
HW
5793
5794 if (edid) {
5795 /* empty probed_modes */
5796 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 5797 amdgpu_dm_connector->num_modes =
e7b07cee
HW
5798 drm_add_edid_modes(connector, edid);
5799
f1e5e913
YMM
5800 /* Sort the probed modes before calling
5801 * amdgpu_dm_get_native_mode(), since an EDID can have
5802 * more than one preferred mode. Modes that appear
5803 * later in the probed mode list could be of a higher,
5804 * preferred resolution. For example, 3840x2160 in the
5805 * base EDID preferred timing and 4096x2160 as the
5806 * preferred resolution in a DID extension block later.
5807 */
5808 drm_mode_sort(&connector->probed_modes);
e7b07cee 5809 amdgpu_dm_get_native_mode(connector);
a8d8d3dc 5810 } else {
c84dec2f 5811 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 5812 }
e7b07cee
HW
5813}
5814
7578ecda 5815static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 5816{
c84dec2f
HW
5817 struct amdgpu_dm_connector *amdgpu_dm_connector =
5818 to_amdgpu_dm_connector(connector);
e7b07cee 5819 struct drm_encoder *encoder;
c84dec2f 5820 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 5821
2b4c1c05 5822 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 5823
85ee15d6 5824 if (!edid || !drm_edid_is_valid(edid)) {
1b369d3c
ML
5825 amdgpu_dm_connector->num_modes =
5826 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
5827 } else {
5828 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5829 amdgpu_dm_connector_add_common_modes(encoder, connector);
5830 }
3e332d3a 5831 amdgpu_dm_fbc_init(connector);
5099114b 5832
c84dec2f 5833 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
5834}
5835
3ee6b26b
AD
5836void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5837 struct amdgpu_dm_connector *aconnector,
5838 int connector_type,
5839 struct dc_link *link,
5840 int link_index)
e7b07cee
HW
5841{
5842 struct amdgpu_device *adev = dm->ddev->dev_private;
5843
f04bee34
NK
5844 /*
5845 * Some of the properties below require access to state, like bpc.
5846 * Allocate some default initial connector state with our reset helper.
5847 */
5848 if (aconnector->base.funcs->reset)
5849 aconnector->base.funcs->reset(&aconnector->base);
5850
e7b07cee
HW
5851 aconnector->connector_id = link_index;
5852 aconnector->dc_link = link;
5853 aconnector->base.interlace_allowed = false;
5854 aconnector->base.doublescan_allowed = false;
5855 aconnector->base.stereo_allowed = false;
5856 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5857 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 5858 aconnector->audio_inst = -1;
e7b07cee
HW
5859 mutex_init(&aconnector->hpd_lock);
5860
1f6010a9
DF
5861 /*
5862 * Configure HPD hot plug support. The connector->polled default value is 0,
b830ebc9
HW
5863 * which means HPD hot plug is not supported.
5864 */
e7b07cee
HW
5865 switch (connector_type) {
5866 case DRM_MODE_CONNECTOR_HDMIA:
5867 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 5868 aconnector->base.ycbcr_420_allowed =
9ea59d5a 5869 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
5870 break;
5871 case DRM_MODE_CONNECTOR_DisplayPort:
5872 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 5873 aconnector->base.ycbcr_420_allowed =
9ea59d5a 5874 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
5875 break;
5876 case DRM_MODE_CONNECTOR_DVID:
5877 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5878 break;
5879 default:
5880 break;
5881 }
5882
5883 drm_object_attach_property(&aconnector->base.base,
5884 dm->ddev->mode_config.scaling_mode_property,
5885 DRM_MODE_SCALE_NONE);
5886
5887 drm_object_attach_property(&aconnector->base.base,
5888 adev->mode_info.underscan_property,
5889 UNDERSCAN_OFF);
5890 drm_object_attach_property(&aconnector->base.base,
5891 adev->mode_info.underscan_hborder_property,
5892 0);
5893 drm_object_attach_property(&aconnector->base.base,
5894 adev->mode_info.underscan_vborder_property,
5895 0);
1825fd34
NK
5896
5897 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5898
4a8ca46b
RL
5899 /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
5900 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5901 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 5902
c1ee92f9
DF
5903 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5904 dc_is_dmcu_initialized(adev->dm.dc)) {
5905 drm_object_attach_property(&aconnector->base.base,
5906 adev->mode_info.abm_level_property, 0);
5907 }
bb47de73
NK
5908
5909 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
5910 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5911 connector_type == DRM_MODE_CONNECTOR_eDP) {
88694af9
NK
5912 drm_object_attach_property(
5913 &aconnector->base.base,
5914 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5915
bb47de73
NK
5916 drm_connector_attach_vrr_capable_property(
5917 &aconnector->base);
0c8620d6 5918#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 5919 if (adev->dm.hdcp_workqueue)
53e108aa 5920 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 5921#endif
bb47de73 5922 }
e7b07cee
HW
5923}
5924
7578ecda
AD
5925static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5926 struct i2c_msg *msgs, int num)
e7b07cee
HW
5927{
5928 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5929 struct ddc_service *ddc_service = i2c->ddc_service;
5930 struct i2c_command cmd;
5931 int i;
5932 int result = -EIO;
5933
b830ebc9 5934 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
5935
5936 if (!cmd.payloads)
5937 return result;
5938
5939 cmd.number_of_payloads = num;
5940 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5941 cmd.speed = 100;
5942
5943 for (i = 0; i < num; i++) {
5944 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5945 cmd.payloads[i].address = msgs[i].addr;
5946 cmd.payloads[i].length = msgs[i].len;
5947 cmd.payloads[i].data = msgs[i].buf;
5948 }
5949
c85e6e54
DF
5950 if (dc_submit_i2c(
5951 ddc_service->ctx->dc,
5952 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
5953 &cmd))
5954 result = num;
5955
5956 kfree(cmd.payloads);
5957 return result;
5958}
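
/*
 * Illustrative note: each struct i2c_msg maps 1:1 onto a DC i2c_payload
 * above, so a typical EDID read (a one-byte offset write followed by an
 * N-byte read at the usual DDC address 0x50) becomes a two-payload
 * i2c_command submitted through dc_submit_i2c().
 */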
5959
7578ecda 5960static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
5961{
5962 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
5963}
5964
5965static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
5966 .master_xfer = amdgpu_dm_i2c_xfer,
5967 .functionality = amdgpu_dm_i2c_func,
5968};
5969
3ee6b26b
AD
5970static struct amdgpu_i2c_adapter *
5971create_i2c(struct ddc_service *ddc_service,
5972 int link_index,
5973 int *res)
e7b07cee
HW
5974{
5975 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
5976 struct amdgpu_i2c_adapter *i2c;
5977
b830ebc9 5978 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
5979 if (!i2c)
5980 return NULL;
e7b07cee
HW
5981 i2c->base.owner = THIS_MODULE;
5982 i2c->base.class = I2C_CLASS_DDC;
5983 i2c->base.dev.parent = &adev->pdev->dev;
5984 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 5985 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
5986 i2c_set_adapdata(&i2c->base, i2c);
5987 i2c->ddc_service = ddc_service;
c85e6e54 5988 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
5989
5990 return i2c;
5991}
5992
89fc8d4e 5993
1f6010a9
DF
5994/*
5995 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
5996 * dc_link which will be represented by this aconnector.
5997 */
7578ecda
AD
5998static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
5999 struct amdgpu_dm_connector *aconnector,
6000 uint32_t link_index,
6001 struct amdgpu_encoder *aencoder)
e7b07cee
HW
6002{
6003 int res = 0;
6004 int connector_type;
6005 struct dc *dc = dm->dc;
6006 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6007 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
6008
6009 link->priv = aconnector;
e7b07cee 6010
f1ad2f5e 6011 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
6012
6013 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
6014 if (!i2c) {
6015 DRM_ERROR("Failed to create i2c adapter data\n");
6016 return -ENOMEM;
6017 }
6018
e7b07cee
HW
6019 aconnector->i2c = i2c;
6020 res = i2c_add_adapter(&i2c->base);
6021
6022 if (res) {
6023 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6024 goto out_free;
6025 }
6026
6027 connector_type = to_drm_connector_type(link->connector_signal);
6028
17165de2 6029 res = drm_connector_init_with_ddc(
e7b07cee
HW
6030 dm->ddev,
6031 &aconnector->base,
6032 &amdgpu_dm_connector_funcs,
17165de2
AP
6033 connector_type,
6034 &i2c->base);
e7b07cee
HW
6035
6036 if (res) {
6037 DRM_ERROR("connector_init failed\n");
6038 aconnector->connector_id = -1;
6039 goto out_free;
6040 }
6041
6042 drm_connector_helper_add(
6043 &aconnector->base,
6044 &amdgpu_dm_connector_helper_funcs);
6045
6046 amdgpu_dm_connector_init_helper(
6047 dm,
6048 aconnector,
6049 connector_type,
6050 link,
6051 link_index);
6052
cde4c44d 6053 drm_connector_attach_encoder(
e7b07cee
HW
6054 &aconnector->base, &aencoder->base);
6055
e7b07cee
HW
6056 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6057 || connector_type == DRM_MODE_CONNECTOR_eDP)
6058 amdgpu_dm_initialize_dp_connector(dm, aconnector);
6059
e7b07cee
HW
6060out_free:
6061 if (res) {
6062 kfree(i2c);
6063 aconnector->i2c = NULL;
6064 }
6065 return res;
6066}
6067
6068int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6069{
6070 switch (adev->mode_info.num_crtc) {
6071 case 1:
6072 return 0x1;
6073 case 2:
6074 return 0x3;
6075 case 3:
6076 return 0x7;
6077 case 4:
6078 return 0xf;
6079 case 5:
6080 return 0x1f;
6081 case 6:
6082 default:
6083 return 0x3f;
6084 }
6085}
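
/*
 * Illustrative equivalent (a sketch, not the driver's code): the switch above
 * builds a contiguous bitmask of the first min(num_crtc, 6) CRTCs, e.g.
 * 4 CRTCs -> 0xf, which could also be written with GENMASK() from
 * <linux/bits.h>:
 *
 *	return GENMASK(min(adev->mode_info.num_crtc, 6) - 1, 0);
 */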
6086
7578ecda
AD
6087static int amdgpu_dm_encoder_init(struct drm_device *dev,
6088 struct amdgpu_encoder *aencoder,
6089 uint32_t link_index)
e7b07cee
HW
6090{
6091 struct amdgpu_device *adev = dev->dev_private;
6092
6093 int res = drm_encoder_init(dev,
6094 &aencoder->base,
6095 &amdgpu_dm_encoder_funcs,
6096 DRM_MODE_ENCODER_TMDS,
6097 NULL);
6098
6099 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6100
6101 if (!res)
6102 aencoder->encoder_id = link_index;
6103 else
6104 aencoder->encoder_id = -1;
6105
6106 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6107
6108 return res;
6109}
6110
3ee6b26b
AD
6111static void manage_dm_interrupts(struct amdgpu_device *adev,
6112 struct amdgpu_crtc *acrtc,
6113 bool enable)
e7b07cee
HW
6114{
6115 /*
6116 * This is not a correct translation, but it will work as long as the
6117 * VBLANK constant is the same as PFLIP.
6118 */
6119 int irq_type =
734dd01d 6120 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
6121 adev,
6122 acrtc->crtc_id);
6123
6124 if (enable) {
6125 drm_crtc_vblank_on(&acrtc->base);
6126 amdgpu_irq_get(
6127 adev,
6128 &adev->pageflip_irq,
6129 irq_type);
6130 } else {
6131
6132 amdgpu_irq_put(
6133 adev,
6134 &adev->pageflip_irq,
6135 irq_type);
6136 drm_crtc_vblank_off(&acrtc->base);
6137 }
6138}
6139
3ee6b26b
AD
6140static bool
6141is_scaling_state_different(const struct dm_connector_state *dm_state,
6142 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
6143{
6144 if (dm_state->scaling != old_dm_state->scaling)
6145 return true;
6146 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6147 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6148 return true;
6149 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6150 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6151 return true;
b830ebc9
HW
6152 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6153 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6154 return true;
e7b07cee
HW
6155 return false;
6156}
6157
0c8620d6
BL
6158#ifdef CONFIG_DRM_AMD_DC_HDCP
6159static bool is_content_protection_different(struct drm_connector_state *state,
6160 const struct drm_connector_state *old_state,
6161 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6162{
6163 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6164
53e108aa
BL
6165 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6166 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6167 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6168 return true;
6169 }
6170
0c8620d6
BL
6171 /* CP is being re-enabled; ignore this. */
6172 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6173 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6174 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6175 return false;
6176 }
6177
6178 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6179 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6180 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6181 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6182
6183 /* Check if something is connected/enabled; otherwise we would start HDCP
6184 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
6185 */
6186 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6187 aconnector->dc_sink != NULL)
6188 return true;
6189
6190 if (old_state->content_protection == state->content_protection)
6191 return false;
6192
6193 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6194 return true;
6195
6196 return false;
6197}
6198
0c8620d6 6199#endif
3ee6b26b
AD
6200static void remove_stream(struct amdgpu_device *adev,
6201 struct amdgpu_crtc *acrtc,
6202 struct dc_stream_state *stream)
e7b07cee
HW
6203{
6204 /* this is the update mode case */
e7b07cee
HW
6205
6206 acrtc->otg_inst = -1;
6207 acrtc->enabled = false;
6208}
6209
7578ecda
AD
6210static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6211 struct dc_cursor_position *position)
2a8f6ccb 6212{
f4c2cc43 6213 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
6214 int x, y;
6215 int xorigin = 0, yorigin = 0;
6216
e371e19c
NK
6217 position->enable = false;
6218 position->x = 0;
6219 position->y = 0;
6220
6221 if (!crtc || !plane->state->fb)
2a8f6ccb 6222 return 0;
2a8f6ccb
HW
6223
6224 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6225 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6226 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6227 __func__,
6228 plane->state->crtc_w,
6229 plane->state->crtc_h);
6230 return -EINVAL;
6231 }
6232
6233 x = plane->state->crtc_x;
6234 y = plane->state->crtc_y;
c14a005c 6235
e371e19c
NK
6236 if (x <= -amdgpu_crtc->max_cursor_width ||
6237 y <= -amdgpu_crtc->max_cursor_height)
6238 return 0;
6239
c14a005c
NK
6240 if (crtc->primary->state) {
6241 /* avivo cursor are offset into the total surface */
6242 x += crtc->primary->state->src_x >> 16;
6243 y += crtc->primary->state->src_y >> 16;
6244 }
6245
2a8f6ccb
HW
6246 if (x < 0) {
6247 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6248 x = 0;
6249 }
6250 if (y < 0) {
6251 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6252 y = 0;
6253 }
6254 position->enable = true;
6255 position->x = x;
6256 position->y = y;
6257 position->x_hotspot = xorigin;
6258 position->y_hotspot = yorigin;
6259
6260 return 0;
6261}
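
/*
 * Worked example (illustrative values): with max_cursor_width = 128 and a
 * cursor at crtc_x = -10, the clamping above yields x = 0 with
 * x_hotspot = 10, so the hardware keeps showing the visible part of the
 * cursor as it slides off the left edge.
 */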
6262
3ee6b26b
AD
6263static void handle_cursor_update(struct drm_plane *plane,
6264 struct drm_plane_state *old_plane_state)
e7b07cee 6265{
674e78ac 6266 struct amdgpu_device *adev = plane->dev->dev_private;
2a8f6ccb
HW
6267 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6268 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6269 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6270 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6271 uint64_t address = afb ? afb->address : 0;
6272 struct dc_cursor_position position;
6273 struct dc_cursor_attributes attributes;
6274 int ret;
6275
e7b07cee
HW
6276 if (!plane->state->fb && !old_plane_state->fb)
6277 return;
6278
f1ad2f5e 6279 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
c12a7ba5
HW
6280 __func__,
6281 amdgpu_crtc->crtc_id,
6282 plane->state->crtc_w,
6283 plane->state->crtc_h);
2a8f6ccb
HW
6284
6285 ret = get_cursor_position(plane, crtc, &position);
6286 if (ret)
6287 return;
6288
6289 if (!position.enable) {
6290 /* turn off cursor */
674e78ac
NK
6291 if (crtc_state && crtc_state->stream) {
6292 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
6293 dc_stream_set_cursor_position(crtc_state->stream,
6294 &position);
674e78ac
NK
6295 mutex_unlock(&adev->dm.dc_lock);
6296 }
2a8f6ccb 6297 return;
e7b07cee 6298 }
e7b07cee 6299
2a8f6ccb
HW
6300 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6301 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6302
c1cefe11 6303 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
6304 attributes.address.high_part = upper_32_bits(address);
6305 attributes.address.low_part = lower_32_bits(address);
6306 attributes.width = plane->state->crtc_w;
6307 attributes.height = plane->state->crtc_h;
6308 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6309 attributes.rotation_angle = 0;
6310 attributes.attribute_flags.value = 0;
6311
6312 attributes.pitch = attributes.width;
6313
886daac9 6314 if (crtc_state->stream) {
674e78ac 6315 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
6316 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6317 &attributes))
6318 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 6319
2a8f6ccb
HW
6320 if (!dc_stream_set_cursor_position(crtc_state->stream,
6321 &position))
6322 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 6323 mutex_unlock(&adev->dm.dc_lock);
886daac9 6324 }
2a8f6ccb 6325}
e7b07cee
HW
6326
6327static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6328{
6329
6330 assert_spin_locked(&acrtc->base.dev->event_lock);
6331 WARN_ON(acrtc->event);
6332
6333 acrtc->event = acrtc->base.state->event;
6334
6335 /* Set the flip status */
6336 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6337
6338 /* Mark this event as consumed */
6339 acrtc->base.state->event = NULL;
6340
6341 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6342 acrtc->crtc_id);
6343}
6344
bb47de73
NK
6345static void update_freesync_state_on_stream(
6346 struct amdgpu_display_manager *dm,
6347 struct dm_crtc_state *new_crtc_state,
180db303
NK
6348 struct dc_stream_state *new_stream,
6349 struct dc_plane_state *surface,
6350 u32 flip_timestamp_in_us)
bb47de73 6351{
09aef2c4 6352 struct mod_vrr_params vrr_params;
bb47de73 6353 struct dc_info_packet vrr_infopacket = {0};
09aef2c4
MK
6354 struct amdgpu_device *adev = dm->adev;
6355 unsigned long flags;
bb47de73
NK
6356
6357 if (!new_stream)
6358 return;
6359
6360 /*
6361 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6362 * For now it's sufficient to just guard against these conditions.
6363 */
6364
6365 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6366 return;
6367
09aef2c4
MK
6368 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6369 vrr_params = new_crtc_state->vrr_params;
6370
180db303
NK
6371 if (surface) {
6372 mod_freesync_handle_preflip(
6373 dm->freesync_module,
6374 surface,
6375 new_stream,
6376 flip_timestamp_in_us,
6377 &vrr_params);
09aef2c4
MK
6378
6379 if (adev->family < AMDGPU_FAMILY_AI &&
6380 amdgpu_dm_vrr_active(new_crtc_state)) {
6381 mod_freesync_handle_v_update(dm->freesync_module,
6382 new_stream, &vrr_params);
e63e2491
EB
6383
6384 /* Need to call this before the frame ends. */
6385 dc_stream_adjust_vmin_vmax(dm->dc,
6386 new_crtc_state->stream,
6387 &vrr_params.adjust);
09aef2c4 6388 }
180db303 6389 }
bb47de73
NK
6390
6391 mod_freesync_build_vrr_infopacket(
6392 dm->freesync_module,
6393 new_stream,
180db303 6394 &vrr_params,
ecd0136b
HT
6395 PACKET_TYPE_VRR,
6396 TRANSFER_FUNC_UNKNOWN,
bb47de73
NK
6397 &vrr_infopacket);
6398
8a48b44c 6399 new_crtc_state->freesync_timing_changed |=
180db303
NK
6400 (memcmp(&new_crtc_state->vrr_params.adjust,
6401 &vrr_params.adjust,
6402 sizeof(vrr_params.adjust)) != 0);
bb47de73 6403
8a48b44c 6404 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
6405 (memcmp(&new_crtc_state->vrr_infopacket,
6406 &vrr_infopacket,
6407 sizeof(vrr_infopacket)) != 0);
6408
180db303 6409 new_crtc_state->vrr_params = vrr_params;
bb47de73
NK
6410 new_crtc_state->vrr_infopacket = vrr_infopacket;
6411
180db303 6412 new_stream->adjust = new_crtc_state->vrr_params.adjust;
bb47de73
NK
6413 new_stream->vrr_infopacket = vrr_infopacket;
6414
6415 if (new_crtc_state->freesync_vrr_info_changed)
6416 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6417 new_crtc_state->base.crtc->base.id,
6418 (int)new_crtc_state->base.vrr_enabled,
180db303 6419 (int)vrr_params.state);
09aef2c4
MK
6420
6421 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
bb47de73
NK
6422}
6423
e854194c
MK
6424static void pre_update_freesync_state_on_stream(
6425 struct amdgpu_display_manager *dm,
6426 struct dm_crtc_state *new_crtc_state)
6427{
6428 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 6429 struct mod_vrr_params vrr_params;
e854194c 6430 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4
MK
6431 struct amdgpu_device *adev = dm->adev;
6432 unsigned long flags;
e854194c
MK
6433
6434 if (!new_stream)
6435 return;
6436
6437 /*
6438 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6439 * For now it's sufficient to just guard against these conditions.
6440 */
6441 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6442 return;
6443
09aef2c4
MK
6444 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6445 vrr_params = new_crtc_state->vrr_params;
6446
e854194c
MK
6447 if (new_crtc_state->vrr_supported &&
6448 config.min_refresh_in_uhz &&
6449 config.max_refresh_in_uhz) {
6450 config.state = new_crtc_state->base.vrr_enabled ?
6451 VRR_STATE_ACTIVE_VARIABLE :
6452 VRR_STATE_INACTIVE;
6453 } else {
6454 config.state = VRR_STATE_UNSUPPORTED;
6455 }
6456
6457 mod_freesync_build_vrr_params(dm->freesync_module,
6458 new_stream,
6459 &config, &vrr_params);
6460
6461 new_crtc_state->freesync_timing_changed |=
6462 (memcmp(&new_crtc_state->vrr_params.adjust,
6463 &vrr_params.adjust,
6464 sizeof(vrr_params.adjust)) != 0);
6465
6466 new_crtc_state->vrr_params = vrr_params;
09aef2c4 6467 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
e854194c
MK
6468}
6469
66b0c973
MK
6470static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6471 struct dm_crtc_state *new_state)
6472{
6473 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6474 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6475
6476 if (!old_vrr_active && new_vrr_active) {
6477 /* Transition VRR inactive -> active:
6478 * While VRR is active, we must not disable vblank irq, as a
6479 * re-enable after a disable would compute bogus vblank/pflip
6480 * timestamps if it happened inside the display front-porch.
d2574c33
MK
6481 *
6482 * We also need vupdate irq for the actual core vblank handling
6483 * at end of vblank.
66b0c973 6484 */
d2574c33 6485 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
6486 drm_crtc_vblank_get(new_state->base.crtc);
6487 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6488 __func__, new_state->base.crtc->base.id);
6489 } else if (old_vrr_active && !new_vrr_active) {
6490 /* Transition VRR active -> inactive:
6491 * Allow vblank irq disable again for fixed refresh rate.
6492 */
d2574c33 6493 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
6494 drm_crtc_vblank_put(new_state->base.crtc);
6495 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6496 __func__, new_state->base.crtc->base.id);
6497 }
6498}
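
/*
 * Note (illustrative): drm_crtc_vblank_get()/drm_crtc_vblank_put() are
 * reference counted, so every inactive->active transition above must be
 * balanced by exactly one active->inactive transition before vblank irqs
 * can be disabled again.
 */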
6499
8ad27806
NK
6500static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6501{
6502 struct drm_plane *plane;
6503 struct drm_plane_state *old_plane_state, *new_plane_state;
6504 int i;
6505
6506 /*
6507 * TODO: Make this per-stream so we don't issue redundant updates for
6508 * commits with multiple streams.
6509 */
6510 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6511 new_plane_state, i)
6512 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6513 handle_cursor_update(plane, old_plane_state);
6514}
6515
3be5262e 6516static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 6517 struct dc_state *dc_state,
3ee6b26b
AD
6518 struct drm_device *dev,
6519 struct amdgpu_display_manager *dm,
6520 struct drm_crtc *pcrtc,
420cd472 6521 bool wait_for_vblank)
e7b07cee 6522{
570c91d5 6523 uint32_t i;
8a48b44c 6524 uint64_t timestamp_ns;
e7b07cee 6525 struct drm_plane *plane;
0bc9706d 6526 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 6527 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
6528 struct drm_crtc_state *new_pcrtc_state =
6529 drm_atomic_get_new_crtc_state(state, pcrtc);
6530 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
6531 struct dm_crtc_state *dm_old_crtc_state =
6532 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 6533 int planes_count = 0, vpos, hpos;
570c91d5 6534 long r;
e7b07cee 6535 unsigned long flags;
8a48b44c 6536 struct amdgpu_bo *abo;
09e5665a 6537 uint64_t tiling_flags;
fdd1fe57
MK
6538 uint32_t target_vblank, last_flip_vblank;
6539 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 6540 bool pflip_present = false;
8c322309 6541 bool swizzle = true;
bc7f670e
DF
6542 struct {
6543 struct dc_surface_update surface_updates[MAX_SURFACES];
6544 struct dc_plane_info plane_infos[MAX_SURFACES];
6545 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 6546 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 6547 struct dc_stream_update stream_update;
74aa7bd4 6548 } *bundle;
bc7f670e 6549
74aa7bd4 6550 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 6551
74aa7bd4
DF
6552 if (!bundle) {
6553 dm_error("Failed to allocate update bundle\n");
4b510503
NK
6554 goto cleanup;
6555 }
e7b07cee 6556
8ad27806
NK
6557 /*
6558 * Disable the cursor first if we're disabling all the planes.
6559 * It'll remain on the screen after the planes are re-enabled
6560 * if we don't.
6561 */
6562 if (acrtc_state->active_planes == 0)
6563 amdgpu_dm_commit_cursors(state);
6564
e7b07cee 6565 /* update planes when needed */
0bc9706d
LSL
6566 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6567 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 6568 struct drm_crtc_state *new_crtc_state;
0bc9706d 6569 struct drm_framebuffer *fb = new_plane_state->fb;
34bafd27 6570 bool plane_needs_flip;
c7af5f77 6571 struct dc_plane_state *dc_plane;
54d76575 6572 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 6573
80c218d5
NK
6574 /* Cursor plane is handled after stream updates */
6575 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 6576 continue;
e7b07cee 6577
f5ba60fe
DD
6578 if (!fb || !crtc || pcrtc != crtc)
6579 continue;
6580
6581 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6582 if (!new_crtc_state->active)
e7b07cee
HW
6583 continue;
6584
bc7f670e 6585 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 6586
8c322309
RL
6587 if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
6588 swizzle = false;
6589
74aa7bd4 6590 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 6591 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
6592 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6593 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
bc7f670e 6594 }
8a48b44c 6595
695af5f9
NK
6596 fill_dc_scaling_info(new_plane_state,
6597 &bundle->scaling_infos[planes_count]);
8a48b44c 6598
695af5f9
NK
6599 bundle->surface_updates[planes_count].scaling_info =
6600 &bundle->scaling_infos[planes_count];
8a48b44c 6601
f5031000 6602 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 6603
f5031000 6604 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 6605
f5031000
DF
6606 if (!plane_needs_flip) {
6607 planes_count += 1;
6608 continue;
6609 }
8a48b44c 6610
2fac0f53
CK
6611 abo = gem_to_amdgpu_bo(fb->obj[0]);
6612
f8308898
AG
6613 /*
6614 * Wait for all fences on this FB. Do limited wait to avoid
6615 * deadlock during GPU reset when this fence will not signal
6616 * but we hold reservation lock for the BO.
6617 */
52791eee 6618 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 6619 false,
f8308898
AG
6620 msecs_to_jiffies(5000));
6621 if (unlikely(r <= 0))
ed8a5fb2 6622 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 6623
f5031000
DF
6624 /*
6625 * TODO: This might fail and hence is better not used; wait
6626 * explicitly on fences instead,
6627 * and in general this should be called for a
6628 * blocking commit, as per the framework helpers.
6629 */
f5031000 6630 r = amdgpu_bo_reserve(abo, true);
f8308898 6631 if (unlikely(r != 0))
f5031000 6632 DRM_ERROR("failed to reserve buffer before flip\n");
8a48b44c 6633
f5031000 6634 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
8a48b44c 6635
f5031000 6636 amdgpu_bo_unreserve(abo);
8a48b44c 6637
695af5f9
NK
6638 fill_dc_plane_info_and_addr(
6639 dm->adev, new_plane_state, tiling_flags,
6640 &bundle->plane_infos[planes_count],
6641 &bundle->flip_addrs[planes_count].address);
6642
6643 bundle->surface_updates[planes_count].plane_info =
6644 &bundle->plane_infos[planes_count];
8a48b44c 6645
caff0e66
NK
6646 /*
6647 * Only allow immediate flips for fast updates that don't
6648 * change FB pitch, DCC state, rotation or mirroring.
6649 */
f5031000 6650 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 6651 crtc->state->async_flip &&
caff0e66 6652 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 6653
f5031000
DF
6654 timestamp_ns = ktime_get_ns();
6655 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6656 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6657 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 6658
f5031000
DF
6659 if (!bundle->surface_updates[planes_count].surface) {
6660 DRM_ERROR("No surface for CRTC: id=%d\n",
6661 acrtc_attach->crtc_id);
6662 continue;
bc7f670e
DF
6663 }
6664
f5031000
DF
6665 if (plane == pcrtc->primary)
6666 update_freesync_state_on_stream(
6667 dm,
6668 acrtc_state,
6669 acrtc_state->stream,
6670 dc_plane,
6671 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 6672
f5031000
DF
6673 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6674 __func__,
6675 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6676 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
6677
6678 planes_count += 1;
6679
8a48b44c
DF
6680 }
6681
74aa7bd4 6682 if (pflip_present) {
634092b1
MK
6683 if (!vrr_active) {
6684 /* Use old throttling in non-vrr fixed refresh rate mode
6685 * to keep flip scheduling based on target vblank counts
6686 * working in a backwards compatible way, e.g., for
6687 * clients using the GLX_OML_sync_control extension or
6688 * DRI3/Present extension with defined target_msc.
6689 */
e3eff4b5 6690 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
6691 }
6692 else {
6693 /* For variable refresh rate mode only:
6694 * Get vblank of last completed flip to avoid > 1 vrr
6695 * flips per video frame by use of throttling, but allow
6696 * flip programming anywhere in the possibly large
6697 * variable vrr vblank interval for fine-grained flip
6698 * timing control and more opportunity to avoid stutter
6699 * on late submission of flips.
6700 */
6701 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6702 last_flip_vblank = acrtc_attach->last_flip_vblank;
6703 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6704 }
6705
fdd1fe57 6706 target_vblank = last_flip_vblank + wait_for_vblank;
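	/*
	 * The addition above throttles flips to at most one per refresh:
	 * wait_for_vblank contributes 1 for throttled flips and 0 for async
	 * flips, so e.g. last_flip_vblank = 100 yields target_vblank = 101.
	 */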
8a48b44c
DF
6707
6708 /*
6709 * Wait until we're out of the vertical blank period before the one
6710 * targeted by the flip
6711 */
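	/*
	 * Note the (int)(target_vblank - counter) > 0 test below relies on
	 * signed interpretation of the u32 difference, so it stays correct
	 * across a counter wrap: e.g. target_vblank = 2 and a counter of
	 * 0xfffffffe give a u32 difference of 4, still positive, and the
	 * wait continues as intended.
	 */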
6712 while ((acrtc_attach->enabled &&
6713 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6714 0, &vpos, &hpos, NULL,
6715 NULL, &pcrtc->hwmode)
6716 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6717 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6718 (int)(target_vblank -
e3eff4b5 6719 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
6720 usleep_range(1000, 1100);
6721 }
6722
6723 if (acrtc_attach->base.state->event) {
6724 drm_crtc_vblank_get(pcrtc);
6725
6726 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6727
6728 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6729 prepare_flip_isr(acrtc_attach);
6730
6731 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6732 }
6733
6734 if (acrtc_state->stream) {
8a48b44c 6735 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 6736 bundle->stream_update.vrr_infopacket =
8a48b44c 6737 &acrtc_state->stream->vrr_infopacket;
e7b07cee 6738 }
e7b07cee
HW
6739 }
6740
bc92c065 6741 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
6742 if ((planes_count || acrtc_state->active_planes == 0) &&
6743 acrtc_state->stream) {
b6e881c9 6744 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 6745 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
6746 bundle->stream_update.src = acrtc_state->stream->src;
6747 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
6748 }
6749
cf020d49
NK
6750 if (new_pcrtc_state->color_mgmt_changed) {
6751 /*
6752 * TODO: This isn't fully correct since we've actually
6753 * already modified the stream in place.
6754 */
6755 bundle->stream_update.gamut_remap =
6756 &acrtc_state->stream->gamut_remap_matrix;
6757 bundle->stream_update.output_csc_transform =
6758 &acrtc_state->stream->csc_color_matrix;
6759 bundle->stream_update.out_transfer_func =
6760 acrtc_state->stream->out_transfer_func;
6761 }
bc7f670e 6762
8a48b44c 6763 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 6764 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 6765 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 6766
e63e2491
EB
6767 /*
6768 * If FreeSync state on the stream has changed then we need to
6769 * re-adjust the min/max bounds now that DC doesn't handle this
6770 * as part of commit.
6771 */
6772 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6773 amdgpu_dm_vrr_active(acrtc_state)) {
6774 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6775 dc_stream_adjust_vmin_vmax(
6776 dm->dc, acrtc_state->stream,
6777 &acrtc_state->vrr_params.adjust);
6778 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6779 }
bc7f670e 6780 mutex_lock(&dm->dc_lock);
8c322309
RL
6781 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6782 acrtc_state->stream->link->psr_allow_active)
6783 amdgpu_dm_psr_disable(acrtc_state->stream);
6784
bc7f670e 6785 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 6786 bundle->surface_updates,
bc7f670e
DF
6787 planes_count,
6788 acrtc_state->stream,
74aa7bd4 6789 &bundle->stream_update,
bc7f670e 6790 dc_state);
8c322309
RL
6791
6792 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6793 acrtc_state->stream->psr_version &&
6794 !acrtc_state->stream->link->psr_feature_enabled)
6795 amdgpu_dm_link_setup_psr(acrtc_state->stream);
6796 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6797 acrtc_state->stream->link->psr_feature_enabled &&
6798 !acrtc_state->stream->link->psr_allow_active &&
6799 swizzle) {
6800 amdgpu_dm_psr_enable(acrtc_state->stream);
6801 }
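		/*
		 * Sketch of the PSR flow implemented above: PSR is disabled
		 * before any non-fast update, link setup runs after a full
		 * update on a PSR-capable stream that hasn't been set up yet,
		 * and PSR is only (re)enabled once the stream settles back
		 * into fast updates with the swizzle check passing.
		 */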
6802
bc7f670e 6803 mutex_unlock(&dm->dc_lock);
e7b07cee 6804 }
4b510503 6805
8ad27806
NK
6806 /*
6807 * Update cursor state *after* programming all the planes.
6808 * This avoids redundant programming in the case where we're going
6809 * to be disabling a single plane - those pipes are being disabled.
6810 */
6811 if (acrtc_state->active_planes)
6812 amdgpu_dm_commit_cursors(state);
80c218d5 6813
4b510503 6814cleanup:
74aa7bd4 6815 kfree(bundle);
e7b07cee
HW
6816}
6817
6ce8f316
NK
6818static void amdgpu_dm_commit_audio(struct drm_device *dev,
6819 struct drm_atomic_state *state)
6820{
6821 struct amdgpu_device *adev = dev->dev_private;
6822 struct amdgpu_dm_connector *aconnector;
6823 struct drm_connector *connector;
6824 struct drm_connector_state *old_con_state, *new_con_state;
6825 struct drm_crtc_state *new_crtc_state;
6826 struct dm_crtc_state *new_dm_crtc_state;
6827 const struct dc_stream_status *status;
6828 int i, inst;
6829
 6830 	/* Notify audio device removals. */
6831 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6832 if (old_con_state->crtc != new_con_state->crtc) {
6833 /* CRTC changes require notification. */
6834 goto notify;
6835 }
6836
6837 if (!new_con_state->crtc)
6838 continue;
6839
6840 new_crtc_state = drm_atomic_get_new_crtc_state(
6841 state, new_con_state->crtc);
6842
6843 if (!new_crtc_state)
6844 continue;
6845
6846 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6847 continue;
6848
6849 notify:
6850 aconnector = to_amdgpu_dm_connector(connector);
6851
6852 mutex_lock(&adev->dm.audio_lock);
6853 inst = aconnector->audio_inst;
6854 aconnector->audio_inst = -1;
6855 mutex_unlock(&adev->dm.audio_lock);
6856
6857 amdgpu_dm_audio_eld_notify(adev, inst);
6858 }
6859
6860 /* Notify audio device additions. */
6861 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6862 if (!new_con_state->crtc)
6863 continue;
6864
6865 new_crtc_state = drm_atomic_get_new_crtc_state(
6866 state, new_con_state->crtc);
6867
6868 if (!new_crtc_state)
6869 continue;
6870
6871 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6872 continue;
6873
6874 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6875 if (!new_dm_crtc_state->stream)
6876 continue;
6877
6878 status = dc_stream_get_status(new_dm_crtc_state->stream);
6879 if (!status)
6880 continue;
6881
6882 aconnector = to_amdgpu_dm_connector(connector);
6883
6884 mutex_lock(&adev->dm.audio_lock);
6885 inst = status->audio_inst;
6886 aconnector->audio_inst = inst;
6887 mutex_unlock(&adev->dm.audio_lock);
6888
6889 amdgpu_dm_audio_eld_notify(adev, inst);
6890 }
6891}
6892
b5e83f6f
NK
6893/*
6894 * Enable interrupts on CRTCs that are newly active, undergone
6895 * a modeset, or have active planes again.
6896 *
6897 * Done in two passes, based on the for_modeset flag:
6898 * Pass 1: For CRTCs going through modeset
6899 * Pass 2: For CRTCs going from 0 to n active planes
6900 *
6901 * Interrupts can only be enabled after the planes are programmed,
6902 * so this requires a two-pass approach since we don't want to
6903 * just defer the interrupts until after commit planes every time.
6904 */
6905static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6906 struct drm_atomic_state *state,
6907 bool for_modeset)
6908{
6909 struct amdgpu_device *adev = dev->dev_private;
6910 struct drm_crtc *crtc;
6911 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6912 int i;
148d31e3 6913#ifdef CONFIG_DEBUG_FS
14b25846 6914 enum amdgpu_dm_pipe_crc_source source;
148d31e3 6915#endif
b5e83f6f
NK
6916
6917 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6918 new_crtc_state, i) {
6919 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6920 struct dm_crtc_state *dm_new_crtc_state =
6921 to_dm_crtc_state(new_crtc_state);
6922 struct dm_crtc_state *dm_old_crtc_state =
6923 to_dm_crtc_state(old_crtc_state);
6924 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6925 bool run_pass;
6926
6927 run_pass = (for_modeset && modeset) ||
6928 (!for_modeset && !modeset &&
6929 !dm_old_crtc_state->interrupts_enabled);
6930
6931 if (!run_pass)
6932 continue;
6933
b5e83f6f
NK
6934 if (!dm_new_crtc_state->interrupts_enabled)
6935 continue;
6936
6937 manage_dm_interrupts(adev, acrtc, true);
6938
6939#ifdef CONFIG_DEBUG_FS
 6940 		/* The stream has changed so CRC capture needs to be re-enabled. */
14b25846
DZ
6941 source = dm_new_crtc_state->crc_src;
6942 if (amdgpu_dm_is_valid_crc_source(source)) {
57638021
NK
6943 amdgpu_dm_crtc_configure_crc_source(
6944 crtc, dm_new_crtc_state,
6945 dm_new_crtc_state->crc_src);
b5e83f6f
NK
6946 }
6947#endif
6948 }
6949}
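/*
 * Illustrative truth table for the run_pass logic above:
 *
 *   for_modeset  modeset  old irqs on  ->  run_pass
 *        1          1          x       ->     1     (pass 1)
 *        1          0          x       ->     0
 *        0          1          x       ->     0     (handled by pass 1)
 *        0          0          0       ->     1     (pass 2)
 *        0          0          1       ->     0     (already enabled)
 *
 * Even when run_pass is true, interrupts are only enabled if the new
 * state has interrupts_enabled set.
 */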
6950
1f6010a9 6951/*
27b3f4fc
LSL
6952 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6953 * @crtc_state: the DRM CRTC state
6954 * @stream_state: the DC stream state.
6955 *
6956 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
6957 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6958 */
6959static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
6960 struct dc_stream_state *stream_state)
6961{
b9952f93 6962 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 6963}
e7b07cee 6964
7578ecda
AD
6965static int amdgpu_dm_atomic_commit(struct drm_device *dev,
6966 struct drm_atomic_state *state,
6967 bool nonblock)
e7b07cee
HW
6968{
6969 struct drm_crtc *crtc;
c2cea706 6970 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
6971 struct amdgpu_device *adev = dev->dev_private;
6972 int i;
6973
6974 /*
d6ef9b41
NK
6975 * We evade vblank and pflip interrupts on CRTCs that are undergoing
6976 * a modeset, being disabled, or have no active planes.
6977 *
6978 * It's done in atomic commit rather than commit tail for now since
6979 * some of these interrupt handlers access the current CRTC state and
6980 * potentially the stream pointer itself.
6981 *
6982 * Since the atomic state is swapped within atomic commit and not within
 6983 	 * commit tail, this would lead to the new state (that hasn't been
 6984 	 * committed yet) being accessed from within the handlers.
6985 *
6986 * TODO: Fix this so we can do this in commit tail and not have to block
6987 * in atomic check.
e7b07cee 6988 */
c2cea706 6989 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
54d76575 6990 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
428da2bd 6991 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee
HW
6992 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6993
d6ef9b41
NK
6994 if (dm_old_crtc_state->interrupts_enabled &&
6995 (!dm_new_crtc_state->interrupts_enabled ||
57638021 6996 drm_atomic_crtc_needs_modeset(new_crtc_state)))
e7b07cee
HW
6997 manage_dm_interrupts(adev, acrtc, false);
6998 }
1f6010a9
DF
6999 /*
7000 * Add check here for SoC's that support hardware cursor plane, to
7001 * unset legacy_cursor_update
7002 */
e7b07cee
HW
7003
7004 return drm_atomic_helper_commit(dev, state, nonblock);
7005
 7006 	/* TODO: Handle EINTR, re-enable IRQ */
7007}
7008
b8592b48
LL
7009/**
7010 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7011 * @state: The atomic state to commit
7012 *
7013 * This will tell DC to commit the constructed DC state from atomic_check,
 7014  * programming the hardware. Any failure here implies a hardware failure, since
7015 * atomic check should have filtered anything non-kosher.
7016 */
7578ecda 7017static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
7018{
7019 struct drm_device *dev = state->dev;
7020 struct amdgpu_device *adev = dev->dev_private;
7021 struct amdgpu_display_manager *dm = &adev->dm;
7022 struct dm_atomic_state *dm_state;
eb3dc897 7023 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 7024 uint32_t i, j;
5cc6dcbd 7025 struct drm_crtc *crtc;
0bc9706d 7026 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
7027 unsigned long flags;
7028 bool wait_for_vblank = true;
7029 struct drm_connector *connector;
c2cea706 7030 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 7031 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 7032 int crtc_disable_count = 0;
e7b07cee
HW
7033
7034 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7035
eb3dc897
NK
7036 dm_state = dm_atomic_get_new_state(state);
7037 if (dm_state && dm_state->context) {
7038 dc_state = dm_state->context;
7039 } else {
7040 /* No state changes, retain current state. */
813d20dc 7041 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
7042 ASSERT(dc_state_temp);
7043 dc_state = dc_state_temp;
7044 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7045 }
e7b07cee
HW
7046
7047 /* update changed items */
0bc9706d 7048 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 7049 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 7050
54d76575
LSL
7051 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7052 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 7053
f1ad2f5e 7054 DRM_DEBUG_DRIVER(
e7b07cee
HW
7055 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
 7056 			"planes_changed:%d, mode_changed:%d, active_changed:%d,"
7057 "connectors_changed:%d\n",
7058 acrtc->crtc_id,
0bc9706d
LSL
7059 new_crtc_state->enable,
7060 new_crtc_state->active,
7061 new_crtc_state->planes_changed,
7062 new_crtc_state->mode_changed,
7063 new_crtc_state->active_changed,
7064 new_crtc_state->connectors_changed);
e7b07cee 7065
27b3f4fc
LSL
7066 /* Copy all transient state flags into dc state */
7067 if (dm_new_crtc_state->stream) {
7068 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7069 dm_new_crtc_state->stream);
7070 }
7071
e7b07cee
HW
7072 /* handles headless hotplug case, updating new_state and
7073 * aconnector as needed
7074 */
7075
54d76575 7076 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 7077
f1ad2f5e 7078 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 7079
54d76575 7080 if (!dm_new_crtc_state->stream) {
e7b07cee 7081 /*
b830ebc9
HW
 7082 				 * This can happen because of issues with the
 7083 				 * delivery of userspace notifications.
 7084 				 * In this case userspace tries to set a mode on a
1f6010a9
DF
 7085 				 * display which is in fact disconnected.
7086 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
 7087 				 * We expect a mode reset to come soon.
7088 *
 7089 				 * This can also happen when an unplug is done
 7090 				 * during the resume sequence.
7091 *
7092 * In this case, we want to pretend we still
7093 * have a sink to keep the pipe running so that
7094 * hw state is consistent with the sw state
7095 */
f1ad2f5e 7096 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
7097 __func__, acrtc->base.base.id);
7098 continue;
7099 }
7100
54d76575
LSL
7101 if (dm_old_crtc_state->stream)
7102 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 7103
97028037
LP
7104 pm_runtime_get_noresume(dev->dev);
7105
e7b07cee 7106 acrtc->enabled = true;
0bc9706d
LSL
7107 acrtc->hw_mode = new_crtc_state->mode;
7108 crtc->hwmode = new_crtc_state->mode;
7109 } else if (modereset_required(new_crtc_state)) {
f1ad2f5e 7110 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 7111 /* i.e. reset mode */
8c322309
RL
7112 if (dm_old_crtc_state->stream) {
7113 if (dm_old_crtc_state->stream->link->psr_allow_active)
7114 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7115
54d76575 7116 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8c322309 7117 }
e7b07cee
HW
7118 }
7119 } /* for_each_crtc_in_state() */
7120
eb3dc897
NK
7121 if (dc_state) {
7122 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 7123 mutex_lock(&dm->dc_lock);
eb3dc897 7124 WARN_ON(!dc_commit_state(dm->dc, dc_state));
674e78ac 7125 mutex_unlock(&dm->dc_lock);
fa2123db 7126 }
e7b07cee 7127
0bc9706d 7128 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 7129 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 7130
54d76575 7131 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 7132
54d76575 7133 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 7134 const struct dc_stream_status *status =
54d76575 7135 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 7136
eb3dc897 7137 if (!status)
09f609c3
LL
7138 status = dc_stream_get_status_from_state(dc_state,
7139 dm_new_crtc_state->stream);
eb3dc897 7140
e7b07cee 7141 if (!status)
54d76575 7142 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
7143 else
7144 acrtc->otg_inst = status->primary_otg_inst;
7145 }
7146 }
0c8620d6
BL
7147#ifdef CONFIG_DRM_AMD_DC_HDCP
7148 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7149 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7150 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7151 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7152
7153 new_crtc_state = NULL;
7154
7155 if (acrtc)
7156 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7157
7158 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7159
7160 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7161 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7162 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7163 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7164 continue;
7165 }
7166
7167 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
7168 hdcp_update_display(
7169 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 7170 new_con_state->hdcp_content_type,
b1abe558
BL
7171 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7172 : false);
0c8620d6
BL
7173 }
7174#endif
e7b07cee 7175
02d6a6fc 7176 /* Handle connector state changes */
c2cea706 7177 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
7178 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7179 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7180 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
19afd799
NC
7181 struct dc_surface_update dummy_updates[MAX_SURFACES];
7182 struct dc_stream_update stream_update;
b232d4ed 7183 struct dc_info_packet hdr_packet;
e7b07cee 7184 struct dc_stream_status *status = NULL;
b232d4ed 7185 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 7186
19afd799
NC
7187 memset(&dummy_updates, 0, sizeof(dummy_updates));
7188 memset(&stream_update, 0, sizeof(stream_update));
7189
44d09c6a 7190 if (acrtc) {
0bc9706d 7191 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
7192 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7193 }
0bc9706d 7194
e7b07cee 7195 /* Skip any modesets/resets */
0bc9706d 7196 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
7197 continue;
7198
54d76575 7199 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
7200 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7201
b232d4ed
NK
7202 scaling_changed = is_scaling_state_different(dm_new_con_state,
7203 dm_old_con_state);
7204
7205 abm_changed = dm_new_crtc_state->abm_level !=
7206 dm_old_crtc_state->abm_level;
7207
7208 hdr_changed =
7209 is_hdr_metadata_different(old_con_state, new_con_state);
7210
7211 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 7212 continue;
e7b07cee 7213
b6e881c9 7214 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 7215 if (scaling_changed) {
02d6a6fc 7216 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 7217 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 7218
02d6a6fc
DF
7219 stream_update.src = dm_new_crtc_state->stream->src;
7220 stream_update.dst = dm_new_crtc_state->stream->dst;
7221 }
7222
b232d4ed 7223 if (abm_changed) {
02d6a6fc
DF
7224 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7225
7226 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7227 }
70e8ffc5 7228
b232d4ed
NK
7229 if (hdr_changed) {
7230 fill_hdr_info_packet(new_con_state, &hdr_packet);
7231 stream_update.hdr_static_metadata = &hdr_packet;
7232 }
7233
54d76575 7234 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 7235 WARN_ON(!status);
3be5262e 7236 WARN_ON(!status->plane_count);
e7b07cee 7237
02d6a6fc
DF
7238 /*
7239 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7240 * Here we create an empty update on each plane.
7241 * To fix this, DC should permit updating only stream properties.
7242 */
7243 for (j = 0; j < status->plane_count; j++)
7244 dummy_updates[j].surface = status->plane_states[0];
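		/*
		 * Any valid surface on the stream will do here: the dummy
		 * updates exist only so dc_commit_updates_for_stream() accepts
		 * what is really a stream-only update, hence plane_states[0]
		 * is reused for every slot.
		 */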
7245
7246
7247 mutex_lock(&dm->dc_lock);
7248 dc_commit_updates_for_stream(dm->dc,
7249 dummy_updates,
7250 status->plane_count,
7251 dm_new_crtc_state->stream,
7252 &stream_update,
7253 dc_state);
7254 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
7255 }
7256
b5e83f6f 7257 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 7258 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 7259 new_crtc_state, i) {
fe2a1965
LP
7260 if (old_crtc_state->active && !new_crtc_state->active)
7261 crtc_disable_count++;
7262
54d76575 7263 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 7264 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 7265
057be086
NK
7266 /* Update freesync active state. */
7267 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7268
66b0c973
MK
7269 /* Handle vrr on->off / off->on transitions */
7270 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7271 dm_new_crtc_state);
e7b07cee
HW
7272 }
7273
b5e83f6f
NK
7274 /* Enable interrupts for CRTCs going through a modeset. */
7275 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
e7b07cee 7276
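	/*
	 * Async flips must not be throttled to the next vblank; clearing
	 * wait_for_vblank lets amdgpu_dm_commit_planes() program them
	 * immediately.
	 */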
420cd472 7277 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 7278 if (new_crtc_state->async_flip)
420cd472
DF
7279 wait_for_vblank = false;
7280
e7b07cee 7281 /* update planes when needed per crtc*/
5cc6dcbd 7282 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 7283 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 7284
54d76575 7285 if (dm_new_crtc_state->stream)
eb3dc897 7286 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 7287 dm, crtc, wait_for_vblank);
e7b07cee
HW
7288 }
7289
b5e83f6f
NK
7290 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7291 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
e7b07cee 7292
6ce8f316
NK
7293 /* Update audio instances for each connector. */
7294 amdgpu_dm_commit_audio(dev, state);
7295
e7b07cee
HW
7296 /*
7297 * send vblank event on all events not handled in flip and
7298 * mark consumed event for drm_atomic_helper_commit_hw_done
7299 */
7300 spin_lock_irqsave(&adev->ddev->event_lock, flags);
0bc9706d 7301 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 7302
0bc9706d
LSL
7303 if (new_crtc_state->event)
7304 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 7305
0bc9706d 7306 new_crtc_state->event = NULL;
e7b07cee
HW
7307 }
7308 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7309
29c8f234
LL
7310 /* Signal HW programming completion */
7311 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
7312
7313 if (wait_for_vblank)
320a1274 7314 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
7315
7316 drm_atomic_helper_cleanup_planes(dev, state);
97028037 7317
1f6010a9
DF
7318 /*
7319 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
7320 * so we can put the GPU into runtime suspend if we're not driving any
7321 * displays anymore
7322 */
fe2a1965
LP
7323 for (i = 0; i < crtc_disable_count; i++)
7324 pm_runtime_put_autosuspend(dev->dev);
97028037 7325 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
7326
7327 if (dc_state_temp)
7328 dc_release_state(dc_state_temp);
e7b07cee
HW
7329}
7330
7331
7332static int dm_force_atomic_commit(struct drm_connector *connector)
7333{
7334 int ret = 0;
7335 struct drm_device *ddev = connector->dev;
7336 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7337 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7338 struct drm_plane *plane = disconnected_acrtc->base.primary;
7339 struct drm_connector_state *conn_state;
7340 struct drm_crtc_state *crtc_state;
7341 struct drm_plane_state *plane_state;
7342
7343 if (!state)
7344 return -ENOMEM;
7345
7346 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7347
7348 /* Construct an atomic state to restore previous display setting */
7349
7350 /*
7351 * Attach connectors to drm_atomic_state
7352 */
7353 conn_state = drm_atomic_get_connector_state(state, connector);
7354
7355 ret = PTR_ERR_OR_ZERO(conn_state);
7356 if (ret)
7357 goto err;
7358
7359 /* Attach crtc to drm_atomic_state*/
7360 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7361
7362 ret = PTR_ERR_OR_ZERO(crtc_state);
7363 if (ret)
7364 goto err;
7365
7366 /* force a restore */
7367 crtc_state->mode_changed = true;
7368
7369 /* Attach plane to drm_atomic_state */
7370 plane_state = drm_atomic_get_plane_state(state, plane);
7371
7372 ret = PTR_ERR_OR_ZERO(plane_state);
7373 if (ret)
7374 goto err;
7375
7376
7377 /* Call commit internally with the state we just constructed */
7378 ret = drm_atomic_commit(state);
7379 if (!ret)
7380 return 0;
7381
7382err:
7383 DRM_ERROR("Restoring old state failed with %i\n", ret);
7384 drm_atomic_state_put(state);
7385
7386 return ret;
7387}
7388
7389/*
1f6010a9
DF
7390 * This function handles all cases when set mode does not come upon hotplug.
7391 * This includes when a display is unplugged then plugged back into the
 7392  * same port and when running without usermode desktop manager support.
e7b07cee 7393 */
3ee6b26b
AD
7394void dm_restore_drm_connector_state(struct drm_device *dev,
7395 struct drm_connector *connector)
e7b07cee 7396{
c84dec2f 7397 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
7398 struct amdgpu_crtc *disconnected_acrtc;
7399 struct dm_crtc_state *acrtc_state;
7400
7401 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7402 return;
7403
7404 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
7405 if (!disconnected_acrtc)
7406 return;
e7b07cee 7407
70e8ffc5
HW
7408 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7409 if (!acrtc_state->stream)
e7b07cee
HW
7410 return;
7411
7412 /*
7413 * If the previous sink is not released and different from the current,
7414 * we deduce we are in a state where we can not rely on usermode call
7415 * to turn on the display, so we do it here
7416 */
7417 if (acrtc_state->stream->sink != aconnector->dc_sink)
7418 dm_force_atomic_commit(&aconnector->base);
7419}
7420
1f6010a9 7421/*
e7b07cee
HW
 7422  * Grabs all modesetting locks to serialize against any blocking commits,
 7423  * and waits for completion of all non-blocking commits.
7424 */
3ee6b26b
AD
7425static int do_aquire_global_lock(struct drm_device *dev,
7426 struct drm_atomic_state *state)
e7b07cee
HW
7427{
7428 struct drm_crtc *crtc;
7429 struct drm_crtc_commit *commit;
7430 long ret;
7431
1f6010a9
DF
7432 /*
 7433 	 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
 7434 	 * ensure that when the framework releases it, the
 7435 	 * extra locks we are taking here will get released too
7436 */
7437 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7438 if (ret)
7439 return ret;
7440
7441 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7442 spin_lock(&crtc->commit_lock);
7443 commit = list_first_entry_or_null(&crtc->commit_list,
7444 struct drm_crtc_commit, commit_entry);
7445 if (commit)
7446 drm_crtc_commit_get(commit);
7447 spin_unlock(&crtc->commit_lock);
7448
7449 if (!commit)
7450 continue;
7451
1f6010a9
DF
7452 /*
 7453 		 * Make sure all pending HW programming has completed and
e7b07cee
HW
 7454 		 * all page flips are done
7455 */
7456 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7457
7458 if (ret > 0)
7459 ret = wait_for_completion_interruptible_timeout(
7460 &commit->flip_done, 10*HZ);
7461
7462 if (ret == 0)
7463 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 7464 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
7465
7466 drm_crtc_commit_put(commit);
7467 }
7468
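	/*
	 * wait_for_completion_interruptible_timeout() returns 0 on timeout,
	 * -ERESTARTSYS if interrupted and the remaining jiffies (> 0)
	 * otherwise; the mapping below therefore propagates signals as
	 * errors and flattens success to 0, while a timeout is only logged.
	 */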
7469 return ret < 0 ? ret : 0;
7470}
7471
bb47de73
NK
7472static void get_freesync_config_for_crtc(
7473 struct dm_crtc_state *new_crtc_state,
7474 struct dm_connector_state *new_con_state)
98e6436d
AK
7475{
7476 struct mod_freesync_config config = {0};
98e6436d
AK
7477 struct amdgpu_dm_connector *aconnector =
7478 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 7479 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 7480 int vrefresh = drm_mode_vrefresh(mode);
98e6436d 7481
a057ec46 7482 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
7483 vrefresh >= aconnector->min_vfreq &&
7484 vrefresh <= aconnector->max_vfreq;
bb47de73 7485
a057ec46
IB
7486 if (new_crtc_state->vrr_supported) {
7487 new_crtc_state->stream->ignore_msa_timing_param = true;
bb47de73 7488 config.state = new_crtc_state->base.vrr_enabled ?
98e6436d
AK
7489 VRR_STATE_ACTIVE_VARIABLE :
7490 VRR_STATE_INACTIVE;
7491 config.min_refresh_in_uhz =
7492 aconnector->min_vfreq * 1000000;
7493 config.max_refresh_in_uhz =
7494 aconnector->max_vfreq * 1000000;
69ff8845 7495 config.vsif_supported = true;
180db303 7496 config.btr = true;
98e6436d
AK
7497 }
7498
bb47de73
NK
7499 new_crtc_state->freesync_config = config;
7500}
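/*
 * Worked example: a 48-144 Hz VRR panel reports min_vfreq = 48 and
 * max_vfreq = 144, so DC is handed a 48000000-144000000 uHz range and
 * any mode whose drm_mode_vrefresh() falls in [48, 144] gets
 * vrr_supported set.
 */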
98e6436d 7501
bb47de73
NK
7502static void reset_freesync_config_for_crtc(
7503 struct dm_crtc_state *new_crtc_state)
7504{
7505 new_crtc_state->vrr_supported = false;
98e6436d 7506
180db303
NK
7507 memset(&new_crtc_state->vrr_params, 0,
7508 sizeof(new_crtc_state->vrr_params));
bb47de73
NK
7509 memset(&new_crtc_state->vrr_infopacket, 0,
7510 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
7511}
7512
4b9674e5
LL
7513static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7514 struct drm_atomic_state *state,
7515 struct drm_crtc *crtc,
7516 struct drm_crtc_state *old_crtc_state,
7517 struct drm_crtc_state *new_crtc_state,
7518 bool enable,
7519 bool *lock_and_validation_needed)
e7b07cee 7520{
eb3dc897 7521 struct dm_atomic_state *dm_state = NULL;
54d76575 7522 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 7523 struct dc_stream_state *new_stream;
62f55537 7524 int ret = 0;
d4d4a645 7525
1f6010a9
DF
7526 /*
7527 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7528 * update changed items
7529 */
4b9674e5
LL
7530 struct amdgpu_crtc *acrtc = NULL;
7531 struct amdgpu_dm_connector *aconnector = NULL;
7532 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7533 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 7534
4b9674e5 7535 new_stream = NULL;
9635b754 7536
4b9674e5
LL
7537 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7538 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7539 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 7540 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 7541
4b9674e5
LL
7542 /* TODO This hack should go away */
7543 if (aconnector && enable) {
7544 /* Make sure fake sink is created in plug-in scenario */
7545 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7546 &aconnector->base);
7547 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7548 &aconnector->base);
19f89e23 7549
4b9674e5
LL
7550 if (IS_ERR(drm_new_conn_state)) {
7551 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7552 goto fail;
7553 }
19f89e23 7554
4b9674e5
LL
7555 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7556 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 7557
02d35a67
JFZ
7558 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7559 goto skip_modeset;
7560
4b9674e5
LL
7561 new_stream = create_stream_for_sink(aconnector,
7562 &new_crtc_state->mode,
7563 dm_new_conn_state,
7564 dm_old_crtc_state->stream);
19f89e23 7565
4b9674e5
LL
7566 /*
7567 * we can have no stream on ACTION_SET if a display
7568 * was disconnected during S3, in this case it is not an
7569 * error, the OS will be updated after detection, and
7570 * will do the right thing on next atomic commit
7571 */
19f89e23 7572
4b9674e5
LL
7573 if (!new_stream) {
7574 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7575 __func__, acrtc->base.base.id);
7576 ret = -ENOMEM;
7577 goto fail;
7578 }
e7b07cee 7579
4b9674e5 7580 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 7581
88694af9
NK
7582 ret = fill_hdr_info_packet(drm_new_conn_state,
7583 &new_stream->hdr_static_metadata);
7584 if (ret)
7585 goto fail;
7586
7e930949
NK
7587 /*
7588 * If we already removed the old stream from the context
7589 * (and set the new stream to NULL) then we can't reuse
7590 * the old stream even if the stream and scaling are unchanged.
7591 * We'll hit the BUG_ON and black screen.
7592 *
7593 * TODO: Refactor this function to allow this check to work
7594 * in all conditions.
7595 */
7596 if (dm_new_crtc_state->stream &&
7597 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
7598 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7599 new_crtc_state->mode_changed = false;
 7600 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
7601 new_crtc_state->mode_changed);
62f55537 7602 }
4b9674e5 7603 }
b830ebc9 7604
02d35a67 7605 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
7606 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7607 goto skip_modeset;
e7b07cee 7608
4b9674e5
LL
7609 DRM_DEBUG_DRIVER(
7610 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7611 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7612 "connectors_changed:%d\n",
7613 acrtc->crtc_id,
7614 new_crtc_state->enable,
7615 new_crtc_state->active,
7616 new_crtc_state->planes_changed,
7617 new_crtc_state->mode_changed,
7618 new_crtc_state->active_changed,
7619 new_crtc_state->connectors_changed);
62f55537 7620
4b9674e5
LL
7621 /* Remove stream for any changed/disabled CRTC */
7622 if (!enable) {
62f55537 7623
4b9674e5
LL
7624 if (!dm_old_crtc_state->stream)
7625 goto skip_modeset;
eb3dc897 7626
4b9674e5
LL
7627 ret = dm_atomic_get_state(state, &dm_state);
7628 if (ret)
7629 goto fail;
e7b07cee 7630
4b9674e5
LL
7631 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7632 crtc->base.id);
62f55537 7633
4b9674e5
LL
7634 /* i.e. reset mode */
7635 if (dc_remove_stream_from_ctx(
7636 dm->dc,
7637 dm_state->context,
7638 dm_old_crtc_state->stream) != DC_OK) {
7639 ret = -EINVAL;
7640 goto fail;
7641 }
62f55537 7642
4b9674e5
LL
7643 dc_stream_release(dm_old_crtc_state->stream);
7644 dm_new_crtc_state->stream = NULL;
bb47de73 7645
4b9674e5 7646 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 7647
4b9674e5 7648 *lock_and_validation_needed = true;
62f55537 7649
4b9674e5
LL
7650 } else {/* Add stream for any updated/enabled CRTC */
7651 /*
7652 * Quick fix to prevent NULL pointer on new_stream when
 7653 		 * newly added MST connectors are not found in the existing crtc_state in chained mode.
7654 * TODO: need to dig out the root cause of that
7655 */
7656 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7657 goto skip_modeset;
62f55537 7658
4b9674e5
LL
7659 if (modereset_required(new_crtc_state))
7660 goto skip_modeset;
62f55537 7661
4b9674e5
LL
7662 if (modeset_required(new_crtc_state, new_stream,
7663 dm_old_crtc_state->stream)) {
62f55537 7664
4b9674e5 7665 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 7666
4b9674e5
LL
7667 ret = dm_atomic_get_state(state, &dm_state);
7668 if (ret)
7669 goto fail;
27b3f4fc 7670
4b9674e5 7671 dm_new_crtc_state->stream = new_stream;
62f55537 7672
4b9674e5 7673 dc_stream_retain(new_stream);
1dc90497 7674
4b9674e5
LL
7675 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7676 crtc->base.id);
1dc90497 7677
4b9674e5
LL
7678 if (dc_add_stream_to_ctx(
7679 dm->dc,
7680 dm_state->context,
7681 dm_new_crtc_state->stream) != DC_OK) {
7682 ret = -EINVAL;
7683 goto fail;
9b690ef3
BL
7684 }
7685
4b9674e5
LL
7686 *lock_and_validation_needed = true;
7687 }
7688 }
e277adc5 7689
4b9674e5
LL
7690skip_modeset:
7691 /* Release extra reference */
7692 if (new_stream)
7693 dc_stream_release(new_stream);
e277adc5 7694
4b9674e5
LL
7695 /*
7696 * We want to do dc stream updates that do not require a
7697 * full modeset below.
7698 */
7699 if (!(enable && aconnector && new_crtc_state->enable &&
7700 new_crtc_state->active))
7701 return 0;
7702 /*
7703 * Given above conditions, the dc state cannot be NULL because:
7704 * 1. We're in the process of enabling CRTCs (just been added
7705 * to the dc context, or already is on the context)
7706 * 2. Has a valid connector attached, and
7707 * 3. Is currently active and enabled.
7708 * => The dc stream state currently exists.
7709 */
7710 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 7711
4b9674e5
LL
7712 /* Scaling or underscan settings */
7713 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7714 update_stream_scaling_settings(
7715 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 7716
b05e2c5e
DF
7717 /* ABM settings */
7718 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7719
4b9674e5
LL
7720 /*
7721 * Color management settings. We also update color properties
7722 * when a modeset is needed, to ensure it gets reprogrammed.
7723 */
7724 if (dm_new_crtc_state->base.color_mgmt_changed ||
7725 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 7726 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
7727 if (ret)
7728 goto fail;
62f55537 7729 }
e7b07cee 7730
4b9674e5
LL
7731 /* Update Freesync settings. */
7732 get_freesync_config_for_crtc(dm_new_crtc_state,
7733 dm_new_conn_state);
7734
62f55537 7735 return ret;
9635b754
DS
7736
7737fail:
7738 if (new_stream)
7739 dc_stream_release(new_stream);
7740 return ret;
62f55537 7741}
9b690ef3 7742
f6ff2a08
NK
7743static bool should_reset_plane(struct drm_atomic_state *state,
7744 struct drm_plane *plane,
7745 struct drm_plane_state *old_plane_state,
7746 struct drm_plane_state *new_plane_state)
7747{
7748 struct drm_plane *other;
7749 struct drm_plane_state *old_other_state, *new_other_state;
7750 struct drm_crtc_state *new_crtc_state;
7751 int i;
7752
70a1efac
NK
7753 /*
 7754 	 * TODO: Remove this hack once the checks below are sufficient
 7755 	 * to determine when we need to reset all the planes on
7756 * the stream.
7757 */
7758 if (state->allow_modeset)
7759 return true;
7760
f6ff2a08
NK
7761 /* Exit early if we know that we're adding or removing the plane. */
7762 if (old_plane_state->crtc != new_plane_state->crtc)
7763 return true;
7764
7765 /* old crtc == new_crtc == NULL, plane not in context. */
7766 if (!new_plane_state->crtc)
7767 return false;
7768
7769 new_crtc_state =
7770 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7771
7772 if (!new_crtc_state)
7773 return true;
7774
7316c4ad
NK
7775 /* CRTC Degamma changes currently require us to recreate planes. */
7776 if (new_crtc_state->color_mgmt_changed)
7777 return true;
7778
f6ff2a08
NK
7779 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7780 return true;
7781
7782 /*
7783 * If there are any new primary or overlay planes being added or
7784 * removed then the z-order can potentially change. To ensure
7785 * correct z-order and pipe acquisition the current DC architecture
7786 * requires us to remove and recreate all existing planes.
7787 *
7788 * TODO: Come up with a more elegant solution for this.
7789 */
7790 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7791 if (other->type == DRM_PLANE_TYPE_CURSOR)
7792 continue;
7793
7794 if (old_other_state->crtc != new_plane_state->crtc &&
7795 new_other_state->crtc != new_plane_state->crtc)
7796 continue;
7797
7798 if (old_other_state->crtc != new_other_state->crtc)
7799 return true;
7800
7801 /* TODO: Remove this once we can handle fast format changes. */
7802 if (old_other_state->fb && new_other_state->fb &&
7803 old_other_state->fb->format != new_other_state->fb->format)
7804 return true;
7805 }
7806
7807 return false;
7808}
7809
9e869063
LL
7810static int dm_update_plane_state(struct dc *dc,
7811 struct drm_atomic_state *state,
7812 struct drm_plane *plane,
7813 struct drm_plane_state *old_plane_state,
7814 struct drm_plane_state *new_plane_state,
7815 bool enable,
7816 bool *lock_and_validation_needed)
62f55537 7817{
eb3dc897
NK
7818
7819 struct dm_atomic_state *dm_state = NULL;
62f55537 7820 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 7821 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 7822 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 7823 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
f6ff2a08 7824 bool needs_reset;
62f55537 7825 int ret = 0;
e7b07cee 7826
9b690ef3 7827
9e869063
LL
7828 new_plane_crtc = new_plane_state->crtc;
7829 old_plane_crtc = old_plane_state->crtc;
7830 dm_new_plane_state = to_dm_plane_state(new_plane_state);
7831 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 7832
9e869063
LL
7833 /*TODO Implement atomic check for cursor plane */
7834 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7835 return 0;
9b690ef3 7836
f6ff2a08
NK
7837 needs_reset = should_reset_plane(state, plane, old_plane_state,
7838 new_plane_state);
7839
9e869063
LL
7840 /* Remove any changed/removed planes */
7841 if (!enable) {
f6ff2a08 7842 if (!needs_reset)
9e869063 7843 return 0;
a7b06724 7844
9e869063
LL
7845 if (!old_plane_crtc)
7846 return 0;
62f55537 7847
9e869063
LL
7848 old_crtc_state = drm_atomic_get_old_crtc_state(
7849 state, old_plane_crtc);
7850 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 7851
9e869063
LL
7852 if (!dm_old_crtc_state->stream)
7853 return 0;
62f55537 7854
9e869063
LL
7855 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7856 plane->base.id, old_plane_crtc->base.id);
9b690ef3 7857
9e869063
LL
7858 ret = dm_atomic_get_state(state, &dm_state);
7859 if (ret)
7860 return ret;
eb3dc897 7861
9e869063
LL
7862 if (!dc_remove_plane_from_context(
7863 dc,
7864 dm_old_crtc_state->stream,
7865 dm_old_plane_state->dc_state,
7866 dm_state->context)) {
62f55537 7867
9e869063
LL
 7868 			ret = -EINVAL;
7869 return ret;
7870 }
e7b07cee 7871
9b690ef3 7872
9e869063
LL
7873 dc_plane_state_release(dm_old_plane_state->dc_state);
7874 dm_new_plane_state->dc_state = NULL;
1dc90497 7875
9e869063 7876 *lock_and_validation_needed = true;
1dc90497 7877
9e869063
LL
7878 } else { /* Add new planes */
7879 struct dc_plane_state *dc_new_plane_state;
1dc90497 7880
9e869063
LL
7881 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7882 return 0;
e7b07cee 7883
9e869063
LL
7884 if (!new_plane_crtc)
7885 return 0;
e7b07cee 7886
9e869063
LL
7887 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7888 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 7889
9e869063
LL
7890 if (!dm_new_crtc_state->stream)
7891 return 0;
62f55537 7892
f6ff2a08 7893 if (!needs_reset)
9e869063 7894 return 0;
62f55537 7895
9e869063 7896 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 7897
9e869063
LL
7898 dc_new_plane_state = dc_create_plane_state(dc);
7899 if (!dc_new_plane_state)
7900 return -ENOMEM;
62f55537 7901
9e869063
LL
7902 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7903 plane->base.id, new_plane_crtc->base.id);
8c45c5db 7904
695af5f9 7905 ret = fill_dc_plane_attributes(
9e869063
LL
7906 new_plane_crtc->dev->dev_private,
7907 dc_new_plane_state,
7908 new_plane_state,
7909 new_crtc_state);
7910 if (ret) {
7911 dc_plane_state_release(dc_new_plane_state);
7912 return ret;
7913 }
62f55537 7914
9e869063
LL
7915 ret = dm_atomic_get_state(state, &dm_state);
7916 if (ret) {
7917 dc_plane_state_release(dc_new_plane_state);
7918 return ret;
7919 }
eb3dc897 7920
9e869063
LL
7921 /*
7922 * Any atomic check errors that occur after this will
7923 * not need a release. The plane state will be attached
7924 * to the stream, and therefore part of the atomic
7925 * state. It'll be released when the atomic state is
7926 * cleaned.
7927 */
7928 if (!dc_add_plane_to_context(
7929 dc,
7930 dm_new_crtc_state->stream,
7931 dc_new_plane_state,
7932 dm_state->context)) {
62f55537 7933
9e869063
LL
7934 dc_plane_state_release(dc_new_plane_state);
7935 return -EINVAL;
7936 }
8c45c5db 7937
9e869063 7938 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 7939
9e869063
LL
7940 /* Tell DC to do a full surface update every time there
7941 * is a plane change. Inefficient, but works for now.
7942 */
7943 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7944
7945 *lock_and_validation_needed = true;
62f55537 7946 }
e7b07cee
HW
7947
7948
62f55537
AG
7949 return ret;
7950}
a87fa993 7951
eb3dc897 7952static int
f843b308 7953dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
eb3dc897
NK
7954 struct drm_atomic_state *state,
7955 enum surface_update_type *out_type)
7956{
f843b308 7957 struct dc *dc = dm->dc;
eb3dc897
NK
7958 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
7959 int i, j, num_plane, ret = 0;
a87fa993
BL
7960 struct drm_plane_state *old_plane_state, *new_plane_state;
7961 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
d3b65841 7962 struct drm_crtc *new_plane_crtc;
a87fa993
BL
7963 struct drm_plane *plane;
7964
7965 struct drm_crtc *crtc;
7966 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
7967 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
7968 struct dc_stream_status *status = NULL;
a87fa993 7969 enum surface_update_type update_type = UPDATE_TYPE_FAST;
7527791e
RL
7970 struct surface_info_bundle {
7971 struct dc_surface_update surface_updates[MAX_SURFACES];
7972 struct dc_plane_info plane_infos[MAX_SURFACES];
7973 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7974 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7975 struct dc_stream_update stream_update;
7976 } *bundle;
a87fa993 7977
7527791e 7978 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
fe96b99d 7979
7527791e
RL
7980 if (!bundle) {
7981 DRM_ERROR("Failed to allocate update bundle\n");
4f712911
BL
 7982 		/* Set type to FULL to avoid crashing in DC */
7983 update_type = UPDATE_TYPE_FULL;
eb3dc897 7984 goto cleanup;
4f712911 7985 }
a87fa993
BL
7986
7987 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2aa632c5 7988
7527791e 7989 memset(bundle, 0, sizeof(struct surface_info_bundle));
c448a53a 7990
a87fa993
BL
7991 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7992 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
7993 num_plane = 0;
7994
6836d239
NK
7995 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
7996 update_type = UPDATE_TYPE_FULL;
7997 goto cleanup;
7998 }
a87fa993 7999
6836d239 8000 if (!new_dm_crtc_state->stream)
c744e974 8001 continue;
eb3dc897 8002
c744e974 8003 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
2cc450ce
NK
8004 const struct amdgpu_framebuffer *amdgpu_fb =
8005 to_amdgpu_framebuffer(new_plane_state->fb);
7527791e
RL
8006 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8007 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8008 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
2cc450ce
NK
8009 uint64_t tiling_flags;
8010
c744e974 8011 new_plane_crtc = new_plane_state->crtc;
c744e974
NK
8012 new_dm_plane_state = to_dm_plane_state(new_plane_state);
8013 old_dm_plane_state = to_dm_plane_state(old_plane_state);
eb3dc897 8014
c744e974
NK
8015 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8016 continue;
eb3dc897 8017
6836d239
NK
8018 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8019 update_type = UPDATE_TYPE_FULL;
8020 goto cleanup;
8021 }
8022
c744e974
NK
8023 if (crtc != new_plane_crtc)
8024 continue;
8025
7527791e
RL
8026 bundle->surface_updates[num_plane].surface =
8027 new_dm_plane_state->dc_state;
c744e974
NK
8028
8029 if (new_crtc_state->mode_changed) {
7527791e
RL
8030 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8031 bundle->stream_update.src = new_dm_crtc_state->stream->src;
c744e974
NK
8032 }
8033
8034 if (new_crtc_state->color_mgmt_changed) {
7527791e 8035 bundle->surface_updates[num_plane].gamma =
c744e974 8036 new_dm_plane_state->dc_state->gamma_correction;
7527791e 8037 bundle->surface_updates[num_plane].in_transfer_func =
c744e974 8038 new_dm_plane_state->dc_state->in_transfer_func;
7527791e 8039 bundle->stream_update.gamut_remap =
c744e974 8040 &new_dm_crtc_state->stream->gamut_remap_matrix;
7527791e 8041 bundle->stream_update.output_csc_transform =
cf020d49 8042 &new_dm_crtc_state->stream->csc_color_matrix;
7527791e 8043 bundle->stream_update.out_transfer_func =
c744e974 8044 new_dm_crtc_state->stream->out_transfer_func;
a87fa993
BL
8045 }
8046
004b3938 8047 ret = fill_dc_scaling_info(new_plane_state,
7527791e 8048 scaling_info);
004b3938
NK
8049 if (ret)
8050 goto cleanup;
8051
7527791e 8052 bundle->surface_updates[num_plane].scaling_info = scaling_info;
004b3938 8053
2cc450ce
NK
8054 if (amdgpu_fb) {
8055 ret = get_fb_info(amdgpu_fb, &tiling_flags);
8056 if (ret)
8057 goto cleanup;
8058
2cc450ce
NK
8059 ret = fill_dc_plane_info_and_addr(
8060 dm->adev, new_plane_state, tiling_flags,
7527791e
RL
8061 plane_info,
8062 &flip_addr->address);
2cc450ce
NK
8063 if (ret)
8064 goto cleanup;
8065
7527791e
RL
8066 bundle->surface_updates[num_plane].plane_info = plane_info;
8067 bundle->surface_updates[num_plane].flip_addr = flip_addr;
2cc450ce
NK
8068 }
8069
c744e974
NK
8070 num_plane++;
8071 }
8072
8073 if (num_plane == 0)
8074 continue;
8075
8076 ret = dm_atomic_get_state(state, &dm_state);
8077 if (ret)
8078 goto cleanup;
8079
8080 old_dm_state = dm_atomic_get_old_state(state);
8081 if (!old_dm_state) {
8082 ret = -EINVAL;
8083 goto cleanup;
8084 }
8085
8086 status = dc_stream_get_status_from_state(old_dm_state->context,
8087 new_dm_crtc_state->stream);
7527791e 8088 bundle->stream_update.stream = new_dm_crtc_state->stream;
f843b308
NK
8089 /*
8090 * TODO: DC modifies the surface during this call so we need
8091 * to lock here - find a way to do this without locking.
8092 */
8093 mutex_lock(&dm->dc_lock);
7527791e
RL
8094 update_type = dc_check_update_surfaces_for_stream(
8095 dc, bundle->surface_updates, num_plane,
8096 &bundle->stream_update, status);
f843b308 8097 mutex_unlock(&dm->dc_lock);
c744e974
NK
8098
8099 if (update_type > UPDATE_TYPE_MED) {
a87fa993 8100 update_type = UPDATE_TYPE_FULL;
eb3dc897 8101 goto cleanup;
a87fa993
BL
8102 }
8103 }
8104
eb3dc897 8105cleanup:
7527791e 8106 kfree(bundle);
a87fa993 8107
eb3dc897
NK
8108 *out_type = update_type;
8109 return ret;
a87fa993 8110}
62f55537 8111
44be939f
ML
8112static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8113{
8114 struct drm_connector *connector;
8115 struct drm_connector_state *conn_state;
8116 struct amdgpu_dm_connector *aconnector = NULL;
8117 int i;
8118 for_each_new_connector_in_state(state, connector, conn_state, i) {
8119 if (conn_state->crtc != crtc)
8120 continue;
8121
8122 aconnector = to_amdgpu_dm_connector(connector);
8123 if (!aconnector->port || !aconnector->mst_port)
8124 aconnector = NULL;
8125 else
8126 break;
8127 }
8128
8129 if (!aconnector)
8130 return 0;
8131
8132 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8133}
8134
b8592b48
LL
8135/**
8136 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8137 * @dev: The DRM device
8138 * @state: The atomic state to commit
8139 *
8140 * Validate that the given atomic state is programmable by DC into hardware.
8141 * This involves constructing a &struct dc_state reflecting the new hardware
8142 * state we wish to commit, then querying DC to see if it is programmable. It's
8143 * important not to modify the existing DC state. Otherwise, atomic_check
8144 * may unexpectedly commit hardware changes.
8145 *
8146 * When validating the DC state, it's important that the right locks are
8147 * acquired. For full updates case which removes/adds/updates streams on one
8148 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8149 * that any such full update commit will wait for completion of any outstanding
8150 * flip using DRMs synchronization events. See
8151 * dm_determine_update_type_for_commit()
8152 *
8153 * Note that DM adds the affected connectors for all CRTCs in state, when that
8154 * might not seem necessary. This is because DC stream creation requires the
8155 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8156 * be possible but non-trivial - a possible TODO item.
8157 *
 8158  * Return: negative error code if validation failed, 0 otherwise.
8159 */
7578ecda
AD
8160static int amdgpu_dm_atomic_check(struct drm_device *dev,
8161 struct drm_atomic_state *state)
62f55537 8162{
62f55537 8163 struct amdgpu_device *adev = dev->dev_private;
eb3dc897 8164 struct dm_atomic_state *dm_state = NULL;
62f55537 8165 struct dc *dc = adev->dm.dc;
62f55537 8166 struct drm_connector *connector;
c2cea706 8167 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 8168 struct drm_crtc *crtc;
fc9e9920 8169 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
8170 struct drm_plane *plane;
8171 struct drm_plane_state *old_plane_state, *new_plane_state;
a87fa993
BL
8172 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8173 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8174
1e88ad0a 8175 int ret, i;
e7b07cee 8176
62f55537
AG
8177 /*
 8178 	 * This bool will be set to true for any modeset/reset
 8179 	 * or plane update which implies a non-fast surface update.
8180 */
8181 bool lock_and_validation_needed = false;
8182
8183 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
8184 if (ret)
8185 goto fail;
62f55537 8186
44be939f
ML
8187 if (adev->asic_type >= CHIP_NAVI10) {
8188 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8189 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8190 ret = add_affected_mst_dsc_crtcs(state, crtc);
8191 if (ret)
8192 goto fail;
8193 }
8194 }
8195 }
8196
1e88ad0a
S
8197 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8198 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 8199 !new_crtc_state->color_mgmt_changed &&
a93587b3 8200 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
1e88ad0a 8201 continue;
7bef1af3 8202
1e88ad0a
S
8203 if (!new_crtc_state->enable)
8204 continue;
fc9e9920 8205
1e88ad0a
S
8206 ret = drm_atomic_add_affected_connectors(state, crtc);
8207 if (ret)
8208 return ret;
fc9e9920 8209
1e88ad0a
S
8210 ret = drm_atomic_add_affected_planes(state, crtc);
8211 if (ret)
8212 goto fail;
e7b07cee
HW
8213 }
8214
2d9e6431
NK
8215 /*
8216 * Add all primary and overlay planes on the CRTC to the state
8217 * whenever a plane is enabled to maintain correct z-ordering
8218 * and to enable fast surface updates.
8219 */
8220 drm_for_each_crtc(crtc, dev) {
8221 bool modified = false;
8222
8223 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8224 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8225 continue;
8226
8227 if (new_plane_state->crtc == crtc ||
8228 old_plane_state->crtc == crtc) {
8229 modified = true;
8230 break;
8231 }
8232 }
8233
8234 if (!modified)
8235 continue;
8236
8237 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8238 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8239 continue;
8240
8241 new_plane_state =
8242 drm_atomic_get_plane_state(state, plane);
8243
8244 if (IS_ERR(new_plane_state)) {
8245 ret = PTR_ERR(new_plane_state);
8246 goto fail;
8247 }
8248 }
8249 }
8250
62f55537 8251 	/* Remove existing planes if they are modified */
9e869063
LL
8252 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8253 ret = dm_update_plane_state(dc, state, plane,
8254 old_plane_state,
8255 new_plane_state,
8256 false,
8257 &lock_and_validation_needed);
8258 if (ret)
8259 goto fail;
62f55537
AG
8260 }

	/* Disable all CRTCs that require disabling */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all CRTCs that require enabling */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}
8294
b349f76e
ES
8295 /* Run this here since we want to validate the streams we created */
8296 ret = drm_atomic_helper_check_planes(dev, state);
8297 if (ret)
8298 goto fail;
62f55537 8299
	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper; check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}
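
	/*
	 * Background (an assumption about the DRM core helpers, not spelled
	 * out here): the legacy cursor ioctls are routed through
	 * drm_atomic_helper_update_plane(), which sets
	 * state->legacy_cursor_update when the updated plane is the CRTC's
	 * cursor plane; only such commits are eligible for the async
	 * shortcut above.
	 */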

	/* Check scaling and underscan changes */
	/*
	 * TODO: scaling-change validation was removed because a new stream
	 * cannot be committed into the context w/o causing a full reset.
	 * Need to decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was the old way of deciding whether the
	 * global lock must be taken. It is left in to catch any corner cases
	 * we may have broken:
	 * lock_and_validation_needed == true  => UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed == false => UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of the MST topology in the state:
		 * we need to run the MST atomic check before calling
		 * dc_validate_global_state(), otherwise we can get stuck in
		 * an infinite loop and eventually hang.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context or affect global validation, and their
		 * commit work can be done in parallel with other commits that
		 * don't touch the same resource. If validation attached a new
		 * DC context to the DM atomic state, free it and retain the
		 * existing one instead.
		 */
		struct dm_atomic_state *new_dm_state, *old_dm_state;

		new_dm_state = dm_atomic_get_new_state(state);
		old_dm_state = dm_atomic_get_old_state(state);

		if (new_dm_state && old_dm_state) {
			if (new_dm_state->context)
				dc_release_state(new_dm_state->context);

			new_dm_state->context = old_dm_state->context;

			if (old_dm_state->context)
				dc_retain_state(old_dm_state->context);
		}
	}
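
	/*
	 * Reference-count sketch (an assumption about the DC API, not stated
	 * in this file): dc_release_state() drops one reference and frees the
	 * context when the count reaches zero, while dc_retain_state() takes
	 * one. The net effect above is that both DM states alias the context
	 * DC is already using, each holding its own reference, so a fast
	 * commit never swaps out the live DC context.
	 */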

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must have succeeded (ret == 0) by this point */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
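
/*
 * Hookup sketch (illustrative; the actual registration lives earlier in this
 * file and may differ in detail): amdgpu_dm_atomic_check() is meant to be
 * installed as the DRM atomic_check callback, e.g.:
 *
 *	static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 *		.fb_create = amdgpu_display_user_framebuffer_create,
 *		.atomic_check = amdgpu_dm_atomic_check,
 *		.atomic_commit = amdgpu_dm_atomic_commit,
 *	};
 */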

static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
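
/*
 * Worked example (register values cited from the DisplayPort spec, treat as
 * an assumption): DP_DOWN_STREAM_PORT_COUNT is DPCD register 0x007 and
 * DP_MSA_TIMING_PAR_IGNORED is bit 6 of that byte. A sink returning, say,
 * 0x41 (one downstream port, bit 6 set) can regenerate its own timings
 * without the MSA, so:
 *
 *	0x41 & DP_MSA_TIMING_PAR_IGNORED  ->  nonzero  ->  capable = true
 */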

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP
	 * sinks only.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;

			/* Check if the monitor has a continuous frequency mode */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;

			/*
			 * Check for the range-limits flag only. If flags == 1,
			 * no additional timing information is provided.
			 * Default GTF, GTF secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}
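
	/*
	 * Worked example (illustrative): a monitor whose EDID carries a
	 * range-limits descriptor of 48-75 Hz yields min_vfreq = 48 and
	 * max_vfreq = 75. Since 75 - 48 = 27 > 10, the display is treated as
	 * FreeSync capable; a near-fixed 59-60 Hz range (delta of 1) is not.
	 */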

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->psr_feature_enabled = dpcd_data[0] ? true : false;
		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
	}
}
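
/*
 * Worked example (DPCD values cited from the DisplayPort spec, treat as an
 * assumption): DP_PSR_SUPPORT is DPCD register 0x070, whose first byte
 * reports the PSR version the panel supports (0 = none, 1 = PSR1, 2 = PSR2).
 * Any nonzero readback of dpcd_data[0] therefore sets psr_feature_enabled
 * above.
 */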

/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	struct dc *dc = NULL;
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;
	dc = link->ctx->dc;

	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);

	return ret;
}
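
/*
 * Usage sketch (illustrative; the actual call site is elsewhere in this
 * file): the helper above is expected to run once per eDP stream whose sink
 * advertised PSR support, e.g.:
 *
 *	if (stream->link->psr_feature_enabled)
 *		amdgpu_dm_link_setup_psr(stream);
 *
 * dc_link_setup_psr() only programs the PSR configuration into the display
 * firmware; actually entering PSR is requested later through
 * amdgpu_dm_psr_enable().
 */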

/*
 * amdgpu_dm_psr_enable() - enable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR. Initialize to a fail-safe of 2 static
	 * frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
					stream->timing.pix_clk_100hz * 100),
					stream->timing.v_total),
					stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least 30 ms
	 * of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
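
	/*
	 * Worked example (illustrative): at 60 Hz, frame_time_microsec is
	 * 1000000 / 60 = 16666 us, so num_frames_static = 30000 / 16666 + 1
	 * = 2 frames; at 144 Hz (6944 us per frame) it becomes
	 * 30000 / 6944 + 1 = 5 frames before the static-screen interrupt
	 * may fire.
	 */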

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}