drm/amd/display: Wait for DMCUB to finish loading before executing commands
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

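/*
 * Illustrative sketch of the DMCUB firmware blob layout, as implied by
 * the parsing in dm_dmub_sw_init()/dm_dmub_hw_init() below; the
 * authoritative offsets and sizes come from
 * struct dmcub_firmware_header_v1_0:
 *
 *	ucode_array_offset_bytes
 *	|
 *	v
 *	|<----------- inst_const_bytes ----------->|<- bss_data_bytes ->|
 *	| PSP header | instruction/constants | PSP footer |  bss/data  |
 *	   (0x100)                              (0x100)
 *
 * Only the middle instruction/constants portion (inst_const_bytes minus
 * the PSP header and footer) is loaded into the DMCUB inst/const window.
 */
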
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
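
/*
 * As a rough, illustrative sketch (not an exhaustive call chain), a
 * userspace atomic commit flows through this file as follows:
 *
 *	amdgpu_dm_atomic_commit()		// .atomic_commit hook
 *	  -> drm_atomic_helper_commit()
 *	    -> amdgpu_dm_atomic_commit_tail()	// .atomic_commit_tail hook
 *	      -> dc_commit_state() /
 *	         dc_commit_updates_for_stream()	// DC programs the hardware
 */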

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);

/**
 * dm_vblank_get_counter() - Get counter for number of vertical blanks
 * @adev: [in] desired amdgpu device
 * @crtc: [in] which CRTC to get the counter from
 *
 * Return: Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
							amdgpu_crtc->crtc_id);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc_state->stream,
					&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc_state->stream,
					&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
 * * We are close enough to VUPDATE - the point of no return for hw
 * * We are in the fixed portion of variable front porch when vrr is enabled
 * * We are before VUPDATE, where double-buffered vrr registers are swapped
 *
 * It is therefore the correct place to signal vblank, send user flip events,
 * and update VRR.
 */
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
			 amdgpu_dm_vrr_active(acrtc_state));

	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
	drm_crtc_handle_vblank(&acrtc->base);

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(
			adev->dm.freesync_module,
			acrtc_state->stream,
			&acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(
			adev->dm.dc,
			acrtc_state->stream,
			&acrtc_state->vrr_params.adjust);
	}

	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
#endif

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
	       fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

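/*
 * The fix carried by this change ("wait for DMCUB to finish loading
 * before executing commands") means nothing may be queued on the DMCUB
 * ring before dmub_srv_wait_for_auto_load() above reports
 * DMUB_STATUS_OK. A minimal sketch of a guarded submission, assuming
 * the dmub_srv command helpers of this vintage ('cmd' is a
 * hypothetical, fully initialized command):
 *
 *	if (dmub_srv_wait_for_auto_load(dmub_srv, 100000) == DMUB_STATUS_OK) {
 *		dmub_srv_cmd_queue(dmub_srv, &cmd);
 *		dmub_srv_cmd_execute(dmub_srv);
 *		dmub_srv_wait_for_idle(dmub_srv, 100000);
 *	}
 */
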
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

#if defined(CONFIG_DEBUG_FS)
	if (dtn_debugfs_init(adev))
		DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
#endif

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_RENOIR:
		return 0;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
		AMDGPU_UCODE_ID_DMCUB;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction, Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i, r;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST*/
	s3_handle_mst(ddev, false);

	/* Do detection*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return 0;
}

b8592b48
LL
1715/**
1716 * DOC: DM Lifecycle
1717 *
 1718 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1719 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1720 * the base driver's device list to be initialized and torn down accordingly.
1721 *
1722 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1723 */
1724
4562236b
HW
1725static const struct amd_ip_funcs amdgpu_dm_funcs = {
1726 .name = "dm",
1727 .early_init = dm_early_init,
7abcf6b5 1728 .late_init = dm_late_init,
4562236b
HW
1729 .sw_init = dm_sw_init,
1730 .sw_fini = dm_sw_fini,
1731 .hw_init = dm_hw_init,
1732 .hw_fini = dm_hw_fini,
1733 .suspend = dm_suspend,
1734 .resume = dm_resume,
1735 .is_idle = dm_is_idle,
1736 .wait_for_idle = dm_wait_for_idle,
1737 .check_soft_reset = dm_check_soft_reset,
1738 .soft_reset = dm_soft_reset,
1739 .set_clockgating_state = dm_set_clockgating_state,
1740 .set_powergating_state = dm_set_powergating_state,
1741};
1742
1743const struct amdgpu_ip_block_version dm_ip_block =
1744{
1745 .type = AMD_IP_BLOCK_TYPE_DCE,
1746 .major = 1,
1747 .minor = 0,
1748 .rev = 0,
1749 .funcs = &amdgpu_dm_funcs,
1750};
1751
ca3268c4 1752
b8592b48
LL
1753/**
1754 * DOC: atomic
1755 *
1756 * *WIP*
1757 */
0a323b84 1758
b3663f70 1759static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 1760 .fb_create = amdgpu_display_user_framebuffer_create,
366c1baa 1761 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 1762 .atomic_check = amdgpu_dm_atomic_check,
da5c47f6 1763 .atomic_commit = amdgpu_dm_atomic_commit,
54f5499a
AG
1764};
1765
1766static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1767 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
1768};
1769
94562810
RS
1770static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1771{
1772 u32 max_cll, min_cll, max, min, q, r;
1773 struct amdgpu_dm_backlight_caps *caps;
1774 struct amdgpu_display_manager *dm;
1775 struct drm_connector *conn_base;
1776 struct amdgpu_device *adev;
1777 static const u8 pre_computed_values[] = {
1778 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1779 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1780
1781 if (!aconnector || !aconnector->dc_link)
1782 return;
1783
1784 conn_base = &aconnector->base;
1785 adev = conn_base->dev->dev_private;
1786 dm = &adev->dm;
1787 caps = &dm->backlight_caps;
1788 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1789 caps->aux_support = false;
1790 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1791 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1792
1793 if (caps->ext_caps->bits.oled == 1 ||
1794 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1795 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1796 caps->aux_support = true;
1797
1798 /* From the specification (CTA-861-G), for calculating the maximum
1799 * luminance we need to use:
1800 * Luminance = 50*2**(CV/32)
1801 * Where CV is a one-byte value.
 1802 * For calculating this expression we may need floating-point precision;
 1803 * to avoid this complexity level, we take advantage of the fact that CV is
 1804 * divided by a constant. From Euclid's division algorithm, we know that CV
 1805 * can be written as: CV = 32*q + r. Next, we replace CV in the
 1806 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
 1807 * need to pre-compute the value of r/32. For pre-computing the values
 1808 * we just used the following Ruby line:
1809 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1810 * The results of the above expressions can be verified at
1811 * pre_computed_values.
1812 */
1813 q = max_cll >> 5;
1814 r = max_cll % 32;
1815 max = (1 << q) * pre_computed_values[r];
1816
1817 // min luminance: maxLum * (CV/255)^2 / 100
1818 q = DIV_ROUND_CLOSEST(min_cll, 255);
1819 min = max * DIV_ROUND_CLOSEST((q * q), 100);
1820
1821 caps->aux_max_input_signal = max;
1822 caps->aux_min_input_signal = min;
1823}
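
/*
 * Worked example (illustrative, not part of the original file): a sink
 * reporting max_cll = 70 gives q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 nits, matching
 * 50 * 2**(70/32) ~= 227.8 from the CTA-861-G formula above.
 */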
1824
7578ecda 1825static void
3ee6b26b 1826amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
4562236b
HW
1827{
1828 struct drm_connector *connector = &aconnector->base;
1829 struct drm_device *dev = connector->dev;
b73a22d3 1830 struct dc_sink *sink;
4562236b
HW
1831
1832 /* MST handled by drm_mst framework */
1833 if (aconnector->mst_mgr.mst_state == true)
1834 return;
1835
1836
1837 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
1838 if (sink)
1839 dc_sink_retain(sink);
4562236b 1840
1f6010a9
DF
1841 /*
 1842 * Edid mgmt connector gets its first update only in the mode_valid hook, and then
4562236b 1843 * the connector sink is set to either a fake or physical sink depending on link status.
1f6010a9 1844 * Skip if already done during boot.
4562236b
HW
1845 */
1846 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1847 && aconnector->dc_em_sink) {
1848
1f6010a9
DF
1849 /*
 1850 * For S3 resume with headless, use the em_sink to fake the stream,
 1851 * because on resume connector->sink is set to NULL
4562236b
HW
1852 */
1853 mutex_lock(&dev->mode_config.mutex);
1854
1855 if (sink) {
922aa1e1 1856 if (aconnector->dc_sink) {
98e6436d 1857 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
1858 /*
 1859 * retain and release below are used to
 1860 * bump up the refcount for the sink because the link doesn't point
 1861 * to it anymore after disconnect, so on the next crtc-to-connector
922aa1e1
AG
 1862 * reshuffle by the UMD we would get an unwanted dc_sink release
1863 */
dcd5fb82 1864 dc_sink_release(aconnector->dc_sink);
922aa1e1 1865 }
4562236b 1866 aconnector->dc_sink = sink;
dcd5fb82 1867 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
1868 amdgpu_dm_update_freesync_caps(connector,
1869 aconnector->edid);
4562236b 1870 } else {
98e6436d 1871 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 1872 if (!aconnector->dc_sink) {
4562236b 1873 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 1874 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 1875 }
4562236b
HW
1876 }
1877
1878 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
1879
1880 if (sink)
1881 dc_sink_release(sink);
4562236b
HW
1882 return;
1883 }
1884
1885 /*
 1886 * TODO: temporary guard until a proper fix is found;
 1887 * if this sink is an MST sink, we should not do anything
1888 */
dcd5fb82
MF
1889 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1890 dc_sink_release(sink);
4562236b 1891 return;
dcd5fb82 1892 }
4562236b
HW
1893
1894 if (aconnector->dc_sink == sink) {
1f6010a9
DF
1895 /*
1896 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1897 * Do nothing!!
1898 */
f1ad2f5e 1899 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 1900 aconnector->connector_id);
dcd5fb82
MF
1901 if (sink)
1902 dc_sink_release(sink);
4562236b
HW
1903 return;
1904 }
1905
f1ad2f5e 1906 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
1907 aconnector->connector_id, aconnector->dc_sink, sink);
1908
1909 mutex_lock(&dev->mode_config.mutex);
1910
1f6010a9
DF
1911 /*
1912 * 1. Update status of the drm connector
1913 * 2. Send an event and let userspace tell us what to do
1914 */
4562236b 1915 if (sink) {
1f6010a9
DF
1916 /*
1917 * TODO: check if we still need the S3 mode update workaround.
1918 * If yes, put it here.
1919 */
4562236b 1920 if (aconnector->dc_sink)
98e6436d 1921 amdgpu_dm_update_freesync_caps(connector, NULL);
4562236b
HW
1922
1923 aconnector->dc_sink = sink;
dcd5fb82 1924 dc_sink_retain(aconnector->dc_sink);
900b3cb1 1925 if (sink->dc_edid.length == 0) {
4562236b 1926 aconnector->edid = NULL;
e86e8947 1927 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
900b3cb1 1928 } else {
4562236b
HW
1929 aconnector->edid =
1930 (struct edid *) sink->dc_edid.raw_edid;
1931
1932
c555f023 1933 drm_connector_update_edid_property(connector,
4562236b 1934 aconnector->edid);
e86e8947
HV
1935 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
1936 aconnector->edid);
4562236b 1937 }
98e6436d 1938 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 1939 update_connector_ext_caps(aconnector);
4562236b 1940 } else {
e86e8947 1941 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 1942 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 1943 drm_connector_update_edid_property(connector, NULL);
4562236b 1944 aconnector->num_modes = 0;
dcd5fb82 1945 dc_sink_release(aconnector->dc_sink);
4562236b 1946 aconnector->dc_sink = NULL;
5326c452 1947 aconnector->edid = NULL;
0c8620d6
BL
1948#ifdef CONFIG_DRM_AMD_DC_HDCP
1949 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
1950 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1951 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1952#endif
4562236b
HW
1953 }
1954
1955 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
1956
1957 if (sink)
1958 dc_sink_release(sink);
4562236b
HW
1959}
1960
1961static void handle_hpd_irq(void *param)
1962{
c84dec2f 1963 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
1964 struct drm_connector *connector = &aconnector->base;
1965 struct drm_device *dev = connector->dev;
fbbdadf2 1966 enum dc_connection_type new_connection_type = dc_connection_none;
0c8620d6
BL
1967#ifdef CONFIG_DRM_AMD_DC_HDCP
1968 struct amdgpu_device *adev = dev->dev_private;
1969#endif
4562236b 1970
1f6010a9
DF
1971 /*
 1972 * In case of failure, or for MST, there is no need to update the connector status
 1973 * or notify the OS, since (in the MST case) MST does this in its own context.
4562236b
HW
1974 */
1975 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 1976
0c8620d6 1977#ifdef CONFIG_DRM_AMD_DC_HDCP
96a3b32e
BL
1978 if (adev->asic_type >= CHIP_RAVEN)
1979 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
0c8620d6 1980#endif
2e0ac3d6
HW
1981 if (aconnector->fake_enable)
1982 aconnector->fake_enable = false;
1983
fbbdadf2
BL
1984 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1985 DRM_ERROR("KMS: Failed to detect connector\n");
1986
1987 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1988 emulated_link_detect(aconnector->dc_link);
1989
1990
1991 drm_modeset_lock_all(dev);
1992 dm_restore_drm_connector_state(dev, connector);
1993 drm_modeset_unlock_all(dev);
1994
1995 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1996 drm_kms_helper_hotplug_event(dev);
1997
1998 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
4562236b
HW
1999 amdgpu_dm_update_connector_after_detect(aconnector);
2000
2001
2002 drm_modeset_lock_all(dev);
2003 dm_restore_drm_connector_state(dev, connector);
2004 drm_modeset_unlock_all(dev);
2005
2006 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2007 drm_kms_helper_hotplug_event(dev);
2008 }
2009 mutex_unlock(&aconnector->hpd_lock);
2010
2011}
2012
c84dec2f 2013static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2014{
2015 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2016 uint8_t dret;
2017 bool new_irq_handled = false;
2018 int dpcd_addr;
2019 int dpcd_bytes_to_read;
2020
2021 const int max_process_count = 30;
2022 int process_count = 0;
2023
2024 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2025
2026 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2027 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2028 /* DPCD 0x200 - 0x201 for downstream IRQ */
2029 dpcd_addr = DP_SINK_COUNT;
2030 } else {
2031 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2032 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2033 dpcd_addr = DP_SINK_COUNT_ESI;
2034 }
2035
2036 dret = drm_dp_dpcd_read(
2037 &aconnector->dm_dp_aux.aux,
2038 dpcd_addr,
2039 esi,
2040 dpcd_bytes_to_read);
2041
2042 while (dret == dpcd_bytes_to_read &&
2043 process_count < max_process_count) {
2044 uint8_t retry;
2045 dret = 0;
2046
2047 process_count++;
2048
f1ad2f5e 2049 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
2050 /* handle HPD short pulse irq */
2051 if (aconnector->mst_mgr.mst_state)
2052 drm_dp_mst_hpd_irq(
2053 &aconnector->mst_mgr,
2054 esi,
2055 &new_irq_handled);
4562236b
HW
2056
2057 if (new_irq_handled) {
 2058 /* ACK at DPCD to notify downstream */
2059 const int ack_dpcd_bytes_to_write =
2060 dpcd_bytes_to_read - 1;
2061
2062 for (retry = 0; retry < 3; retry++) {
2063 uint8_t wret;
2064
2065 wret = drm_dp_dpcd_write(
2066 &aconnector->dm_dp_aux.aux,
2067 dpcd_addr + 1,
2068 &esi[1],
2069 ack_dpcd_bytes_to_write);
2070 if (wret == ack_dpcd_bytes_to_write)
2071 break;
2072 }
2073
1f6010a9 2074 /* check if there is a new irq to be handled */
4562236b
HW
2075 dret = drm_dp_dpcd_read(
2076 &aconnector->dm_dp_aux.aux,
2077 dpcd_addr,
2078 esi,
2079 dpcd_bytes_to_read);
2080
2081 new_irq_handled = false;
d4a6e8a9 2082 } else {
4562236b 2083 break;
d4a6e8a9 2084 }
4562236b
HW
2085 }
2086
2087 if (process_count == max_process_count)
f1ad2f5e 2088 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
2089}
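
/*
 * Worked numbers for the loop above (illustrative, not part of the
 * original file): on a DPCD 1.2+ sink, dpcd_bytes_to_read =
 * DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI = 0x2006 - 0x2002 = 4, so
 * esi[] holds DPCD 0x2002-0x2005 and the ACK writes the 3 bytes
 * starting at 0x2003; on an older sink it reads the 2 bytes at
 * 0x200-0x201 and ACKs the single service-IRQ byte at 0x201.
 */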
2090
2091static void handle_hpd_rx_irq(void *param)
2092{
c84dec2f 2093 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2094 struct drm_connector *connector = &aconnector->base;
2095 struct drm_device *dev = connector->dev;
53cbf65c 2096 struct dc_link *dc_link = aconnector->dc_link;
4562236b 2097 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
fbbdadf2 2098 enum dc_connection_type new_connection_type = dc_connection_none;
2a0f9270
BL
2099#ifdef CONFIG_DRM_AMD_DC_HDCP
2100 union hpd_irq_data hpd_irq_data;
2101 struct amdgpu_device *adev = dev->dev_private;
2102
2103 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2104#endif
4562236b 2105
1f6010a9
DF
2106 /*
 2107 * TODO: Temporarily add a mutex so that the hpd interrupt does not have a gpio
4562236b
HW
 2108 * conflict; after the i2c helper is implemented, this mutex should be
 2109 * retired.
2110 */
53cbf65c 2111 if (dc_link->type != dc_connection_mst_branch)
4562236b
HW
2112 mutex_lock(&aconnector->hpd_lock);
2113
2a0f9270
BL
2114
2115#ifdef CONFIG_DRM_AMD_DC_HDCP
2116 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2117#else
4e18814e 2118 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2a0f9270 2119#endif
4562236b
HW
2120 !is_mst_root_connector) {
2121 /* Downstream Port status changed. */
fbbdadf2
BL
2122 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2123 DRM_ERROR("KMS: Failed to detect connector\n");
2124
2125 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2126 emulated_link_detect(dc_link);
2127
2128 if (aconnector->fake_enable)
2129 aconnector->fake_enable = false;
2130
2131 amdgpu_dm_update_connector_after_detect(aconnector);
2132
2133
2134 drm_modeset_lock_all(dev);
2135 dm_restore_drm_connector_state(dev, connector);
2136 drm_modeset_unlock_all(dev);
2137
2138 drm_kms_helper_hotplug_event(dev);
2139 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
2140
2141 if (aconnector->fake_enable)
2142 aconnector->fake_enable = false;
2143
4562236b
HW
2144 amdgpu_dm_update_connector_after_detect(aconnector);
2145
2146
2147 drm_modeset_lock_all(dev);
2148 dm_restore_drm_connector_state(dev, connector);
2149 drm_modeset_unlock_all(dev);
2150
2151 drm_kms_helper_hotplug_event(dev);
2152 }
2153 }
2a0f9270
BL
2154#ifdef CONFIG_DRM_AMD_DC_HDCP
2155 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
2156 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2157#endif
4562236b 2158 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
53cbf65c 2159 (dc_link->type == dc_connection_mst_branch))
4562236b
HW
2160 dm_handle_hpd_rx_irq(aconnector);
2161
e86e8947
HV
2162 if (dc_link->type != dc_connection_mst_branch) {
2163 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
4562236b 2164 mutex_unlock(&aconnector->hpd_lock);
e86e8947 2165 }
4562236b
HW
2166}
2167
2168static void register_hpd_handlers(struct amdgpu_device *adev)
2169{
2170 struct drm_device *dev = adev->ddev;
2171 struct drm_connector *connector;
c84dec2f 2172 struct amdgpu_dm_connector *aconnector;
4562236b
HW
2173 const struct dc_link *dc_link;
2174 struct dc_interrupt_params int_params = {0};
2175
2176 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2177 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2178
2179 list_for_each_entry(connector,
2180 &dev->mode_config.connector_list, head) {
2181
c84dec2f 2182 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2183 dc_link = aconnector->dc_link;
2184
2185 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2186 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2187 int_params.irq_source = dc_link->irq_source_hpd;
2188
2189 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2190 handle_hpd_irq,
2191 (void *) aconnector);
2192 }
2193
2194 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2195
2196 /* Also register for DP short pulse (hpd_rx). */
2197 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2198 int_params.irq_source = dc_link->irq_source_hpd_rx;
2199
2200 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2201 handle_hpd_rx_irq,
2202 (void *) aconnector);
2203 }
2204 }
2205}
2206
2207/* Register IRQ sources and initialize IRQ callbacks */
2208static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2209{
2210 struct dc *dc = adev->dm.dc;
2211 struct common_irq_params *c_irq_params;
2212 struct dc_interrupt_params int_params = {0};
2213 int r;
2214 int i;
1ffdeca6 2215 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 2216
84374725 2217 if (adev->asic_type >= CHIP_VEGA10)
3760f76c 2218 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
2219
2220 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2221 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2222
1f6010a9
DF
2223 /*
2224 * Actions of amdgpu_irq_add_id():
4562236b
HW
2225 * 1. Register a set() function with base driver.
2226 * Base driver will call set() function to enable/disable an
2227 * interrupt in DC hardware.
2228 * 2. Register amdgpu_dm_irq_handler().
2229 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2230 * coming from DC hardware.
2231 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2232 * for acknowledging and handling. */
2233
b57de80a 2234 /* Use VBLANK interrupt */
e9029155 2235 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 2236 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
2237 if (r) {
2238 DRM_ERROR("Failed to add crtc irq id!\n");
2239 return r;
2240 }
2241
2242 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2243 int_params.irq_source =
3d761e79 2244 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 2245
b57de80a 2246 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
2247
2248 c_irq_params->adev = adev;
2249 c_irq_params->irq_src = int_params.irq_source;
2250
2251 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2252 dm_crtc_high_irq, c_irq_params);
2253 }
2254
d2574c33
MK
2255 /* Use VUPDATE interrupt */
2256 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2257 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2258 if (r) {
2259 DRM_ERROR("Failed to add vupdate irq id!\n");
2260 return r;
2261 }
2262
2263 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2264 int_params.irq_source =
2265 dc_interrupt_to_irq_source(dc, i, 0);
2266
2267 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2268
2269 c_irq_params->adev = adev;
2270 c_irq_params->irq_src = int_params.irq_source;
2271
2272 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2273 dm_vupdate_high_irq, c_irq_params);
2274 }
2275
3d761e79 2276 /* Use GRPH_PFLIP interrupt */
4562236b
HW
2277 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2278 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 2279 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
2280 if (r) {
2281 DRM_ERROR("Failed to add page flip irq id!\n");
2282 return r;
2283 }
2284
2285 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2286 int_params.irq_source =
2287 dc_interrupt_to_irq_source(dc, i, 0);
2288
2289 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2290
2291 c_irq_params->adev = adev;
2292 c_irq_params->irq_src = int_params.irq_source;
2293
2294 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2295 dm_pflip_high_irq, c_irq_params);
2296
2297 }
2298
2299 /* HPD */
2c8ad2d5
AD
2300 r = amdgpu_irq_add_id(adev, client_id,
2301 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
2302 if (r) {
2303 DRM_ERROR("Failed to add hpd irq id!\n");
2304 return r;
2305 }
2306
2307 register_hpd_handlers(adev);
2308
2309 return 0;
2310}
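
/*
 * Note on the indexing used above (illustrative, not part of the
 * original file): dc_interrupt_to_irq_source() maps an IH src id to a
 * DC irq source, and subtracting the first source of the group (e.g.
 * DC_IRQ_SOURCE_VBLANK1) folds the per-display sources into a
 * zero-based index, so the VBLANK interrupt of display 3 lands in
 * adev->dm.vblank_params[2].
 */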
2311
b86a1aa3 2312#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
2313/* Register IRQ sources and initialize IRQ callbacks */
2314static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2315{
2316 struct dc *dc = adev->dm.dc;
2317 struct common_irq_params *c_irq_params;
2318 struct dc_interrupt_params int_params = {0};
2319 int r;
2320 int i;
2321
2322 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2323 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2324
1f6010a9
DF
2325 /*
2326 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
2327 * 1. Register a set() function with base driver.
2328 * Base driver will call set() function to enable/disable an
2329 * interrupt in DC hardware.
2330 * 2. Register amdgpu_dm_irq_handler().
2331 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2332 * coming from DC hardware.
2333 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2334 * for acknowledging and handling.
1f6010a9 2335 */
ff5ef992
AD
2336
2337 /* Use VSTARTUP interrupt */
2338 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2339 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2340 i++) {
3760f76c 2341 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
2342
2343 if (r) {
2344 DRM_ERROR("Failed to add crtc irq id!\n");
2345 return r;
2346 }
2347
2348 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2349 int_params.irq_source =
2350 dc_interrupt_to_irq_source(dc, i, 0);
2351
2352 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2353
2354 c_irq_params->adev = adev;
2355 c_irq_params->irq_src = int_params.irq_source;
2356
2357 amdgpu_dm_irq_register_interrupt(adev, &int_params,
16f17eda 2358 dm_dcn_crtc_high_irq, c_irq_params);
d2574c33
MK
2359 }
2360
ff5ef992
AD
2361 /* Use GRPH_PFLIP interrupt */
2362 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2363 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2364 i++) {
3760f76c 2365 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
2366 if (r) {
2367 DRM_ERROR("Failed to add page flip irq id!\n");
2368 return r;
2369 }
2370
2371 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2372 int_params.irq_source =
2373 dc_interrupt_to_irq_source(dc, i, 0);
2374
2375 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2376
2377 c_irq_params->adev = adev;
2378 c_irq_params->irq_src = int_params.irq_source;
2379
2380 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2381 dm_pflip_high_irq, c_irq_params);
2382
2383 }
2384
2385 /* HPD */
3760f76c 2386 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
ff5ef992
AD
2387 &adev->hpd_irq);
2388 if (r) {
2389 DRM_ERROR("Failed to add hpd irq id!\n");
2390 return r;
2391 }
2392
2393 register_hpd_handlers(adev);
2394
2395 return 0;
2396}
2397#endif
2398
eb3dc897
NK
2399/*
2400 * Acquires the lock for the atomic state object and returns
2401 * the new atomic state.
2402 *
2403 * This should only be called during atomic check.
2404 */
2405static int dm_atomic_get_state(struct drm_atomic_state *state,
2406 struct dm_atomic_state **dm_state)
2407{
2408 struct drm_device *dev = state->dev;
2409 struct amdgpu_device *adev = dev->dev_private;
2410 struct amdgpu_display_manager *dm = &adev->dm;
2411 struct drm_private_state *priv_state;
eb3dc897
NK
2412
2413 if (*dm_state)
2414 return 0;
2415
eb3dc897
NK
2416 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2417 if (IS_ERR(priv_state))
2418 return PTR_ERR(priv_state);
2419
2420 *dm_state = to_dm_atomic_state(priv_state);
2421
2422 return 0;
2423}
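
/*
 * Usage sketch (illustrative, not part of the original file): callers in
 * atomic check keep a NULL-initialized pointer and let this helper fill
 * it on first use; the early return above makes repeated calls cheap:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context may now be inspected or modified.
 */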
2424
2425struct dm_atomic_state *
2426dm_atomic_get_new_state(struct drm_atomic_state *state)
2427{
2428 struct drm_device *dev = state->dev;
2429 struct amdgpu_device *adev = dev->dev_private;
2430 struct amdgpu_display_manager *dm = &adev->dm;
2431 struct drm_private_obj *obj;
2432 struct drm_private_state *new_obj_state;
2433 int i;
2434
2435 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2436 if (obj->funcs == dm->atomic_obj.funcs)
2437 return to_dm_atomic_state(new_obj_state);
2438 }
2439
2440 return NULL;
2441}
2442
2443struct dm_atomic_state *
2444dm_atomic_get_old_state(struct drm_atomic_state *state)
2445{
2446 struct drm_device *dev = state->dev;
2447 struct amdgpu_device *adev = dev->dev_private;
2448 struct amdgpu_display_manager *dm = &adev->dm;
2449 struct drm_private_obj *obj;
2450 struct drm_private_state *old_obj_state;
2451 int i;
2452
2453 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2454 if (obj->funcs == dm->atomic_obj.funcs)
2455 return to_dm_atomic_state(old_obj_state);
2456 }
2457
2458 return NULL;
2459}
2460
2461static struct drm_private_state *
2462dm_atomic_duplicate_state(struct drm_private_obj *obj)
2463{
2464 struct dm_atomic_state *old_state, *new_state;
2465
2466 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2467 if (!new_state)
2468 return NULL;
2469
2470 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2471
813d20dc
AW
2472 old_state = to_dm_atomic_state(obj->state);
2473
2474 if (old_state && old_state->context)
2475 new_state->context = dc_copy_state(old_state->context);
2476
eb3dc897
NK
2477 if (!new_state->context) {
2478 kfree(new_state);
2479 return NULL;
2480 }
2481
eb3dc897
NK
2482 return &new_state->base;
2483}
2484
2485static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2486 struct drm_private_state *state)
2487{
2488 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2489
2490 if (dm_state && dm_state->context)
2491 dc_release_state(dm_state->context);
2492
2493 kfree(dm_state);
2494}
2495
2496static struct drm_private_state_funcs dm_atomic_state_funcs = {
2497 .atomic_duplicate_state = dm_atomic_duplicate_state,
2498 .atomic_destroy_state = dm_atomic_destroy_state,
2499};
2500
4562236b
HW
2501static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2502{
eb3dc897 2503 struct dm_atomic_state *state;
4562236b
HW
2504 int r;
2505
2506 adev->mode_info.mode_config_initialized = true;
2507
4562236b 2508 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
54f5499a 2509 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b
HW
2510
2511 adev->ddev->mode_config.max_width = 16384;
2512 adev->ddev->mode_config.max_height = 16384;
2513
2514 adev->ddev->mode_config.preferred_depth = 24;
2515 adev->ddev->mode_config.prefer_shadow = 1;
1f6010a9 2516 /* indicates support for immediate flip */
4562236b
HW
2517 adev->ddev->mode_config.async_page_flip = true;
2518
770d13b1 2519 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
4562236b 2520
eb3dc897
NK
2521 state = kzalloc(sizeof(*state), GFP_KERNEL);
2522 if (!state)
2523 return -ENOMEM;
2524
813d20dc 2525 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
2526 if (!state->context) {
2527 kfree(state);
2528 return -ENOMEM;
2529 }
2530
2531 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2532
8c1a765b
DA
2533 drm_atomic_private_obj_init(adev->ddev,
2534 &adev->dm.atomic_obj,
eb3dc897
NK
2535 &state->base,
2536 &dm_atomic_state_funcs);
2537
3dc9b1ce 2538 r = amdgpu_display_modeset_create_props(adev);
4562236b
HW
2539 if (r)
2540 return r;
2541
6ce8f316
NK
2542 r = amdgpu_dm_audio_init(adev);
2543 if (r)
2544 return r;
2545
4562236b
HW
2546 return 0;
2547}
2548
206bbafe
DF
2549#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2550#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 2551#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 2552
4562236b
HW
2553#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2554 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2555
206bbafe
DF
2556static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2557{
2558#if defined(CONFIG_ACPI)
2559 struct amdgpu_dm_backlight_caps caps;
2560
2561 if (dm->backlight_caps.caps_valid)
2562 return;
2563
2564 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2565 if (caps.caps_valid) {
94562810
RS
2566 dm->backlight_caps.caps_valid = true;
2567 if (caps.aux_support)
2568 return;
206bbafe
DF
2569 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2570 dm->backlight_caps.max_input_signal = caps.max_input_signal;
206bbafe
DF
2571 } else {
2572 dm->backlight_caps.min_input_signal =
2573 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2574 dm->backlight_caps.max_input_signal =
2575 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2576 }
2577#else
94562810
RS
2578 if (dm->backlight_caps.aux_support)
2579 return;
2580
8bcbc9ef
DF
2581 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2582 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
2583#endif
2584}
2585
94562810
RS
2586static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2587{
2588 bool rc;
2589
2590 if (!link)
2591 return 1;
2592
2593 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2594 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2595
2596 return rc ? 0 : 1;
2597}
2598
2599static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2600 const uint32_t user_brightness)
2601{
2602 u32 min, max, conversion_pace;
2603 u32 brightness = user_brightness;
2604
2605 if (!caps)
2606 goto out;
2607
2608 if (!caps->aux_support) {
2609 max = caps->max_input_signal;
2610 min = caps->min_input_signal;
2611 /*
 2612 * The brightness input is in the range 0-255.
 2613 * It needs to be rescaled to be between the
 2614 * requested min and max input signal.
 2615 * It also needs to be scaled up by 0x101 to
 2616 * match the DC interface, which has a range of
 2617 * 0 to 0xffff.
2618 */
2619 conversion_pace = 0x101;
2620 brightness =
2621 user_brightness
2622 * conversion_pace
2623 * (max - min)
2624 / AMDGPU_MAX_BL_LEVEL
2625 + min * conversion_pace;
2626 } else {
2627 /* TODO
2628 * We are doing a linear interpolation here, which is OK but
2629 * does not provide the optimal result. We probably want
2630 * something close to the Perceptual Quantizer (PQ) curve.
2631 */
2632 max = caps->aux_max_input_signal;
2633 min = caps->aux_min_input_signal;
2634
2635 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2636 + user_brightness * max;
 2637 // Multiply the value by 1000 since we use millinits
2638 brightness *= 1000;
2639 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2640 }
2641
2642out:
2643 return brightness;
2644}
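
/*
 * Worked example (illustrative, not part of the original file): with the
 * default caps min_input_signal = 12 and max_input_signal = 255, a full
 * user brightness of 255 on the non-AUX path gives
 * 255 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 62451 + 3084 = 0xffff,
 * i.e. exactly full scale on the DC interface, while 0 maps to
 * 12 * 0x101 = 3084.
 */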
2645
4562236b
HW
2646static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2647{
2648 struct amdgpu_display_manager *dm = bl_get_data(bd);
206bbafe 2649 struct amdgpu_dm_backlight_caps caps;
94562810
RS
2650 struct dc_link *link = NULL;
2651 u32 brightness;
2652 bool rc;
4562236b 2653
206bbafe
DF
2654 amdgpu_dm_update_backlight_caps(dm);
2655 caps = dm->backlight_caps;
94562810
RS
2656
2657 link = (struct dc_link *)dm->backlight_link;
2658
2659 brightness = convert_brightness(&caps, bd->props.brightness);
2660 // Change brightness based on AUX property
2661 if (caps.aux_support)
2662 return set_backlight_via_aux(link, brightness);
2663
2664 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2665
2666 return rc ? 0 : 1;
4562236b
HW
2667}
2668
2669static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2670{
620a0d27
DF
2671 struct amdgpu_display_manager *dm = bl_get_data(bd);
2672 int ret = dc_link_get_backlight_level(dm->backlight_link);
2673
2674 if (ret == DC_ERROR_UNEXPECTED)
2675 return bd->props.brightness;
2676 return ret;
4562236b
HW
2677}
2678
2679static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 2680 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
2681 .get_brightness = amdgpu_dm_backlight_get_brightness,
2682 .update_status = amdgpu_dm_backlight_update_status,
2683};
2684
7578ecda
AD
2685static void
2686amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
2687{
2688 char bl_name[16];
2689 struct backlight_properties props = { 0 };
2690
206bbafe
DF
2691 amdgpu_dm_update_backlight_caps(dm);
2692
4562236b 2693 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 2694 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
2695 props.type = BACKLIGHT_RAW;
2696
2697 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2698 dm->adev->ddev->primary->index);
2699
2700 dm->backlight_dev = backlight_device_register(bl_name,
2701 dm->adev->ddev->dev,
2702 dm,
2703 &amdgpu_dm_backlight_ops,
2704 &props);
2705
74baea42 2706 if (IS_ERR(dm->backlight_dev))
4562236b
HW
2707 DRM_ERROR("DM: Backlight registration failed!\n");
2708 else
f1ad2f5e 2709 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
2710}
2711
2712#endif
2713
df534fff 2714static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 2715 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
2716 enum drm_plane_type plane_type,
2717 const struct dc_plane_cap *plane_cap)
df534fff 2718{
f180b4bc 2719 struct drm_plane *plane;
df534fff
S
2720 unsigned long possible_crtcs;
2721 int ret = 0;
2722
f180b4bc 2723 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
2724 if (!plane) {
2725 DRM_ERROR("KMS: Failed to allocate plane\n");
2726 return -ENOMEM;
2727 }
b2fddb13 2728 plane->type = plane_type;
df534fff
S
2729
2730 /*
b2fddb13
NK
2731 * HACK: IGT tests expect that the primary plane for a CRTC
2732 * can only have one possible CRTC. Only expose support for
 2733 * any CRTC on planes that are not going to be used as a primary
 2734 * plane for a CRTC - like overlay or underlay planes.
df534fff
S
2735 */
2736 possible_crtcs = 1 << plane_id;
2737 if (plane_id >= dm->dc->caps.max_streams)
2738 possible_crtcs = 0xff;
2739
cc1fec57 2740 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
2741
2742 if (ret) {
2743 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 2744 kfree(plane);
df534fff
S
2745 return ret;
2746 }
2747
54087768
NK
2748 if (mode_info)
2749 mode_info->planes[plane_id] = plane;
2750
df534fff
S
2751 return ret;
2752}
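
/*
 * Illustrative example (not from the original file): with
 * dc->caps.max_streams = 4, primary plane_id 2 gets possible_crtcs =
 * 1 << 2 = 0x4 and is therefore bound to CRTC 2 only, while an overlay
 * created at plane_id 4 hits the plane_id >= max_streams case and gets
 * 0xff, i.e. it may be assigned to any CRTC.
 */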
2753
89fc8d4e
HW
2754
2755static void register_backlight_device(struct amdgpu_display_manager *dm,
2756 struct dc_link *link)
2757{
2758#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2759 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2760
2761 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2762 link->type != dc_connection_none) {
1f6010a9
DF
2763 /*
 2764 * Even if registration failed, we should continue with
89fc8d4e
HW
 2765 * DM initialization, because not having a backlight control
 2766 * is better than a black screen.
2767 */
2768 amdgpu_dm_register_backlight_device(dm);
2769
2770 if (dm->backlight_dev)
2771 dm->backlight_link = link;
2772 }
2773#endif
2774}
2775
2776
1f6010a9
DF
2777/*
2778 * In this architecture, the association
4562236b
HW
2779 * connector -> encoder -> crtc
 2780 * is not really required. The crtc and connector will hold the
 2781 * display_index as an abstraction to use with the DAL component.
2782 *
2783 * Returns 0 on success
2784 */
7578ecda 2785static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
2786{
2787 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 2788 int32_t i;
c84dec2f 2789 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 2790 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 2791 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 2792 uint32_t link_cnt;
cc1fec57 2793 int32_t primary_planes;
fbbdadf2 2794 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 2795 const struct dc_plane_cap *plane;
4562236b
HW
2796
2797 link_cnt = dm->dc->caps.max_links;
4562236b
HW
2798 if (amdgpu_dm_mode_config_init(dm->adev)) {
2799 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 2800 return -EINVAL;
4562236b
HW
2801 }
2802
b2fddb13
NK
2803 /* There is one primary plane per CRTC */
2804 primary_planes = dm->dc->caps.max_streams;
54087768 2805 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 2806
b2fddb13
NK
2807 /*
2808 * Initialize primary planes, implicit planes for legacy IOCTLS.
2809 * Order is reversed to match iteration order in atomic check.
2810 */
2811 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
2812 plane = &dm->dc->caps.planes[i];
2813
b2fddb13 2814 if (initialize_plane(dm, mode_info, i,
cc1fec57 2815 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 2816 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 2817 goto fail;
d4e13b0d 2818 }
df534fff 2819 }
92f3ac40 2820
0d579c7e
NK
2821 /*
2822 * Initialize overlay planes, index starting after primary planes.
2823 * These planes have a higher DRM index than the primary planes since
2824 * they should be considered as having a higher z-order.
2825 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
2826 *
2827 * Only support DCN for now, and only expose one so we don't encourage
2828 * userspace to use up all the pipes.
0d579c7e 2829 */
cc1fec57
NK
2830 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2831 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2832
2833 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2834 continue;
2835
2836 if (!plane->blends_with_above || !plane->blends_with_below)
2837 continue;
2838
ea36ad34 2839 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
2840 continue;
2841
54087768 2842 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 2843 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 2844 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 2845 goto fail;
d4e13b0d 2846 }
cc1fec57
NK
2847
2848 /* Only create one overlay plane. */
2849 break;
d4e13b0d 2850 }
4562236b 2851
d4e13b0d 2852 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 2853 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 2854 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 2855 goto fail;
4562236b 2856 }
4562236b 2857
ab2541b6 2858 dm->display_indexes_num = dm->dc->caps.max_streams;
4562236b
HW
2859
2860 /* loops over all connectors on the board */
2861 for (i = 0; i < link_cnt; i++) {
89fc8d4e 2862 struct dc_link *link = NULL;
4562236b
HW
2863
2864 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2865 DRM_ERROR(
2866 "KMS: Cannot support more than %d display indexes\n",
2867 AMDGPU_DM_MAX_DISPLAY_INDEX);
2868 continue;
2869 }
2870
2871 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2872 if (!aconnector)
cd8a2ae8 2873 goto fail;
4562236b
HW
2874
2875 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 2876 if (!aencoder)
cd8a2ae8 2877 goto fail;
4562236b
HW
2878
2879 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2880 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 2881 goto fail;
4562236b
HW
2882 }
2883
2884 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2885 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 2886 goto fail;
4562236b
HW
2887 }
2888
89fc8d4e
HW
2889 link = dc_get_link_at_index(dm->dc, i);
2890
fbbdadf2
BL
2891 if (!dc_link_detect_sink(link, &new_connection_type))
2892 DRM_ERROR("KMS: Failed to detect connector\n");
2893
2894 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2895 emulated_link_detect(link);
2896 amdgpu_dm_update_connector_after_detect(aconnector);
2897
2898 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 2899 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 2900 register_backlight_device(dm, link);
397a9bc5
RL
2901 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2902 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
2903 }
2904
2905
4562236b
HW
2906 }
2907
2908 /* Software is initialized. Now we can register interrupt handlers. */
2909 switch (adev->asic_type) {
2910 case CHIP_BONAIRE:
2911 case CHIP_HAWAII:
cd4b356f
AD
2912 case CHIP_KAVERI:
2913 case CHIP_KABINI:
2914 case CHIP_MULLINS:
4562236b
HW
2915 case CHIP_TONGA:
2916 case CHIP_FIJI:
2917 case CHIP_CARRIZO:
2918 case CHIP_STONEY:
2919 case CHIP_POLARIS11:
2920 case CHIP_POLARIS10:
b264d345 2921 case CHIP_POLARIS12:
7737de91 2922 case CHIP_VEGAM:
2c8ad2d5 2923 case CHIP_VEGA10:
2325ff30 2924 case CHIP_VEGA12:
1fe6bf2f 2925 case CHIP_VEGA20:
4562236b
HW
2926 if (dce110_register_irq_handlers(dm->adev)) {
2927 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 2928 goto fail;
4562236b
HW
2929 }
2930 break;
b86a1aa3 2931#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 2932 case CHIP_RAVEN:
fbd2afe5 2933 case CHIP_NAVI12:
476e955d 2934 case CHIP_NAVI10:
fce651e3 2935 case CHIP_NAVI14:
30221ad8 2936 case CHIP_RENOIR:
ff5ef992
AD
2937 if (dcn10_register_irq_handlers(dm->adev)) {
2938 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 2939 goto fail;
ff5ef992
AD
2940 }
2941 break;
2942#endif
4562236b 2943 default:
e63f8673 2944 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 2945 goto fail;
4562236b
HW
2946 }
2947
1bc460a4
HW
2948 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
2949 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
2950
4562236b 2951 return 0;
cd8a2ae8 2952fail:
4562236b 2953 kfree(aencoder);
4562236b 2954 kfree(aconnector);
54087768 2955
59d0f396 2956 return -EINVAL;
4562236b
HW
2957}
2958
7578ecda 2959static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b
HW
2960{
2961 drm_mode_config_cleanup(dm->ddev);
eb3dc897 2962 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
2963 return;
2964}
2965
2966/******************************************************************************
2967 * amdgpu_display_funcs functions
2968 *****************************************************************************/
2969
1f6010a9 2970/*
4562236b
HW
2971 * dm_bandwidth_update - program display watermarks
2972 *
2973 * @adev: amdgpu_device pointer
2974 *
2975 * Calculate and program the display watermarks and line buffer allocation.
2976 */
2977static void dm_bandwidth_update(struct amdgpu_device *adev)
2978{
49c07a99 2979 /* TODO: implement later */
4562236b
HW
2980}
2981
39cc5be2 2982static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
2983 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
2984 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
2985 .backlight_set_level = NULL, /* never called for DC */
2986 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
2987 .hpd_sense = NULL,/* called unconditionally */
2988 .hpd_set_polarity = NULL, /* called unconditionally */
2989 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
2990 .page_flip_get_scanoutpos =
2991 dm_crtc_get_scanoutpos,/* called unconditionally */
2992 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
2993 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
2994};
2995
2996#if defined(CONFIG_DEBUG_KERNEL_DC)
2997
3ee6b26b
AD
2998static ssize_t s3_debug_store(struct device *device,
2999 struct device_attribute *attr,
3000 const char *buf,
3001 size_t count)
4562236b
HW
3002{
3003 int ret;
3004 int s3_state;
ef1de361 3005 struct drm_device *drm_dev = dev_get_drvdata(device);
4562236b
HW
3006 struct amdgpu_device *adev = drm_dev->dev_private;
3007
3008 ret = kstrtoint(buf, 0, &s3_state);
3009
3010 if (ret == 0) {
3011 if (s3_state) {
3012 dm_resume(adev);
4562236b
HW
3013 drm_kms_helper_hotplug_event(adev->ddev);
3014 } else
3015 dm_suspend(adev);
3016 }
3017
3018 return ret == 0 ? count : 0;
3019}
3020
3021DEVICE_ATTR_WO(s3_debug);
3022
3023#endif
3024
3025static int dm_early_init(void *handle)
3026{
3027 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3028
4562236b
HW
3029 switch (adev->asic_type) {
3030 case CHIP_BONAIRE:
3031 case CHIP_HAWAII:
3032 adev->mode_info.num_crtc = 6;
3033 adev->mode_info.num_hpd = 6;
3034 adev->mode_info.num_dig = 6;
4562236b 3035 break;
cd4b356f
AD
3036 case CHIP_KAVERI:
3037 adev->mode_info.num_crtc = 4;
3038 adev->mode_info.num_hpd = 6;
3039 adev->mode_info.num_dig = 7;
cd4b356f
AD
3040 break;
3041 case CHIP_KABINI:
3042 case CHIP_MULLINS:
3043 adev->mode_info.num_crtc = 2;
3044 adev->mode_info.num_hpd = 6;
3045 adev->mode_info.num_dig = 6;
cd4b356f 3046 break;
4562236b
HW
3047 case CHIP_FIJI:
3048 case CHIP_TONGA:
3049 adev->mode_info.num_crtc = 6;
3050 adev->mode_info.num_hpd = 6;
3051 adev->mode_info.num_dig = 7;
4562236b
HW
3052 break;
3053 case CHIP_CARRIZO:
3054 adev->mode_info.num_crtc = 3;
3055 adev->mode_info.num_hpd = 6;
3056 adev->mode_info.num_dig = 9;
4562236b
HW
3057 break;
3058 case CHIP_STONEY:
3059 adev->mode_info.num_crtc = 2;
3060 adev->mode_info.num_hpd = 6;
3061 adev->mode_info.num_dig = 9;
4562236b
HW
3062 break;
3063 case CHIP_POLARIS11:
b264d345 3064 case CHIP_POLARIS12:
4562236b
HW
3065 adev->mode_info.num_crtc = 5;
3066 adev->mode_info.num_hpd = 5;
3067 adev->mode_info.num_dig = 5;
4562236b
HW
3068 break;
3069 case CHIP_POLARIS10:
7737de91 3070 case CHIP_VEGAM:
4562236b
HW
3071 adev->mode_info.num_crtc = 6;
3072 adev->mode_info.num_hpd = 6;
3073 adev->mode_info.num_dig = 6;
4562236b 3074 break;
2c8ad2d5 3075 case CHIP_VEGA10:
2325ff30 3076 case CHIP_VEGA12:
1fe6bf2f 3077 case CHIP_VEGA20:
2c8ad2d5
AD
3078 adev->mode_info.num_crtc = 6;
3079 adev->mode_info.num_hpd = 6;
3080 adev->mode_info.num_dig = 6;
3081 break;
b86a1aa3 3082#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3083 case CHIP_RAVEN:
3084 adev->mode_info.num_crtc = 4;
3085 adev->mode_info.num_hpd = 4;
3086 adev->mode_info.num_dig = 4;
ff5ef992 3087 break;
476e955d 3088#endif
476e955d 3089 case CHIP_NAVI10:
fbd2afe5 3090 case CHIP_NAVI12:
476e955d
HW
3091 adev->mode_info.num_crtc = 6;
3092 adev->mode_info.num_hpd = 6;
3093 adev->mode_info.num_dig = 6;
3094 break;
fce651e3
BL
3095 case CHIP_NAVI14:
3096 adev->mode_info.num_crtc = 5;
3097 adev->mode_info.num_hpd = 5;
3098 adev->mode_info.num_dig = 5;
3099 break;
30221ad8
BL
3100 case CHIP_RENOIR:
3101 adev->mode_info.num_crtc = 4;
3102 adev->mode_info.num_hpd = 4;
3103 adev->mode_info.num_dig = 4;
3104 break;
4562236b 3105 default:
e63f8673 3106 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4562236b
HW
3107 return -EINVAL;
3108 }
3109
c8dd5715
MD
3110 amdgpu_dm_set_irq_funcs(adev);
3111
39cc5be2
AD
3112 if (adev->mode_info.funcs == NULL)
3113 adev->mode_info.funcs = &dm_display_funcs;
3114
1f6010a9
DF
3115 /*
3116 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 3117 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
3118 * amdgpu_device_init()
3119 */
4562236b
HW
3120#if defined(CONFIG_DEBUG_KERNEL_DC)
3121 device_create_file(
3122 adev->ddev->dev,
3123 &dev_attr_s3_debug);
3124#endif
3125
3126 return 0;
3127}
3128
9b690ef3 3129static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
3130 struct dc_stream_state *new_stream,
3131 struct dc_stream_state *old_stream)
9b690ef3 3132{
e7b07cee
HW
3133 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3134 return false;
3135
3136 if (!crtc_state->enable)
3137 return false;
3138
3139 return crtc_state->active;
3140}
3141
3142static bool modereset_required(struct drm_crtc_state *crtc_state)
3143{
3144 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3145 return false;
3146
3147 return !crtc_state->enable || !crtc_state->active;
3148}
3149
7578ecda 3150static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
3151{
3152 drm_encoder_cleanup(encoder);
3153 kfree(encoder);
3154}
3155
3156static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3157 .destroy = amdgpu_dm_encoder_destroy,
3158};
3159
e7b07cee 3160
695af5f9
NK
3161static int fill_dc_scaling_info(const struct drm_plane_state *state,
3162 struct dc_scaling_info *scaling_info)
e7b07cee 3163{
6491f0c0 3164 int scale_w, scale_h;
e7b07cee 3165
695af5f9 3166 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 3167
695af5f9
NK
3168 /* Source is fixed 16.16 but we ignore mantissa for now... */
3169 scaling_info->src_rect.x = state->src_x >> 16;
3170 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 3171
695af5f9
NK
3172 scaling_info->src_rect.width = state->src_w >> 16;
3173 if (scaling_info->src_rect.width == 0)
3174 return -EINVAL;
3175
3176 scaling_info->src_rect.height = state->src_h >> 16;
3177 if (scaling_info->src_rect.height == 0)
3178 return -EINVAL;
3179
3180 scaling_info->dst_rect.x = state->crtc_x;
3181 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
3182
3183 if (state->crtc_w == 0)
695af5f9 3184 return -EINVAL;
e7b07cee 3185
695af5f9 3186 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
3187
3188 if (state->crtc_h == 0)
695af5f9 3189 return -EINVAL;
e7b07cee 3190
695af5f9 3191 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 3192
695af5f9
NK
3193 /* DRM doesn't specify clipping on destination output. */
3194 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 3195
6491f0c0
NK
3196 /* TODO: Validate scaling per-format with DC plane caps */
3197 scale_w = scaling_info->dst_rect.width * 1000 /
3198 scaling_info->src_rect.width;
e7b07cee 3199
6491f0c0
NK
3200 if (scale_w < 250 || scale_w > 16000)
3201 return -EINVAL;
3202
3203 scale_h = scaling_info->dst_rect.height * 1000 /
3204 scaling_info->src_rect.height;
3205
3206 if (scale_h < 250 || scale_h > 16000)
3207 return -EINVAL;
3208
695af5f9
NK
3209 /*
3210 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3211 * assume reasonable defaults based on the format.
3212 */
e7b07cee 3213
695af5f9 3214 return 0;
4562236b 3215}
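
/*
 * Worked example (illustrative, not part of the original file): scaling
 * a 1920-pixel-wide source to a 480-pixel destination gives scale_w =
 * 480 * 1000 / 1920 = 250, exactly the 0.25x lower bound; a 479-pixel
 * destination yields 249 and is rejected with -EINVAL. The 16000 upper
 * bound likewise caps upscaling at 16x in either dimension.
 */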
695af5f9 3216
3ee6b26b 3217static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
9817d5f5 3218 uint64_t *tiling_flags)
e7b07cee 3219{
e68d14dd 3220 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
e7b07cee 3221 int r = amdgpu_bo_reserve(rbo, false);
b830ebc9 3222
e7b07cee 3223 if (unlikely(r)) {
1f6010a9 3224 /* Don't show error message when returning -ERESTARTSYS */
9bbc3031
JZ
3225 if (r != -ERESTARTSYS)
3226 DRM_ERROR("Unable to reserve buffer: %d\n", r);
e7b07cee
HW
3227 return r;
3228 }
3229
e7b07cee
HW
3230 if (tiling_flags)
3231 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3232
3233 amdgpu_bo_unreserve(rbo);
3234
3235 return r;
3236}
3237
7df7e505
NK
3238static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3239{
3240 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3241
3242 return offset ? (address + offset * 256) : 0;
3243}
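
/*
 * Illustrative example (not from the original file): the DCC_OFFSET_256B
 * tiling field is expressed in 256-byte units, so an offset of 4 places
 * the DCC metadata at address + 4 * 256 = address + 1024, while an
 * offset of 0 means the buffer carries no DCC metadata and 0 is
 * returned.
 */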
3244
695af5f9
NK
3245static int
3246fill_plane_dcc_attributes(struct amdgpu_device *adev,
3247 const struct amdgpu_framebuffer *afb,
3248 const enum surface_pixel_format format,
3249 const enum dc_rotation_angle rotation,
12e2b2d4 3250 const struct plane_size *plane_size,
695af5f9
NK
3251 const union dc_tiling_info *tiling_info,
3252 const uint64_t info,
3253 struct dc_plane_dcc_param *dcc,
3254 struct dc_plane_address *address)
7df7e505
NK
3255{
3256 struct dc *dc = adev->dm.dc;
8daa1218
NC
3257 struct dc_dcc_surface_param input;
3258 struct dc_surface_dcc_cap output;
7df7e505
NK
3259 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3260 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3261 uint64_t dcc_address;
3262
8daa1218
NC
3263 memset(&input, 0, sizeof(input));
3264 memset(&output, 0, sizeof(output));
3265
7df7e505 3266 if (!offset)
09e5665a
NK
3267 return 0;
3268
695af5f9 3269 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
09e5665a 3270 return 0;
7df7e505
NK
3271
3272 if (!dc->cap_funcs.get_dcc_compression_cap)
09e5665a 3273 return -EINVAL;
7df7e505 3274
695af5f9 3275 input.format = format;
12e2b2d4
DL
3276 input.surface_size.width = plane_size->surface_size.width;
3277 input.surface_size.height = plane_size->surface_size.height;
695af5f9 3278 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 3279
695af5f9 3280 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 3281 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 3282 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
3283 input.scan = SCAN_DIRECTION_VERTICAL;
3284
3285 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 3286 return -EINVAL;
7df7e505
NK
3287
3288 if (!output.capable)
09e5665a 3289 return -EINVAL;
7df7e505
NK
3290
3291 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
09e5665a 3292 return -EINVAL;
7df7e505 3293
09e5665a 3294 dcc->enable = 1;
12e2b2d4 3295 dcc->meta_pitch =
7df7e505 3296 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
12e2b2d4 3297 dcc->independent_64b_blks = i64b;
7df7e505
NK
3298
3299 dcc_address = get_dcc_address(afb->address, info);
09e5665a
NK
3300 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3301 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
7df7e505 3302
09e5665a
NK
3303 return 0;
3304}
3305
3306static int
320932bf 3307fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 3308 const struct amdgpu_framebuffer *afb,
695af5f9
NK
3309 const enum surface_pixel_format format,
3310 const enum dc_rotation_angle rotation,
3311 const uint64_t tiling_flags,
09e5665a 3312 union dc_tiling_info *tiling_info,
12e2b2d4 3313 struct plane_size *plane_size,
09e5665a 3314 struct dc_plane_dcc_param *dcc,
695af5f9 3315 struct dc_plane_address *address)
09e5665a 3316{
320932bf 3317 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
3318 int ret;
3319
3320 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 3321 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 3322 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
3323 memset(address, 0, sizeof(*address));
3324
695af5f9 3325 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
12e2b2d4
DL
3326 plane_size->surface_size.x = 0;
3327 plane_size->surface_size.y = 0;
3328 plane_size->surface_size.width = fb->width;
3329 plane_size->surface_size.height = fb->height;
3330 plane_size->surface_pitch =
320932bf
NK
3331 fb->pitches[0] / fb->format->cpp[0];
3332
e0634e8d
NK
3333 address->type = PLN_ADDR_TYPE_GRAPHICS;
3334 address->grph.addr.low_part = lower_32_bits(afb->address);
3335 address->grph.addr.high_part = upper_32_bits(afb->address);
1894478a 3336 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
1791e54f 3337 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 3338
12e2b2d4
DL
3339 plane_size->surface_size.x = 0;
3340 plane_size->surface_size.y = 0;
3341 plane_size->surface_size.width = fb->width;
3342 plane_size->surface_size.height = fb->height;
3343 plane_size->surface_pitch =
320932bf
NK
3344 fb->pitches[0] / fb->format->cpp[0];
3345
12e2b2d4
DL
3346 plane_size->chroma_size.x = 0;
3347 plane_size->chroma_size.y = 0;
320932bf 3348 /* TODO: set these based on surface format */
12e2b2d4
DL
3349 plane_size->chroma_size.width = fb->width / 2;
3350 plane_size->chroma_size.height = fb->height / 2;
320932bf 3351
12e2b2d4 3352 plane_size->chroma_pitch =
320932bf
NK
3353 fb->pitches[1] / fb->format->cpp[1];
3354
e0634e8d
NK
3355 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3356 address->video_progressive.luma_addr.low_part =
3357 lower_32_bits(afb->address);
3358 address->video_progressive.luma_addr.high_part =
3359 upper_32_bits(afb->address);
3360 address->video_progressive.chroma_addr.low_part =
3361 lower_32_bits(chroma_addr);
3362 address->video_progressive.chroma_addr.high_part =
3363 upper_32_bits(chroma_addr);
3364 }
09e5665a
NK
3365
3366 /* Fill GFX8 params */
3367 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3368 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3369
3370 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3371 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3372 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3373 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3374 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3375
3376 /* XXX fix me for VI */
3377 tiling_info->gfx8.num_banks = num_banks;
3378 tiling_info->gfx8.array_mode =
3379 DC_ARRAY_2D_TILED_THIN1;
3380 tiling_info->gfx8.tile_split = tile_split;
3381 tiling_info->gfx8.bank_width = bankw;
3382 tiling_info->gfx8.bank_height = bankh;
3383 tiling_info->gfx8.tile_aspect = mtaspect;
3384 tiling_info->gfx8.tile_mode =
3385 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3386 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3387 == DC_ARRAY_1D_TILED_THIN1) {
3388 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3389 }
3390
3391 tiling_info->gfx8.pipe_config =
3392 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3393
3394 if (adev->asic_type == CHIP_VEGA10 ||
3395 adev->asic_type == CHIP_VEGA12 ||
3396 adev->asic_type == CHIP_VEGA20 ||
476e955d 3397 adev->asic_type == CHIP_NAVI10 ||
fce651e3 3398 adev->asic_type == CHIP_NAVI14 ||
fbd2afe5 3399 adev->asic_type == CHIP_NAVI12 ||
30221ad8 3400 adev->asic_type == CHIP_RENOIR ||
09e5665a
NK
3401 adev->asic_type == CHIP_RAVEN) {
3402 /* Fill GFX9 params */
3403 tiling_info->gfx9.num_pipes =
3404 adev->gfx.config.gb_addr_config_fields.num_pipes;
3405 tiling_info->gfx9.num_banks =
3406 adev->gfx.config.gb_addr_config_fields.num_banks;
3407 tiling_info->gfx9.pipe_interleave =
3408 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3409 tiling_info->gfx9.num_shader_engines =
3410 adev->gfx.config.gb_addr_config_fields.num_se;
3411 tiling_info->gfx9.max_compressed_frags =
3412 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3413 tiling_info->gfx9.num_rb_per_se =
3414 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3415 tiling_info->gfx9.swizzle =
3416 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3417 tiling_info->gfx9.shaderEnable = 1;
3418
695af5f9
NK
3419 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3420 plane_size, tiling_info,
3421 tiling_flags, dcc, address);
09e5665a
NK
3422 if (ret)
3423 return ret;
3424 }
3425
3426 return 0;
7df7e505
NK
3427}
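/*
 * Editor's note: a stand-alone sketch of the pitch arithmetic used above.
 * DRM framebuffers store pitches in bytes while DC wants them in pixels,
 * hence the division by cpp (bytes per pixel); for NV12-style 4:2:0
 * surfaces the chroma plane is additionally subsampled by two in each
 * axis. All values below are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int width = 1920, height = 1080;	/* hypothetical NV12 fb */
	unsigned int pitches[2] = { 1920, 1920 };	/* bytes per row */
	unsigned int cpp[2] = { 1, 2 };			/* 1-byte Y, 2-byte CbCr */

	printf("luma   %ux%u pitch %u px\n", width, height, pitches[0] / cpp[0]);
	printf("chroma %ux%u pitch %u px\n", width / 2, height / 2,
	       pitches[1] / cpp[1]);			/* 960x540, pitch 960 */
	return 0;
}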
3428
d74004b6 3429static void
695af5f9 3430fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
3431 bool *per_pixel_alpha, bool *global_alpha,
3432 int *global_alpha_value)
3433{
3434 *per_pixel_alpha = false;
3435 *global_alpha = false;
3436 *global_alpha_value = 0xff;
3437
3438 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3439 return;
3440
3441 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3442 static const uint32_t alpha_formats[] = {
3443 DRM_FORMAT_ARGB8888,
3444 DRM_FORMAT_RGBA8888,
3445 DRM_FORMAT_ABGR8888,
3446 };
3447 uint32_t format = plane_state->fb->format->format;
3448 unsigned int i;
3449
3450 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3451 if (format == alpha_formats[i]) {
3452 *per_pixel_alpha = true;
3453 break;
3454 }
3455 }
3456 }
3457
3458 if (plane_state->alpha < 0xffff) {
3459 *global_alpha = true;
3460 *global_alpha_value = plane_state->alpha >> 8;
3461 }
3462}
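/*
 * Editor's note: a stand-alone sketch of the global-alpha conversion above.
 * The DRM plane alpha property is 16 bits wide (0x0000..0xffff) while DC
 * takes an 8-bit value, so the driver keeps the top byte; a fully opaque
 * 0xffff leaves global alpha disabled.
 */
#include <stdio.h>

int main(void)
{
	unsigned int drm_alpha[] = { 0xffff, 0x8000, 0x0000 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		int enabled = drm_alpha[i] < 0xffff;	/* opaque: leave it off */
		unsigned int value = drm_alpha[i] >> 8;	/* 16 -> 8 bits */

		printf("0x%04x -> enabled=%d value=0x%02x\n",
		       drm_alpha[i], enabled, value);	/* 0xff, 0x80, 0x00 */
	}
	return 0;
}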
3463
004fefa3
NK
3464static int
3465fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 3466 const enum surface_pixel_format format,
004fefa3
NK
3467 enum dc_color_space *color_space)
3468{
3469 bool full_range;
3470
3471 *color_space = COLOR_SPACE_SRGB;
3472
3473 /* DRM color properties only affect non-RGB formats. */
695af5f9 3474 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
3475 return 0;
3476
3477 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3478
3479 switch (plane_state->color_encoding) {
3480 case DRM_COLOR_YCBCR_BT601:
3481 if (full_range)
3482 *color_space = COLOR_SPACE_YCBCR601;
3483 else
3484 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3485 break;
3486
3487 case DRM_COLOR_YCBCR_BT709:
3488 if (full_range)
3489 *color_space = COLOR_SPACE_YCBCR709;
3490 else
3491 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3492 break;
3493
3494 case DRM_COLOR_YCBCR_BT2020:
3495 if (full_range)
3496 *color_space = COLOR_SPACE_2020_YCBCR;
3497 else
3498 return -EINVAL;
3499 break;
3500
3501 default:
3502 return -EINVAL;
3503 }
3504
3505 return 0;
3506}
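/*
 * Editor's note: a stand-alone restatement of the mapping above. DRM
 * exposes YCbCr encoding and range as two independent properties, and DC
 * folds them into a single color-space enum; limited-range BT.2020 has no
 * DC equivalent here and is rejected.
 */
#include <stdio.h>

enum encoding { BT601, BT709, BT2020 };

static const char *dc_color_space(enum encoding enc, int full_range)
{
	switch (enc) {
	case BT601:  return full_range ? "YCBCR601"   : "YCBCR601_LIMITED";
	case BT709:  return full_range ? "YCBCR709"   : "YCBCR709_LIMITED";
	case BT2020: return full_range ? "2020_YCBCR" : "-EINVAL";
	default:     return "-EINVAL";
	}
}

int main(void)
{
	printf("%s\n", dc_color_space(BT709, 0));	/* YCBCR709_LIMITED */
	return 0;
}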
3507
695af5f9
NK
3508static int
3509fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3510 const struct drm_plane_state *plane_state,
3511 const uint64_t tiling_flags,
3512 struct dc_plane_info *plane_info,
3513 struct dc_plane_address *address)
3514{
3515 const struct drm_framebuffer *fb = plane_state->fb;
3516 const struct amdgpu_framebuffer *afb =
3517 to_amdgpu_framebuffer(plane_state->fb);
3518 struct drm_format_name_buf format_name;
3519 int ret;
3520
3521 memset(plane_info, 0, sizeof(*plane_info));
3522
3523 switch (fb->format->format) {
3524 case DRM_FORMAT_C8:
3525 plane_info->format =
3526 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3527 break;
3528 case DRM_FORMAT_RGB565:
3529 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3530 break;
3531 case DRM_FORMAT_XRGB8888:
3532 case DRM_FORMAT_ARGB8888:
3533 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3534 break;
3535 case DRM_FORMAT_XRGB2101010:
3536 case DRM_FORMAT_ARGB2101010:
3537 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3538 break;
3539 case DRM_FORMAT_XBGR2101010:
3540 case DRM_FORMAT_ABGR2101010:
3541 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3542 break;
3543 case DRM_FORMAT_XBGR8888:
3544 case DRM_FORMAT_ABGR8888:
3545 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3546 break;
3547 case DRM_FORMAT_NV21:
3548 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3549 break;
3550 case DRM_FORMAT_NV12:
3551 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3552 break;
3553 default:
3554 DRM_ERROR(
3555 "Unsupported screen format %s\n",
3556 drm_get_format_name(fb->format->format, &format_name));
3557 return -EINVAL;
3558 }
3559
3560 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3561 case DRM_MODE_ROTATE_0:
3562 plane_info->rotation = ROTATION_ANGLE_0;
3563 break;
3564 case DRM_MODE_ROTATE_90:
3565 plane_info->rotation = ROTATION_ANGLE_90;
3566 break;
3567 case DRM_MODE_ROTATE_180:
3568 plane_info->rotation = ROTATION_ANGLE_180;
3569 break;
3570 case DRM_MODE_ROTATE_270:
3571 plane_info->rotation = ROTATION_ANGLE_270;
3572 break;
3573 default:
3574 plane_info->rotation = ROTATION_ANGLE_0;
3575 break;
3576 }
3577
3578 plane_info->visible = true;
3579 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3580
6d83a32d
MS
3581 plane_info->layer_index = 0;
3582
695af5f9
NK
3583 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3584 &plane_info->color_space);
3585 if (ret)
3586 return ret;
3587
3588 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3589 plane_info->rotation, tiling_flags,
3590 &plane_info->tiling_info,
3591 &plane_info->plane_size,
3592 &plane_info->dcc, address);
3593 if (ret)
3594 return ret;
3595
3596 fill_blending_from_plane_state(
3597 plane_state, &plane_info->per_pixel_alpha,
3598 &plane_info->global_alpha, &plane_info->global_alpha_value);
3599
3600 return 0;
3601}
3602
3603static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3604 struct dc_plane_state *dc_plane_state,
3605 struct drm_plane_state *plane_state,
3606 struct drm_crtc_state *crtc_state)
e7b07cee 3607{
cf020d49 3608 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
3609 const struct amdgpu_framebuffer *amdgpu_fb =
3610 to_amdgpu_framebuffer(plane_state->fb);
695af5f9
NK
3611 struct dc_scaling_info scaling_info;
3612 struct dc_plane_info plane_info;
3613 uint64_t tiling_flags;
3614 int ret;
e7b07cee 3615
695af5f9
NK
3616 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3617 if (ret)
3618 return ret;
e7b07cee 3619
695af5f9
NK
3620 dc_plane_state->src_rect = scaling_info.src_rect;
3621 dc_plane_state->dst_rect = scaling_info.dst_rect;
3622 dc_plane_state->clip_rect = scaling_info.clip_rect;
3623 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 3624
695af5f9 3625 ret = get_fb_info(amdgpu_fb, &tiling_flags);
e7b07cee
HW
3626 if (ret)
3627 return ret;
3628
695af5f9
NK
3629 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3630 &plane_info,
3631 &dc_plane_state->address);
004fefa3
NK
3632 if (ret)
3633 return ret;
3634
695af5f9
NK
3635 dc_plane_state->format = plane_info.format;
3636 dc_plane_state->color_space = plane_info.color_space;
3638 dc_plane_state->plane_size = plane_info.plane_size;
3639 dc_plane_state->rotation = plane_info.rotation;
3640 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3641 dc_plane_state->stereo_format = plane_info.stereo_format;
3642 dc_plane_state->tiling_info = plane_info.tiling_info;
3643 dc_plane_state->visible = plane_info.visible;
3644 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3645 dc_plane_state->global_alpha = plane_info.global_alpha;
3646 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3647 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 3648 dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now; see fill_dc_plane_info_and_addr() */
695af5f9 3649
e277adc5
LSL
3650 /*
3651 * Always set input transfer function, since plane state is refreshed
3652 * every time.
3653 */
cf020d49
NK
3654 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3655 if (ret)
3656 return ret;
e7b07cee 3657
cf020d49 3658 return 0;
e7b07cee
HW
3659}
3660
3ee6b26b
AD
3661static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3662 const struct dm_connector_state *dm_state,
3663 struct dc_stream_state *stream)
e7b07cee
HW
3664{
3665 enum amdgpu_rmx_type rmx_type;
3666
3667 struct rect src = { 0 }; /* viewport in composition space */
3668 struct rect dst = { 0 }; /* stream addressable area */
3669
3670 /* no mode. nothing to be done */
3671 if (!mode)
3672 return;
3673
3674 /* Full screen scaling by default */
3675 src.width = mode->hdisplay;
3676 src.height = mode->vdisplay;
3677 dst.width = stream->timing.h_addressable;
3678 dst.height = stream->timing.v_addressable;
3679
f4791779
HW
3680 if (dm_state) {
3681 rmx_type = dm_state->scaling;
3682 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3683 if (src.width * dst.height <
3684 src.height * dst.width) {
3685 /* height needs less upscaling/more downscaling */
3686 dst.width = src.width *
3687 dst.height / src.height;
3688 } else {
3689 /* width needs less upscaling/more downscaling */
3690 dst.height = src.height *
3691 dst.width / src.width;
3692 }
3693 } else if (rmx_type == RMX_CENTER) {
3694 dst = src;
e7b07cee 3695 }
e7b07cee 3696
f4791779
HW
3697 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3698 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 3699
f4791779
HW
3700 if (dm_state->underscan_enable) {
3701 dst.x += dm_state->underscan_hborder / 2;
3702 dst.y += dm_state->underscan_vborder / 2;
3703 dst.width -= dm_state->underscan_hborder;
3704 dst.height -= dm_state->underscan_vborder;
3705 }
e7b07cee
HW
3706 }
3707
3708 stream->src = src;
3709 stream->dst = dst;
3710
f1ad2f5e 3711 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
e7b07cee
HW
3712 dst.x, dst.y, dst.width, dst.height);
3713
3714}
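/*
 * Editor's note: a stand-alone worked example of the RMX_ASPECT math
 * above, with a hypothetical 1280x720 viewport scaled onto a 1920x1200
 * addressable timing. The width ratio (1.5x) is smaller than the height
 * ratio (1.67x), so the height is derived from the width ratio and the
 * result is centered, leaving 60-line letterbox bars.
 */
#include <stdio.h>

int main(void)
{
	int sw = 1280, sh = 720;	/* src: viewport */
	int dw = 1920, dh = 1200;	/* dst: addressable area */

	if (sw * dh < sh * dw)
		dw = sw * dh / sh;	/* height needs less upscaling */
	else
		dh = sh * dw / sw;	/* width needs less upscaling: 1080 */

	printf("dst %dx%d at (%d,%d)\n", dw, dh,
	       (1920 - dw) / 2, (1200 - dh) / 2);	/* 1920x1080 at (0,60) */
	return 0;
}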
3715
3ee6b26b 3716static enum dc_color_depth
42ba01fc 3717convert_color_depth_from_display_info(const struct drm_connector *connector,
1bc22f20
SW
3718 const struct drm_connector_state *state,
3719 bool is_y420)
e7b07cee 3720{
1bc22f20 3721 uint8_t bpc;
01c22997 3722
1bc22f20
SW
3723 if (is_y420) {
3724 bpc = 8;
3725
3726 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3727 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3728 bpc = 16;
3729 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3730 bpc = 12;
3731 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3732 bpc = 10;
3733 } else {
3734 bpc = (uint8_t)connector->display_info.bpc;
3735 /* Assume 8 bpc by default if no bpc is specified. */
3736 bpc = bpc ? bpc : 8;
3737 }
e7b07cee 3738
01933ba4
NK
3739 if (!state)
3740 state = connector->state;
3741
42ba01fc 3742 if (state) {
01c22997
NK
3743 /*
3744 * Cap display bpc based on the user requested value.
3745 *
3746 * The value for state->max_bpc may not be correctly updated
3747 * depending on when the connector gets added to the state
3748 * or if this was called outside of atomic check, so it
3749 * can't be used directly.
3750 */
3751 bpc = min(bpc, state->max_requested_bpc);
3752
1825fd34
NK
3753 /* Round down to the nearest even number. */
3754 bpc = bpc - (bpc & 1);
3755 }
07e3a1cf 3756
e7b07cee
HW
3757 switch (bpc) {
3758 case 0:
1f6010a9
DF
3759 /*
3760 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
3761 * EDID revisions before 1.4
3762 * TODO: Fix edid parsing
3763 */
3764 return COLOR_DEPTH_888;
3765 case 6:
3766 return COLOR_DEPTH_666;
3767 case 8:
3768 return COLOR_DEPTH_888;
3769 case 10:
3770 return COLOR_DEPTH_101010;
3771 case 12:
3772 return COLOR_DEPTH_121212;
3773 case 14:
3774 return COLOR_DEPTH_141414;
3775 case 16:
3776 return COLOR_DEPTH_161616;
3777 default:
3778 return COLOR_DEPTH_UNDEFINED;
3779 }
3780}
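/*
 * Editor's note: a stand-alone sketch of the bpc clamping above. The
 * EDID-reported depth is capped by the connector's max_requested_bpc
 * property and then rounded down to an even value, since odd bit depths
 * are not valid; an unspecified EDID depth falls back to 8.
 */
#include <stdio.h>

static unsigned int clamp_bpc(unsigned int edid_bpc, unsigned int max_requested)
{
	unsigned int bpc = edid_bpc ? edid_bpc : 8;	/* default to 8 */

	if (bpc > max_requested)
		bpc = max_requested;			/* min() */
	return bpc - (bpc & 1);				/* round down to even */
}

int main(void)
{
	printf("%u\n", clamp_bpc(12, 10));	/* 10 */
	printf("%u\n", clamp_bpc(12, 11));	/* 11 rounds down to 10 */
	printf("%u\n", clamp_bpc(0, 16));	/* unspecified -> 8 */
	return 0;
}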
3781
3ee6b26b
AD
3782static enum dc_aspect_ratio
3783get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 3784{
e11d4147
LSL
3785 /* 1-1 mapping, since both enums follow the HDMI spec. */
3786 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
3787}
3788
3ee6b26b
AD
3789static enum dc_color_space
3790get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
3791{
3792 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3793
3794 switch (dc_crtc_timing->pixel_encoding) {
3795 case PIXEL_ENCODING_YCBCR422:
3796 case PIXEL_ENCODING_YCBCR444:
3797 case PIXEL_ENCODING_YCBCR420:
3798 {
3799 /*
3800 * 27030khz is the separation point between HDTV and SDTV
3801 * according to HDMI spec, we use YCbCr709 and YCbCr601
3802 * respectively
3803 */
380604e2 3804 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
3805 if (dc_crtc_timing->flags.Y_ONLY)
3806 color_space =
3807 COLOR_SPACE_YCBCR709_LIMITED;
3808 else
3809 color_space = COLOR_SPACE_YCBCR709;
3810 } else {
3811 if (dc_crtc_timing->flags.Y_ONLY)
3812 color_space =
3813 COLOR_SPACE_YCBCR601_LIMITED;
3814 else
3815 color_space = COLOR_SPACE_YCBCR601;
3816 }
3817
3818 }
3819 break;
3820 case PIXEL_ENCODING_RGB:
3821 color_space = COLOR_SPACE_SRGB;
3822 break;
3823
3824 default:
3825 WARN_ON(1);
3826 break;
3827 }
3828
3829 return color_space;
3830}
3831
ea117312
TA
3832static bool adjust_colour_depth_from_display_info(
3833 struct dc_crtc_timing *timing_out,
3834 const struct drm_display_info *info)
400443e8 3835{
ea117312 3836 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 3837 int normalized_clk;
400443e8 3838 do {
380604e2 3839 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
3840 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3841 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3842 normalized_clk /= 2;
3843 /* Adjusting pix clock following on HDMI spec based on colour depth */
ea117312
TA
3844 switch (depth) {
3845 case COLOR_DEPTH_888:
3846 break;
400443e8
ML
3847 case COLOR_DEPTH_101010:
3848 normalized_clk = (normalized_clk * 30) / 24;
3849 break;
3850 case COLOR_DEPTH_121212:
3851 normalized_clk = (normalized_clk * 36) / 24;
3852 break;
3853 case COLOR_DEPTH_161616:
3854 normalized_clk = (normalized_clk * 48) / 24;
3855 break;
3856 default:
ea117312
TA
3857 /* The above depths are the only ones valid for HDMI. */
3858 return false;
400443e8 3859 }
ea117312
TA
3860 if (normalized_clk <= info->max_tmds_clock) {
3861 timing_out->display_color_depth = depth;
3862 return true;
3863 }
3864 } while (--depth > COLOR_DEPTH_666);
3865 return false;
400443e8 3866}
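/*
 * Editor's note: a stand-alone worked example of the deep-color check
 * above, with illustrative numbers. The pixel clock is scaled by the
 * extra bits per channel (30/24 for 10 bpc), halved first for YCbCr
 * 4:2:0, and compared against the sink's maximum TMDS clock; the loop
 * above walks the depth down until a value fits.
 */
#include <stdio.h>

int main(void)
{
	int pix_clk_khz = 594000;	/* 4K60 RGB */
	int max_tmds_khz = 600000;	/* typical HDMI 2.0 sink */

	/* 10 bpc RGB: 594000 * 30 / 24 = 742500 kHz, too fast */
	printf("10bpc RGB:   %d kHz\n", pix_clk_khz * 30 / 24);

	/* 10 bpc YCbCr 4:2:0: the clock is halved first, so it fits */
	printf("10bpc 4:2:0: %d kHz (max %d)\n",
	       (pix_clk_khz / 2) * 30 / 24, max_tmds_khz);	/* 371250 */
	return 0;
}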
e7b07cee 3867
42ba01fc
NK
3868static void fill_stream_properties_from_drm_display_mode(
3869 struct dc_stream_state *stream,
3870 const struct drm_display_mode *mode_in,
3871 const struct drm_connector *connector,
3872 const struct drm_connector_state *connector_state,
3873 const struct dc_stream_state *old_stream)
e7b07cee
HW
3874{
3875 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 3876 const struct drm_display_info *info = &connector->display_info;
d4252eee 3877 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
3878 struct hdmi_vendor_infoframe hv_frame;
3879 struct hdmi_avi_infoframe avi_frame;
e7b07cee 3880
acf83f86
WL
3881 memset(&hv_frame, 0, sizeof(hv_frame));
3882 memset(&avi_frame, 0, sizeof(avi_frame));
3883
e7b07cee
HW
3884 timing_out->h_border_left = 0;
3885 timing_out->h_border_right = 0;
3886 timing_out->v_border_top = 0;
3887 timing_out->v_border_bottom = 0;
3888 /* TODO: un-hardcode */
fe61a2f1 3889 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 3890 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 3891 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
3892 else if (drm_mode_is_420_also(info, mode_in)
3893 && aconnector->force_yuv420_output)
3894 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 3895 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 3896 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
3897 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3898 else
3899 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3900
3901 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3902 timing_out->display_color_depth = convert_color_depth_from_display_info(
1bc22f20
SW
3903 connector, connector_state,
3904 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
e7b07cee
HW
3905 timing_out->scan_type = SCANNING_TYPE_NODATA;
3906 timing_out->hdmi_vic = 0;
b333730d
BL
3907
3908 if (old_stream) {
3909 timing_out->vic = old_stream->timing.vic;
3910 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3911 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3912 } else {
3913 timing_out->vic = drm_match_cea_mode(mode_in);
3914 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
3915 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
3916 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
3917 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
3918 }
e7b07cee 3919
1cb1d477
WL
3920 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
3921 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
3922 timing_out->vic = avi_frame.video_code;
3923 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
3924 timing_out->hdmi_vic = hv_frame.vic;
3925 }
3926
e7b07cee
HW
3927 timing_out->h_addressable = mode_in->crtc_hdisplay;
3928 timing_out->h_total = mode_in->crtc_htotal;
3929 timing_out->h_sync_width =
3930 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
3931 timing_out->h_front_porch =
3932 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
3933 timing_out->v_total = mode_in->crtc_vtotal;
3934 timing_out->v_addressable = mode_in->crtc_vdisplay;
3935 timing_out->v_front_porch =
3936 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
3937 timing_out->v_sync_width =
3938 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 3939 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 3940 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
3941
3942 stream->output_color_space = get_output_color_space(timing_out);
3943
e43a432c
AK
3944 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
3945 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
3946 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
3947 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
3948 drm_mode_is_420_also(info, mode_in) &&
3949 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
3950 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3951 adjust_colour_depth_from_display_info(timing_out, info);
3952 }
3953 }
e7b07cee
HW
3954}
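/*
 * Editor's note: a stand-alone check of the timing conversion above,
 * using the standard CEA-861 1080p60 mode (VIC 16). Front porch and
 * sync width fall out as simple differences between the crtc_* fields,
 * and pix_clk_100hz is the kHz clock times ten.
 */
#include <stdio.h>

int main(void)
{
	/* CEA-861 1080p60 horizontal crtc timings */
	int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
	int clock_khz = 148500;

	printf("h_front_porch %d\n", hsync_start - hdisplay);	/* 88 */
	printf("h_sync_width  %d\n", hsync_end - hsync_start);	/* 44 */
	printf("h_blank       %d\n", htotal - hdisplay);	/* 280 */
	printf("pix_clk_100hz %d\n", clock_khz * 10);		/* 1485000 */
	return 0;
}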
3955
3ee6b26b
AD
3956static void fill_audio_info(struct audio_info *audio_info,
3957 const struct drm_connector *drm_connector,
3958 const struct dc_sink *dc_sink)
e7b07cee
HW
3959{
3960 int i = 0;
3961 int cea_revision = 0;
3962 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
3963
3964 audio_info->manufacture_id = edid_caps->manufacturer_id;
3965 audio_info->product_id = edid_caps->product_id;
3966
3967 cea_revision = drm_connector->display_info.cea_rev;
3968
090afc1e 3969 strscpy(audio_info->display_name,
d2b2562c 3970 edid_caps->display_name,
090afc1e 3971 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 3972
b830ebc9 3973 if (cea_revision >= 3) {
e7b07cee
HW
3974 audio_info->mode_count = edid_caps->audio_mode_count;
3975
3976 for (i = 0; i < audio_info->mode_count; ++i) {
3977 audio_info->modes[i].format_code =
3978 (enum audio_format_code)
3979 (edid_caps->audio_modes[i].format_code);
3980 audio_info->modes[i].channel_count =
3981 edid_caps->audio_modes[i].channel_count;
3982 audio_info->modes[i].sample_rates.all =
3983 edid_caps->audio_modes[i].sample_rate;
3984 audio_info->modes[i].sample_size =
3985 edid_caps->audio_modes[i].sample_size;
3986 }
3987 }
3988
3989 audio_info->flags.all = edid_caps->speaker_flags;
3990
3991 /* TODO: We only check for the progressive mode, check for interlace mode too */
b830ebc9 3992 if (drm_connector->latency_present[0]) {
e7b07cee
HW
3993 audio_info->video_latency = drm_connector->video_latency[0];
3994 audio_info->audio_latency = drm_connector->audio_latency[0];
3995 }
3996
3997 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
3998
3999}
4000
3ee6b26b
AD
4001static void
4002copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4003 struct drm_display_mode *dst_mode)
e7b07cee
HW
4004{
4005 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4006 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4007 dst_mode->crtc_clock = src_mode->crtc_clock;
4008 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4009 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 4010 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
4011 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4012 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4013 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4014 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4015 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4016 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4017 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4018 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4019}
4020
3ee6b26b
AD
4021static void
4022decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4023 const struct drm_display_mode *native_mode,
4024 bool scale_enabled)
e7b07cee
HW
4025{
4026 if (scale_enabled) {
4027 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4028 } else if (native_mode->clock == drm_mode->clock &&
4029 native_mode->htotal == drm_mode->htotal &&
4030 native_mode->vtotal == drm_mode->vtotal) {
4031 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4032 } else {
4033 /* neither scaling nor an amdgpu-inserted mode, nothing to patch */
4034 }
4035}
4036
aed15309
ML
4037static struct dc_sink *
4038create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 4039{
2e0ac3d6 4040 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 4041 struct dc_sink *sink = NULL;
2e0ac3d6
HW
4042 sink_init_data.link = aconnector->dc_link;
4043 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4044
4045 sink = dc_sink_create(&sink_init_data);
423788c7 4046 if (!sink) {
2e0ac3d6 4047 DRM_ERROR("Failed to create sink!\n");
aed15309 4048 return NULL;
423788c7 4049 }
2e0ac3d6 4050 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 4051
aed15309 4052 return sink;
2e0ac3d6
HW
4053}
4054
fa2123db
ML
4055static void set_multisync_trigger_params(
4056 struct dc_stream_state *stream)
4057{
4058 if (stream->triggered_crtc_reset.enabled) {
4059 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4060 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4061 }
4062}
4063
4064static void set_master_stream(struct dc_stream_state *stream_set[],
4065 int stream_count)
4066{
4067 int j, highest_rfr = 0, master_stream = 0;
4068
4069 for (j = 0; j < stream_count; j++) {
4070 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4071 int refresh_rate = 0;
4072
380604e2 4073 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
4074 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4075 if (refresh_rate > highest_rfr) {
4076 highest_rfr = refresh_rate;
4077 master_stream = j;
4078 }
4079 }
4080 }
4081 for (j = 0; j < stream_count; j++) {
03736f4c 4082 if (stream_set[j])
fa2123db
ML
4083 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4084 }
4085}
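/*
 * Editor's note: a stand-alone check of the refresh-rate expression used
 * above to pick the multisync master. pix_clk_100hz * 100 gives Hz, and
 * dividing by the full raster h_total * v_total yields the frame rate;
 * the 1080p60 raster works out to exactly 60.
 */
#include <stdio.h>

int main(void)
{
	unsigned int pix_clk_100hz = 1485000;	/* 148.5 MHz */
	unsigned int h_total = 2200, v_total = 1125;

	printf("%u Hz\n", (pix_clk_100hz * 100) / (h_total * v_total)); /* 60 */
	return 0;
}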
4086
4087static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4088{
4089 int i = 0;
4090
4091 if (context->stream_count < 2)
4092 return;
4093 for (i = 0; i < context->stream_count ; i++) {
4094 if (!context->streams[i])
4095 continue;
1f6010a9
DF
4096 /*
4097 * TODO: add a function to read AMD VSDB bits and set
fa2123db 4098 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 4099 * For now it's set to false
fa2123db
ML
4100 */
4101 set_multisync_trigger_params(context->streams[i]);
4102 }
4103 set_master_stream(context->streams, context->stream_count);
4104}
4105
3ee6b26b
AD
4106static struct dc_stream_state *
4107create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4108 const struct drm_display_mode *drm_mode,
b333730d
BL
4109 const struct dm_connector_state *dm_state,
4110 const struct dc_stream_state *old_stream)
e7b07cee
HW
4111{
4112 struct drm_display_mode *preferred_mode = NULL;
391ef035 4113 struct drm_connector *drm_connector;
42ba01fc
NK
4114 const struct drm_connector_state *con_state =
4115 dm_state ? &dm_state->base : NULL;
0971c40e 4116 struct dc_stream_state *stream = NULL;
e7b07cee
HW
4117 struct drm_display_mode mode = *drm_mode;
4118 bool native_mode_found = false;
b333730d
BL
4119 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4120 int mode_refresh;
58124bf8 4121 int preferred_refresh = 0;
defeb878 4122#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 4123 struct dsc_dec_dpcd_caps dsc_caps;
df2f1015 4124#endif
df2f1015 4125 uint32_t link_bandwidth_kbps;
b333730d 4126
aed15309 4127 struct dc_sink *sink = NULL;
b830ebc9 4128 if (aconnector == NULL) {
e7b07cee 4129 DRM_ERROR("aconnector is NULL!\n");
64245fa7 4130 return stream;
e7b07cee
HW
4131 }
4132
e7b07cee 4133 drm_connector = &aconnector->base;
2e0ac3d6 4134
f4ac176e 4135 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
4136 sink = create_fake_sink(aconnector);
4137 if (!sink)
4138 return stream;
aed15309
ML
4139 } else {
4140 sink = aconnector->dc_sink;
dcd5fb82 4141 dc_sink_retain(sink);
f4ac176e 4142 }
2e0ac3d6 4143
aed15309 4144 stream = dc_create_stream_for_sink(sink);
4562236b 4145
b830ebc9 4146 if (stream == NULL) {
e7b07cee 4147 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 4148 goto finish;
e7b07cee
HW
4149 }
4150
ceb3dbb4
JL
4151 stream->dm_stream_context = aconnector;
4152
4a36fcba
WL
4153 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4154 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4155
e7b07cee
HW
4156 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4157 /* Search for preferred mode */
4158 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4159 native_mode_found = true;
4160 break;
4161 }
4162 }
4163 if (!native_mode_found)
4164 preferred_mode = list_first_entry_or_null(
4165 &aconnector->base.modes,
4166 struct drm_display_mode,
4167 head);
4168
b333730d
BL
4169 mode_refresh = drm_mode_vrefresh(&mode);
4170
b830ebc9 4171 if (preferred_mode == NULL) {
1f6010a9
DF
4172 /*
4173 * This may not be an error: the use case is when we have no
e7b07cee
HW
4174 * usermode calls to reset and set the mode upon hotplug. In this
4175 * case, we call set mode ourselves to restore the previous mode,
4176 * and the mode list may not be filled in in time.
4177 */
f1ad2f5e 4178 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee
HW
4179 } else {
4180 decide_crtc_timing_for_drm_display_mode(
4181 &mode, preferred_mode,
f4791779 4182 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 4183 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
4184 }
4185
f783577c
JFZ
4186 if (!dm_state)
4187 drm_mode_set_crtcinfo(&mode, 0);
4188
b333730d
BL
4189 /*
4190 * If scaling is enabled and refresh rate didn't change
4191 * we copy the vic and polarities of the old timings
4192 */
4193 if (!scale || mode_refresh != preferred_refresh)
4194 fill_stream_properties_from_drm_display_mode(stream,
42ba01fc 4195 &mode, &aconnector->base, con_state, NULL);
b333730d
BL
4196 else
4197 fill_stream_properties_from_drm_display_mode(stream,
42ba01fc 4198 &mode, &aconnector->base, con_state, old_stream);
b333730d 4199
df2f1015
DF
4200 stream->timing.flags.DSC = 0;
4201
4202 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 4203#if defined(CONFIG_DRM_AMD_DC_DCN)
2af0f378
NC
4204 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4205 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
df2f1015
DF
4206 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4207 &dsc_caps);
defeb878 4208#endif
df2f1015
DF
4209 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4210 dc_link_get_link_cap(aconnector->dc_link));
4211
defeb878 4212#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 4213 if (dsc_caps.is_dsc_supported)
0417df16 4214 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 4215 &dsc_caps,
0417df16 4216 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
df2f1015
DF
4217 link_bandwidth_kbps,
4218 &stream->timing,
4219 &stream->timing.dsc_cfg))
4220 stream->timing.flags.DSC = 1;
39a4eb85 4221#endif
df2f1015 4222 }
39a4eb85 4223
e7b07cee
HW
4224 update_stream_scaling_settings(&mode, dm_state, stream);
4225
4226 fill_audio_info(
4227 &stream->audio_info,
4228 drm_connector,
aed15309 4229 sink);
e7b07cee 4230
ceb3dbb4 4231 update_stream_signal(stream, sink);
9182b4cb 4232
d832fc3b
WL
4233 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4234 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
8c322309
RL
4235 if (stream->link->psr_feature_enabled) {
4236 struct dc *core_dc = stream->link->ctx->dc;
d832fc3b 4237
8c322309
RL
4238 if (dc_is_dmcu_initialized(core_dc)) {
4239 struct dmcu *dmcu = core_dc->res_pool->dmcu;
4240
4241 stream->psr_version = dmcu->dmcu_version.psr_version;
5ed78cd6
AK
4242 mod_build_vsc_infopacket(stream,
4243 &stream->vsc_infopacket,
4244 &stream->use_vsc_sdp_for_colorimetry);
8c322309
RL
4245 }
4246 }
aed15309 4247finish:
dcd5fb82 4248 dc_sink_release(sink);
9e3efe3e 4249
e7b07cee
HW
4250 return stream;
4251}
4252
7578ecda 4253static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
4254{
4255 drm_crtc_cleanup(crtc);
4256 kfree(crtc);
4257}
4258
4259static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 4260 struct drm_crtc_state *state)
e7b07cee
HW
4261{
4262 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4263
4264 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4265 if (cur->stream)
4266 dc_stream_release(cur->stream);
4267
4268
4269 __drm_atomic_helper_crtc_destroy_state(state);
4270
4271
4272 kfree(state);
4273}
4274
4275static void dm_crtc_reset_state(struct drm_crtc *crtc)
4276{
4277 struct dm_crtc_state *state;
4278
4279 if (crtc->state)
4280 dm_crtc_destroy_state(crtc, crtc->state);
4281
4282 state = kzalloc(sizeof(*state), GFP_KERNEL);
4283 if (WARN_ON(!state))
4284 return;
4285
4286 crtc->state = &state->base;
4287 crtc->state->crtc = crtc;
4288
4289}
4290
4291static struct drm_crtc_state *
4292dm_crtc_duplicate_state(struct drm_crtc *crtc)
4293{
4294 struct dm_crtc_state *state, *cur;
4295
4296 cur = to_dm_crtc_state(crtc->state);
4297
4298 if (WARN_ON(!crtc->state))
4299 return NULL;
4300
2004f45e 4301 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
4302 if (!state)
4303 return NULL;
e7b07cee
HW
4304
4305 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4306
4307 if (cur->stream) {
4308 state->stream = cur->stream;
4309 dc_stream_retain(state->stream);
4310 }
4311
d6ef9b41
NK
4312 state->active_planes = cur->active_planes;
4313 state->interrupts_enabled = cur->interrupts_enabled;
180db303 4314 state->vrr_params = cur->vrr_params;
98e6436d 4315 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 4316 state->abm_level = cur->abm_level;
bb47de73
NK
4317 state->vrr_supported = cur->vrr_supported;
4318 state->freesync_config = cur->freesync_config;
14b25846 4319 state->crc_src = cur->crc_src;
cf020d49
NK
4320 state->cm_has_degamma = cur->cm_has_degamma;
4321 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
98e6436d 4322
e7b07cee
HW
4323 /* TODO: Duplicate dc_stream after the stream object is flattened */
4324
4325 return &state->base;
4326}
4327
d2574c33
MK
4328static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4329{
4330 enum dc_irq_source irq_source;
4331 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4332 struct amdgpu_device *adev = crtc->dev->dev_private;
4333 int rc;
4334
3a2ce8d6
LL
4335 /* Do not set vupdate for DCN hardware */
4336 if (adev->family > AMDGPU_FAMILY_AI)
4337 return 0;
4338
d2574c33
MK
4339 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4340
4341 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4342
4343 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4344 acrtc->crtc_id, enable ? "en" : "dis", rc);
4345 return rc;
4346}
589d2739
HW
4347
4348static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4349{
4350 enum dc_irq_source irq_source;
4351 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4352 struct amdgpu_device *adev = crtc->dev->dev_private;
d2574c33
MK
4353 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4354 int rc = 0;
4355
4356 if (enable) {
4357 /* vblank irq on -> Only need vupdate irq in vrr mode */
4358 if (amdgpu_dm_vrr_active(acrtc_state))
4359 rc = dm_set_vupdate_irq(crtc, true);
4360 } else {
4361 /* vblank irq off -> vupdate irq off */
4362 rc = dm_set_vupdate_irq(crtc, false);
4363 }
4364
4365 if (rc)
4366 return rc;
589d2739
HW
4367
4368 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
a0e30392 4369 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
589d2739
HW
4370}
4371
4372static int dm_enable_vblank(struct drm_crtc *crtc)
4373{
4374 return dm_set_vblank(crtc, true);
4375}
4376
4377static void dm_disable_vblank(struct drm_crtc *crtc)
4378{
4379 dm_set_vblank(crtc, false);
4380}
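/*
 * Editor's note: a stand-alone sketch, with hypothetical names, of the
 * IRQ pairing rule implemented in dm_set_vblank() above: the vupdate
 * interrupt is kept on alongside vblank only while VRR is active, and is
 * always dropped when vblank is disabled.
 */
#include <stdbool.h>
#include <stdio.h>

static bool want_vupdate_irq(bool vblank_on, bool vrr_active)
{
	return vblank_on && vrr_active;
}

int main(void)
{
	printf("%d\n", want_vupdate_irq(true, true));	/* 1: VRR needs vupdate */
	printf("%d\n", want_vupdate_irq(true, false));	/* 0: fixed refresh */
	printf("%d\n", want_vupdate_irq(false, true));	/* 0: vblank off */
	return 0;
}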
4381
e7b07cee
HW
4382/* Implemented only the options currently available for the driver */
4383static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4384 .reset = dm_crtc_reset_state,
4385 .destroy = amdgpu_dm_crtc_destroy,
4386 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4387 .set_config = drm_atomic_helper_set_config,
4388 .page_flip = drm_atomic_helper_page_flip,
4389 .atomic_duplicate_state = dm_crtc_duplicate_state,
4390 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 4391 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 4392 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 4393 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
589d2739
HW
4394 .enable_vblank = dm_enable_vblank,
4395 .disable_vblank = dm_disable_vblank,
e7b07cee
HW
4396};
4397
4398static enum drm_connector_status
4399amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4400{
4401 bool connected;
c84dec2f 4402 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 4403
1f6010a9
DF
4404 /*
4405 * Notes:
e7b07cee
HW
4406 * 1. This interface is NOT called in context of HPD irq.
4407 * 2. This interface *is called* in context of user-mode ioctl. Which
1f6010a9
DF
4408 * makes it a bad place for *any* MST-related activity.
4409 */
e7b07cee 4410
8580d60b
HW
4411 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4412 !aconnector->fake_enable)
e7b07cee
HW
4413 connected = (aconnector->dc_sink != NULL);
4414 else
4415 connected = (aconnector->base.force == DRM_FORCE_ON);
4416
4417 return (connected ? connector_status_connected :
4418 connector_status_disconnected);
4419}
4420
3ee6b26b
AD
4421int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4422 struct drm_connector_state *connector_state,
4423 struct drm_property *property,
4424 uint64_t val)
e7b07cee
HW
4425{
4426 struct drm_device *dev = connector->dev;
4427 struct amdgpu_device *adev = dev->dev_private;
4428 struct dm_connector_state *dm_old_state =
4429 to_dm_connector_state(connector->state);
4430 struct dm_connector_state *dm_new_state =
4431 to_dm_connector_state(connector_state);
4432
4433 int ret = -EINVAL;
4434
4435 if (property == dev->mode_config.scaling_mode_property) {
4436 enum amdgpu_rmx_type rmx_type;
4437
4438 switch (val) {
4439 case DRM_MODE_SCALE_CENTER:
4440 rmx_type = RMX_CENTER;
4441 break;
4442 case DRM_MODE_SCALE_ASPECT:
4443 rmx_type = RMX_ASPECT;
4444 break;
4445 case DRM_MODE_SCALE_FULLSCREEN:
4446 rmx_type = RMX_FULL;
4447 break;
4448 case DRM_MODE_SCALE_NONE:
4449 default:
4450 rmx_type = RMX_OFF;
4451 break;
4452 }
4453
4454 if (dm_old_state->scaling == rmx_type)
4455 return 0;
4456
4457 dm_new_state->scaling = rmx_type;
4458 ret = 0;
4459 } else if (property == adev->mode_info.underscan_hborder_property) {
4460 dm_new_state->underscan_hborder = val;
4461 ret = 0;
4462 } else if (property == adev->mode_info.underscan_vborder_property) {
4463 dm_new_state->underscan_vborder = val;
4464 ret = 0;
4465 } else if (property == adev->mode_info.underscan_property) {
4466 dm_new_state->underscan_enable = val;
4467 ret = 0;
c1ee92f9
DF
4468 } else if (property == adev->mode_info.abm_level_property) {
4469 dm_new_state->abm_level = val;
4470 ret = 0;
e7b07cee
HW
4471 }
4472
4473 return ret;
4474}
4475
3ee6b26b
AD
4476int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4477 const struct drm_connector_state *state,
4478 struct drm_property *property,
4479 uint64_t *val)
e7b07cee
HW
4480{
4481 struct drm_device *dev = connector->dev;
4482 struct amdgpu_device *adev = dev->dev_private;
4483 struct dm_connector_state *dm_state =
4484 to_dm_connector_state(state);
4485 int ret = -EINVAL;
4486
4487 if (property == dev->mode_config.scaling_mode_property) {
4488 switch (dm_state->scaling) {
4489 case RMX_CENTER:
4490 *val = DRM_MODE_SCALE_CENTER;
4491 break;
4492 case RMX_ASPECT:
4493 *val = DRM_MODE_SCALE_ASPECT;
4494 break;
4495 case RMX_FULL:
4496 *val = DRM_MODE_SCALE_FULLSCREEN;
4497 break;
4498 case RMX_OFF:
4499 default:
4500 *val = DRM_MODE_SCALE_NONE;
4501 break;
4502 }
4503 ret = 0;
4504 } else if (property == adev->mode_info.underscan_hborder_property) {
4505 *val = dm_state->underscan_hborder;
4506 ret = 0;
4507 } else if (property == adev->mode_info.underscan_vborder_property) {
4508 *val = dm_state->underscan_vborder;
4509 ret = 0;
4510 } else if (property == adev->mode_info.underscan_property) {
4511 *val = dm_state->underscan_enable;
4512 ret = 0;
c1ee92f9
DF
4513 } else if (property == adev->mode_info.abm_level_property) {
4514 *val = dm_state->abm_level;
4515 ret = 0;
e7b07cee 4516 }
c1ee92f9 4517
e7b07cee
HW
4518 return ret;
4519}
4520
526c654a
ED
4521static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4522{
4523 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4524
4525 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4526}
4527
7578ecda 4528static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 4529{
c84dec2f 4530 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
4531 const struct dc_link *link = aconnector->dc_link;
4532 struct amdgpu_device *adev = connector->dev->dev_private;
4533 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 4534
e7b07cee
HW
4535#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4536 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4537
89fc8d4e 4538 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
4539 link->type != dc_connection_none &&
4540 dm->backlight_dev) {
4541 backlight_device_unregister(dm->backlight_dev);
4542 dm->backlight_dev = NULL;
e7b07cee
HW
4543 }
4544#endif
dcd5fb82
MF
4545
4546 if (aconnector->dc_em_sink)
4547 dc_sink_release(aconnector->dc_em_sink);
4548 aconnector->dc_em_sink = NULL;
4549 if (aconnector->dc_sink)
4550 dc_sink_release(aconnector->dc_sink);
4551 aconnector->dc_sink = NULL;
4552
e86e8947 4553 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
4554 drm_connector_unregister(connector);
4555 drm_connector_cleanup(connector);
526c654a
ED
4556 if (aconnector->i2c) {
4557 i2c_del_adapter(&aconnector->i2c->base);
4558 kfree(aconnector->i2c);
4559 }
4560
e7b07cee
HW
4561 kfree(connector);
4562}
4563
4564void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4565{
4566 struct dm_connector_state *state =
4567 to_dm_connector_state(connector->state);
4568
df099b9b
LSL
4569 if (connector->state)
4570 __drm_atomic_helper_connector_destroy_state(connector->state);
4571
e7b07cee
HW
4572 kfree(state);
4573
4574 state = kzalloc(sizeof(*state), GFP_KERNEL);
4575
4576 if (state) {
4577 state->scaling = RMX_OFF;
4578 state->underscan_enable = false;
4579 state->underscan_hborder = 0;
4580 state->underscan_vborder = 0;
01933ba4 4581 state->base.max_requested_bpc = 8;
3261e013
ML
4582 state->vcpi_slots = 0;
4583 state->pbn = 0;
c3e50f89
NK
4584 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4585 state->abm_level = amdgpu_dm_abm_level;
4586
df099b9b 4587 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
4588 }
4589}
4590
3ee6b26b
AD
4591struct drm_connector_state *
4592amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
4593{
4594 struct dm_connector_state *state =
4595 to_dm_connector_state(connector->state);
4596
4597 struct dm_connector_state *new_state =
4598 kmemdup(state, sizeof(*state), GFP_KERNEL);
4599
98e6436d
AK
4600 if (!new_state)
4601 return NULL;
e7b07cee 4602
98e6436d
AK
4603 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4604
4605 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 4606 new_state->abm_level = state->abm_level;
922454c2
NK
4607 new_state->scaling = state->scaling;
4608 new_state->underscan_enable = state->underscan_enable;
4609 new_state->underscan_hborder = state->underscan_hborder;
4610 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
4611 new_state->vcpi_slots = state->vcpi_slots;
4612 new_state->pbn = state->pbn;
98e6436d 4613 return &new_state->base;
e7b07cee
HW
4614}
4615
4616static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4617 .reset = amdgpu_dm_connector_funcs_reset,
4618 .detect = amdgpu_dm_connector_detect,
4619 .fill_modes = drm_helper_probe_single_connector_modes,
4620 .destroy = amdgpu_dm_connector_destroy,
4621 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4622 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4623 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a
ED
4624 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4625 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
4626};
4627
e7b07cee
HW
4628static int get_modes(struct drm_connector *connector)
4629{
4630 return amdgpu_dm_connector_get_modes(connector);
4631}
4632
c84dec2f 4633static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
4634{
4635 struct dc_sink_init_data init_params = {
4636 .link = aconnector->dc_link,
4637 .sink_signal = SIGNAL_TYPE_VIRTUAL
4638 };
70e8ffc5 4639 struct edid *edid;
e7b07cee 4640
a89ff457 4641 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
4642 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4643 aconnector->base.name);
4644
4645 aconnector->base.force = DRM_FORCE_OFF;
4646 aconnector->base.override_edid = false;
4647 return;
4648 }
4649
70e8ffc5
HW
4650 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4651
e7b07cee
HW
4652 aconnector->edid = edid;
4653
4654 aconnector->dc_em_sink = dc_link_add_remote_sink(
4655 aconnector->dc_link,
4656 (uint8_t *)edid,
4657 (edid->extensions + 1) * EDID_LENGTH,
4658 &init_params);
4659
dcd5fb82 4660 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
4661 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4662 aconnector->dc_link->local_sink :
4663 aconnector->dc_em_sink;
dcd5fb82
MF
4664 dc_sink_retain(aconnector->dc_sink);
4665 }
e7b07cee
HW
4666}
4667
c84dec2f 4668static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
4669{
4670 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4671
1f6010a9
DF
4672 /*
4673 * In case of a headless boot with force-on for a DP managed connector,
e7b07cee
HW
4674 * those settings have to be != 0 to get an initial modeset.
4675 */
4676 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4677 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4678 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4679 }
4680
4681
4682 aconnector->base.override_edid = true;
4683 create_eml_sink(aconnector);
4684}
4685
ba9ca088 4686enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 4687 struct drm_display_mode *mode)
e7b07cee
HW
4688{
4689 int result = MODE_ERROR;
4690 struct dc_sink *dc_sink;
4691 struct amdgpu_device *adev = connector->dev->dev_private;
4692 /* TODO: Unhardcode stream count */
0971c40e 4693 struct dc_stream_state *stream;
c84dec2f 4694 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
a39438f0 4695 enum dc_status dc_result = DC_OK;
e7b07cee
HW
4696
4697 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4698 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4699 return result;
4700
1f6010a9
DF
4701 /*
4702 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
4703 * EDID mgmt
4704 */
4705 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4706 !aconnector->dc_em_sink)
4707 handle_edid_mgmt(aconnector);
4708
c84dec2f 4709 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 4710
b830ebc9 4711 if (dc_sink == NULL) {
e7b07cee
HW
4712 DRM_ERROR("dc_sink is NULL!\n");
4713 goto fail;
4714 }
4715
b333730d 4716 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
b830ebc9 4717 if (stream == NULL) {
e7b07cee
HW
4718 DRM_ERROR("Failed to create stream for sink!\n");
4719 goto fail;
4720 }
4721
a39438f0
HW
4722 dc_result = dc_validate_stream(adev->dm.dc, stream);
4723
4724 if (dc_result == DC_OK)
e7b07cee 4725 result = MODE_OK;
a39438f0 4726 else
9f921b14 4727 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
a39438f0 4728 mode->hdisplay,
26e99ba6 4729 mode->vdisplay,
9f921b14
HW
4730 mode->clock,
4731 dc_result);
e7b07cee
HW
4732
4733 dc_stream_release(stream);
4734
4735fail:
4736 /* TODO: error handling */
4737 return result;
4738}
4739
88694af9
NK
4740static int fill_hdr_info_packet(const struct drm_connector_state *state,
4741 struct dc_info_packet *out)
4742{
4743 struct hdmi_drm_infoframe frame;
4744 unsigned char buf[30]; /* 26 + 4 */
4745 ssize_t len;
4746 int ret, i;
4747
4748 memset(out, 0, sizeof(*out));
4749
4750 if (!state->hdr_output_metadata)
4751 return 0;
4752
4753 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4754 if (ret)
4755 return ret;
4756
4757 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4758 if (len < 0)
4759 return (int)len;
4760
4761 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4762 if (len != 30)
4763 return -EINVAL;
4764
4765 /* Prepare the infopacket for DC. */
4766 switch (state->connector->connector_type) {
4767 case DRM_MODE_CONNECTOR_HDMIA:
4768 out->hb0 = 0x87; /* type */
4769 out->hb1 = 0x01; /* version */
4770 out->hb2 = 0x1A; /* length */
4771 out->sb[0] = buf[3]; /* checksum */
4772 i = 1;
4773 break;
4774
4775 case DRM_MODE_CONNECTOR_DisplayPort:
4776 case DRM_MODE_CONNECTOR_eDP:
4777 out->hb0 = 0x00; /* sdp id, zero */
4778 out->hb1 = 0x87; /* type */
4779 out->hb2 = 0x1D; /* payload len - 1 */
4780 out->hb3 = (0x13 << 2); /* sdp version */
4781 out->sb[0] = 0x01; /* version */
4782 out->sb[1] = 0x1A; /* length */
4783 i = 2;
4784 break;
4785
4786 default:
4787 return -EINVAL;
4788 }
4789
4790 memcpy(&out->sb[i], &buf[4], 26);
4791 out->valid = true;
4792
4793 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4794 sizeof(out->sb), false);
4795
4796 return 0;
4797}
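/*
 * Editor's note: a stand-alone dump of the two container headers built
 * above for the same 26-byte HDR static-metadata payload: the HDMI
 * Dynamic Range and Mastering infoframe (type 0x87, version 1, length
 * 0x1A) versus the DP/eDP SDP header (type in hb1, payload length minus
 * one in hb2, SDP version 0x13 in the upper bits of hb3). Payload bytes
 * are omitted.
 */
#include <stdio.h>

int main(void)
{
	unsigned char hdmi_hb[3] = { 0x87, 0x01, 0x1a };	/* infoframe */
	unsigned char dp_hb[4] = { 0x00, 0x87, 0x1d, 0x13 << 2 }; /* SDP */

	printf("HDMI: %02x %02x %02x\n", hdmi_hb[0], hdmi_hb[1], hdmi_hb[2]);
	printf("DP:   %02x %02x %02x %02x\n",
	       dp_hb[0], dp_hb[1], dp_hb[2], dp_hb[3]);	/* hb3 = 0x4c */
	return 0;
}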
4798
4799static bool
4800is_hdr_metadata_different(const struct drm_connector_state *old_state,
4801 const struct drm_connector_state *new_state)
4802{
4803 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4804 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4805
4806 if (old_blob != new_blob) {
4807 if (old_blob && new_blob &&
4808 old_blob->length == new_blob->length)
4809 return memcmp(old_blob->data, new_blob->data,
4810 old_blob->length);
4811
4812 return true;
4813 }
4814
4815 return false;
4816}
4817
4818static int
4819amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 4820 struct drm_atomic_state *state)
88694af9 4821{
51e857af
SP
4822 struct drm_connector_state *new_con_state =
4823 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
4824 struct drm_connector_state *old_con_state =
4825 drm_atomic_get_old_connector_state(state, conn);
4826 struct drm_crtc *crtc = new_con_state->crtc;
4827 struct drm_crtc_state *new_crtc_state;
4828 int ret;
4829
4830 if (!crtc)
4831 return 0;
4832
4833 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4834 struct dc_info_packet hdr_infopacket;
4835
4836 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4837 if (ret)
4838 return ret;
4839
4840 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4841 if (IS_ERR(new_crtc_state))
4842 return PTR_ERR(new_crtc_state);
4843
4844 /*
4845 * DC considers the stream backends changed if the
4846 * static metadata changes. Forcing the modeset also
4847 * gives a simple way for userspace to switch from
b232d4ed
NK
4848 * 8bpc to 10bpc when setting the metadata to enter
4849 * or exit HDR.
4850 *
4851 * Changing the static metadata after it's been
4852 * set is permissible, however. So only force a
4853 * modeset if we're entering or exiting HDR.
88694af9 4854 */
b232d4ed
NK
4855 new_crtc_state->mode_changed =
4856 !old_con_state->hdr_output_metadata ||
4857 !new_con_state->hdr_output_metadata;
88694af9
NK
4858 }
4859
4860 return 0;
4861}
4862
e7b07cee
HW
4863static const struct drm_connector_helper_funcs
4864amdgpu_dm_connector_helper_funcs = {
4865 /*
1f6010a9 4866 * If a second, bigger display is hotplugged in FB console mode, its higher
b830ebc9 4867 * resolution modes will be filtered out by drm_mode_validate_size(), and
1f6010a9 4868 * those modes are missing after the user starts lightdm. So we need to
b830ebc9
HW
4869 * renew the mode list in the get_modes callback, not just return the count.
4870 */
e7b07cee
HW
4871 .get_modes = get_modes,
4872 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 4873 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
4874};
4875
4876static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4877{
4878}
4879
bc92c065
NK
4880static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
4881{
4882 struct drm_device *dev = new_crtc_state->crtc->dev;
4883 struct drm_plane *plane;
4884
4885 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
4886 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4887 return true;
4888 }
4889
4890 return false;
4891}
4892
d6ef9b41 4893static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
4894{
4895 struct drm_atomic_state *state = new_crtc_state->state;
4896 struct drm_plane *plane;
4897 int num_active = 0;
4898
4899 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
4900 struct drm_plane_state *new_plane_state;
4901
4902 /* Cursor planes are "fake". */
4903 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4904 continue;
4905
4906 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
4907
4908 if (!new_plane_state) {
4909 /*
4910 * The plane is enabled on the CRTC and hasn't changed
4911 * state. This means that it previously passed
4912 * validation and is therefore enabled.
4913 */
4914 num_active += 1;
4915 continue;
4916 }
4917
4918 /* We need a framebuffer to be considered enabled. */
4919 num_active += (new_plane_state->fb != NULL);
4920 }
4921
d6ef9b41
NK
4922 return num_active;
4923}
4924
4925/*
4926 * Sets whether interrupts should be enabled on a specific CRTC.
4927 * We require that the stream be enabled and that there exist active
4928 * DC planes on the stream.
4929 */
4930static void
4931dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
4932 struct drm_crtc_state *new_crtc_state)
4933{
4934 struct dm_crtc_state *dm_new_crtc_state =
4935 to_dm_crtc_state(new_crtc_state);
4936
4937 dm_new_crtc_state->active_planes = 0;
4938 dm_new_crtc_state->interrupts_enabled = false;
4939
4940 if (!dm_new_crtc_state->stream)
4941 return;
4942
4943 dm_new_crtc_state->active_planes =
4944 count_crtc_active_planes(new_crtc_state);
4945
4946 dm_new_crtc_state->interrupts_enabled =
4947 dm_new_crtc_state->active_planes > 0;
c14a005c
NK
4948}
4949
3ee6b26b
AD
4950static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
4951 struct drm_crtc_state *state)
e7b07cee
HW
4952{
4953 struct amdgpu_device *adev = crtc->dev->dev_private;
4954 struct dc *dc = adev->dm.dc;
4955 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
4956 int ret = -EINVAL;
4957
d6ef9b41
NK
4958 /*
4959 * Update interrupt state for the CRTC. This needs to happen whenever
4960 * the CRTC has changed or whenever any of its planes have changed.
4961 * Atomic check satisfies both of these requirements since the CRTC
4962 * is added to the state by DRM during drm_atomic_helper_check_planes.
4963 */
4964 dm_update_crtc_interrupt_state(crtc, state);
4965
9b690ef3
BL
4966 if (unlikely(!dm_crtc_state->stream &&
4967 modeset_required(state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
4968 WARN_ON(1);
4969 return ret;
4970 }
4971
1f6010a9 4972 /* In some use cases, like reset, no stream is attached */
e7b07cee
HW
4973 if (!dm_crtc_state->stream)
4974 return 0;
4975
bc92c065
NK
4976 /*
4977 * We want at least one hardware plane enabled to use
4978 * the stream with a cursor enabled.
4979 */
c14a005c 4980 if (state->enable && state->active &&
bc92c065 4981 does_crtc_have_active_cursor(state) &&
d6ef9b41 4982 dm_crtc_state->active_planes == 0)
c14a005c
NK
4983 return -EINVAL;
4984
62c933f9 4985 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
4986 return 0;
4987
4988 return ret;
4989}
4990
3ee6b26b
AD
4991static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
4992 const struct drm_display_mode *mode,
4993 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
4994{
4995 return true;
4996}
4997
4998static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
4999 .disable = dm_crtc_helper_disable,
5000 .atomic_check = dm_crtc_helper_atomic_check,
5001 .mode_fixup = dm_crtc_helper_mode_fixup
5002};
5003
5004static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5005{
5006
5007}
5008
3261e013
ML
5009static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5010{
5011 switch (display_color_depth) {
5012 case COLOR_DEPTH_666:
5013 return 6;
5014 case COLOR_DEPTH_888:
5015 return 8;
5016 case COLOR_DEPTH_101010:
5017 return 10;
5018 case COLOR_DEPTH_121212:
5019 return 12;
5020 case COLOR_DEPTH_141414:
5021 return 14;
5022 case COLOR_DEPTH_161616:
5023 return 16;
5024 default:
5025 break;
5026 }
5027 return 0;
5028}
5029
3ee6b26b
AD
5030static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5031 struct drm_crtc_state *crtc_state,
5032 struct drm_connector_state *conn_state)
e7b07cee 5033{
3261e013
ML
5034 struct drm_atomic_state *state = crtc_state->state;
5035 struct drm_connector *connector = conn_state->connector;
5036 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5037 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5038 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5039 struct drm_dp_mst_topology_mgr *mst_mgr;
5040 struct drm_dp_mst_port *mst_port;
5041 enum dc_color_depth color_depth;
5042 int clock, bpp = 0;
1bc22f20 5043 bool is_y420 = false;
3261e013
ML
5044
5045 if (!aconnector->port || !aconnector->dc_sink)
5046 return 0;
5047
5048 mst_port = aconnector->port;
5049 mst_mgr = &aconnector->mst_port->mst_mgr;
5050
5051 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5052 return 0;
5053
5054 if (!state->duplicated) {
1bc22f20
SW
5055 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5056 aconnector->force_yuv420_output;
5057 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5058 is_y420);
3261e013
ML
5059 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5060 clock = adjusted_mode->clock;
dc48529f 5061 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
5062 }
5063 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5064 mst_mgr,
5065 mst_port,
1c6c1cb5
ML
5066 dm_new_connector_state->pbn,
5067 0);
3261e013
ML
5068 if (dm_new_connector_state->vcpi_slots < 0) {
5069 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5070 return dm_new_connector_state->vcpi_slots;
5071 }
e7b07cee
HW
5072 return 0;
5073}
5074
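/*
 * Minimal sketch of the arithmetic behind drm_dp_calc_pbn_mode() as used
 * above (illustrative only; example_calc_pbn is hypothetical). One PBN
 * unit is 54/64 MBps and DP MST budgets a 0.6% overhead margin, so for an
 * uncompressed stream:
 *
 *   PBN = ceil(clock_kHz * bpp * 64 * 1.006 / (8 * 54 * 1000 * 1000))
 */
static int example_calc_pbn(int clock_khz, int bpp)
{
	long long num = (long long)clock_khz * bpp * 64 * 1006;
	long long den = 8LL * 54 * 1000 * 1000;

	/* Round up; e.g. 594000 kHz at 24 bpp -> 2125 PBN. */
	return (int)((num + den - 1) / den);
}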
5075const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5076 .disable = dm_encoder_helper_disable,
5077 .atomic_check = dm_encoder_helper_atomic_check
5078};
5079
d9fe1a4c 5080#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
5081static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5082 struct dc_state *dc_state)
5083{
5084 struct dc_stream_state *stream = NULL;
5085 struct drm_connector *connector;
5086 struct drm_connector_state *new_con_state, *old_con_state;
5087 struct amdgpu_dm_connector *aconnector;
5088 struct dm_connector_state *dm_conn_state;
5089 int i, j, clock, bpp;
5090 int vcpi, pbn_div, pbn = 0;
5091
5092 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5093
5094 aconnector = to_amdgpu_dm_connector(connector);
5095
5096 if (!aconnector->port)
5097 continue;
5098
5099 if (!new_con_state || !new_con_state->crtc)
5100 continue;
5101
5102 dm_conn_state = to_dm_connector_state(new_con_state);
5103
5104 for (j = 0; j < dc_state->stream_count; j++) {
5105 stream = dc_state->streams[j];
5106 if (!stream)
5107 continue;
5108
5109 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5110 break;
5111
5112 stream = NULL;
5113 }
5114
5115 if (!stream)
5116 continue;
5117
5118 if (stream->timing.flags.DSC != 1) {
5119 drm_dp_mst_atomic_enable_dsc(state,
5120 aconnector->port,
5121 dm_conn_state->pbn,
5122 0,
5123 false);
5124 continue;
5125 }
5126
5127 pbn_div = dm_mst_get_pbn_divider(stream->link);
5128 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5129 clock = stream->timing.pix_clk_100hz / 10;
5130 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5131 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5132 aconnector->port,
5133 pbn, pbn_div,
5134 true);
5135 if (vcpi < 0)
5136 return vcpi;
5137
5138 dm_conn_state->pbn = pbn;
5139 dm_conn_state->vcpi_slots = vcpi;
5140 }
5141 return 0;
5142}
d9fe1a4c 5143#endif
29b9ba74 5144
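/*
 * Illustrative companion to the DSC path above, assuming the 1/16-bpp
 * convention: dsc_cfg.bits_per_pixel is expressed in units of 1/16 of a
 * bit per pixel, which is why drm_dp_calc_pbn_mode() is told the stream
 * is compressed via its third argument. example_calc_pbn_dsc is
 * hypothetical and reuses the sketch further up.
 */
static int example_calc_pbn_dsc(int clock_khz, int bpp_x16)
{
	/* bpp_x16 is in 1/16 bpp units, matching dsc_cfg.bits_per_pixel. */
	return example_calc_pbn(clock_khz, bpp_x16 / 16);
}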
e7b07cee
HW
5145static void dm_drm_plane_reset(struct drm_plane *plane)
5146{
5147 struct dm_plane_state *amdgpu_state = NULL;
5148
5149 if (plane->state)
5150 plane->funcs->atomic_destroy_state(plane, plane->state);
5151
5152 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 5153 WARN_ON(amdgpu_state == NULL);
1f6010a9 5154
7ddaef96
NK
5155 if (amdgpu_state)
5156 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
5157}
5158
5159static struct drm_plane_state *
5160dm_drm_plane_duplicate_state(struct drm_plane *plane)
5161{
5162 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5163
5164 old_dm_plane_state = to_dm_plane_state(plane->state);
5165 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5166 if (!dm_plane_state)
5167 return NULL;
5168
5169 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5170
3be5262e
HW
5171 if (old_dm_plane_state->dc_state) {
5172 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5173 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
5174 }
5175
5176 return &dm_plane_state->base;
5177}
5178
5179void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 5180 struct drm_plane_state *state)
e7b07cee
HW
5181{
5182 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5183
3be5262e
HW
5184 if (dm_plane_state->dc_state)
5185 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 5186
0627bbd3 5187 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
5188}
5189
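/*
 * Hypothetical demonstration of the refcount pairing above: each
 * duplicated plane state takes an extra reference on the shared
 * dc_plane_state, and destroying it drops that reference again, so the
 * DC object lives exactly as long as some DRM plane state points at it.
 */
static void example_plane_state_lifetime(struct drm_plane *plane)
{
	struct drm_plane_state *copy =
		plane->funcs->atomic_duplicate_state(plane); /* retains dc_state */

	if (copy)
		plane->funcs->atomic_destroy_state(plane, copy); /* releases it */
}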
5190static const struct drm_plane_funcs dm_plane_funcs = {
5191 .update_plane = drm_atomic_helper_update_plane,
5192 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 5193 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
5194 .reset = dm_drm_plane_reset,
5195 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5196 .atomic_destroy_state = dm_drm_plane_destroy_state,
5197};
5198
3ee6b26b
AD
5199static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5200 struct drm_plane_state *new_state)
e7b07cee
HW
5201{
5202 struct amdgpu_framebuffer *afb;
5203 struct drm_gem_object *obj;
5d43be0c 5204 struct amdgpu_device *adev;
e7b07cee 5205 struct amdgpu_bo *rbo;
e7b07cee 5206 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
5207 struct list_head list;
5208 struct ttm_validate_buffer tv;
5209 struct ww_acquire_ctx ticket;
e0634e8d 5210 uint64_t tiling_flags;
5d43be0c
CK
5211 uint32_t domain;
5212 int r;
e7b07cee
HW
5213
5214 dm_plane_state_old = to_dm_plane_state(plane->state);
5215 dm_plane_state_new = to_dm_plane_state(new_state);
5216
5217 if (!new_state->fb) {
f1ad2f5e 5218 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
5219 return 0;
5220 }
5221
5222 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 5223 obj = new_state->fb->obj[0];
e7b07cee 5224 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 5225 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
5226 INIT_LIST_HEAD(&list);
5227
5228 tv.bo = &rbo->tbo;
5229 tv.num_shared = 1;
5230 list_add(&tv.head, &list);
5231
9165fb87 5232 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
5233 if (r) {
5234 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 5235 return r;
0f257b09 5236 }
e7b07cee 5237
5d43be0c 5238 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 5239 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
5240 else
5241 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 5242
7b7c6c81 5243 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 5244 if (unlikely(r != 0)) {
30b7c614
HW
5245 if (r != -ERESTARTSYS)
5246 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 5247 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
5248 return r;
5249 }
5250
bb812f1e
JZ
5251 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5252 if (unlikely(r != 0)) {
5253 amdgpu_bo_unpin(rbo);
0f257b09 5254 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5255 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
5256 return r;
5257 }
7df7e505
NK
5258
5259 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5260
0f257b09 5261 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5262
7b7c6c81 5263 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
5264
5265 amdgpu_bo_ref(rbo);
5266
3be5262e
HW
5267 if (dm_plane_state_new->dc_state &&
5268 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5269 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
e7b07cee 5270
320932bf 5271 fill_plane_buffer_attributes(
695af5f9
NK
5272 adev, afb, plane_state->format, plane_state->rotation,
5273 tiling_flags, &plane_state->tiling_info,
320932bf 5274 &plane_state->plane_size, &plane_state->dcc,
695af5f9 5275 &plane_state->address);
e7b07cee
HW
5276 }
5277
e7b07cee
HW
5278 return 0;
5279}
5280
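/*
 * Condensed sketch of the pin flow implemented above (illustrative;
 * unwinding of a successful pin on later failure is elided): reserve the
 * BO, pin it in a scanout-capable domain, make sure it is GART-bound,
 * then read the GPU address back.
 */
static int example_pin_fb_bo(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	int r = amdgpu_bo_reserve(bo, false);

	if (r)
		return r;

	r = amdgpu_bo_pin(bo, domain);
	if (!r)
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (!r)
		*gpu_addr = amdgpu_bo_gpu_offset(bo);

	amdgpu_bo_unreserve(bo);
	return r;
}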
3ee6b26b
AD
5281static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5282 struct drm_plane_state *old_state)
e7b07cee
HW
5283{
5284 struct amdgpu_bo *rbo;
e7b07cee
HW
5285 int r;
5286
5287 if (!old_state->fb)
5288 return;
5289
e68d14dd 5290 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
5291 r = amdgpu_bo_reserve(rbo, false);
5292 if (unlikely(r)) {
5293 DRM_ERROR("failed to reserve rbo before unpin\n");
5294 return;
b830ebc9
HW
5295 }
5296
5297 amdgpu_bo_unpin(rbo);
5298 amdgpu_bo_unreserve(rbo);
5299 amdgpu_bo_unref(&rbo);
e7b07cee
HW
5300}
5301
7578ecda
AD
5302static int dm_plane_atomic_check(struct drm_plane *plane,
5303 struct drm_plane_state *state)
cbd19488
AG
5304{
5305 struct amdgpu_device *adev = plane->dev->dev_private;
5306 struct dc *dc = adev->dm.dc;
78171832 5307 struct dm_plane_state *dm_plane_state;
695af5f9
NK
5308 struct dc_scaling_info scaling_info;
5309 int ret;
78171832
NK
5310
5311 dm_plane_state = to_dm_plane_state(state);
cbd19488 5312
3be5262e 5313 if (!dm_plane_state->dc_state)
9a3329b1 5314 return 0;
cbd19488 5315
695af5f9
NK
5316 ret = fill_dc_scaling_info(state, &scaling_info);
5317 if (ret)
5318 return ret;
a05bcff1 5319
62c933f9 5320 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
5321 return 0;
5322
5323 return -EINVAL;
5324}
5325
674e78ac
NK
5326static int dm_plane_atomic_async_check(struct drm_plane *plane,
5327 struct drm_plane_state *new_plane_state)
5328{
5329 /* Only support async updates on cursor planes. */
5330 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5331 return -EINVAL;
5332
5333 return 0;
5334}
5335
5336static void dm_plane_atomic_async_update(struct drm_plane *plane,
5337 struct drm_plane_state *new_state)
5338{
5339 struct drm_plane_state *old_state =
5340 drm_atomic_get_old_plane_state(new_state->state, plane);
5341
332af874 5342 swap(plane->state->fb, new_state->fb);
674e78ac
NK
5343
5344 plane->state->src_x = new_state->src_x;
5345 plane->state->src_y = new_state->src_y;
5346 plane->state->src_w = new_state->src_w;
5347 plane->state->src_h = new_state->src_h;
5348 plane->state->crtc_x = new_state->crtc_x;
5349 plane->state->crtc_y = new_state->crtc_y;
5350 plane->state->crtc_w = new_state->crtc_w;
5351 plane->state->crtc_h = new_state->crtc_h;
5352
5353 handle_cursor_update(plane, old_state);
5354}
5355
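/*
 * Illustrative note: because atomic_async_check() above rejects every
 * plane type except the cursor, a legacy cursor ioctl becomes an async
 * update that swaps the framebuffer and copies the src/crtc rectangles
 * into the committed state, then programs the hardware immediately via
 * handle_cursor_update() instead of going through a full atomic commit.
 */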
e7b07cee
HW
5356static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5357 .prepare_fb = dm_plane_helper_prepare_fb,
5358 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 5359 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
5360 .atomic_async_check = dm_plane_atomic_async_check,
5361 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
5362};
5363
5364/*
5365 * TODO: these are currently initialized to rgb formats only.
5366 * For future use cases we should either initialize them dynamically based on
 5367 * plane capabilities, or initialize this array to all formats, so the internal
1f6010a9 5368 * drm check will succeed, and let DC implement the proper check
e7b07cee 5369 */
d90371b0 5370static const uint32_t rgb_formats[] = {
e7b07cee
HW
5371 DRM_FORMAT_XRGB8888,
5372 DRM_FORMAT_ARGB8888,
5373 DRM_FORMAT_RGBA8888,
5374 DRM_FORMAT_XRGB2101010,
5375 DRM_FORMAT_XBGR2101010,
5376 DRM_FORMAT_ARGB2101010,
5377 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
5378 DRM_FORMAT_XBGR8888,
5379 DRM_FORMAT_ABGR8888,
46dd9ff7 5380 DRM_FORMAT_RGB565,
e7b07cee
HW
5381};
5382
0d579c7e
NK
5383static const uint32_t overlay_formats[] = {
5384 DRM_FORMAT_XRGB8888,
5385 DRM_FORMAT_ARGB8888,
5386 DRM_FORMAT_RGBA8888,
5387 DRM_FORMAT_XBGR8888,
5388 DRM_FORMAT_ABGR8888,
7267a1a9 5389 DRM_FORMAT_RGB565
e7b07cee
HW
5390};
5391
5392static const u32 cursor_formats[] = {
5393 DRM_FORMAT_ARGB8888
5394};
5395
37c6a93b
NK
5396static int get_plane_formats(const struct drm_plane *plane,
5397 const struct dc_plane_cap *plane_cap,
5398 uint32_t *formats, int max_formats)
e7b07cee 5399{
37c6a93b
NK
5400 int i, num_formats = 0;
5401
5402 /*
5403 * TODO: Query support for each group of formats directly from
5404 * DC plane caps. This will require adding more formats to the
5405 * caps list.
5406 */
e7b07cee 5407
f180b4bc 5408 switch (plane->type) {
e7b07cee 5409 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
5410 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5411 if (num_formats >= max_formats)
5412 break;
5413
5414 formats[num_formats++] = rgb_formats[i];
5415 }
5416
ea36ad34 5417 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 5418 formats[num_formats++] = DRM_FORMAT_NV12;
e7b07cee 5419 break;
37c6a93b 5420
e7b07cee 5421 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
5422 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5423 if (num_formats >= max_formats)
5424 break;
5425
5426 formats[num_formats++] = overlay_formats[i];
5427 }
e7b07cee 5428 break;
37c6a93b 5429
e7b07cee 5430 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
5431 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5432 if (num_formats >= max_formats)
5433 break;
5434
5435 formats[num_formats++] = cursor_formats[i];
5436 }
e7b07cee
HW
5437 break;
5438 }
5439
37c6a93b
NK
5440 return num_formats;
5441}
5442
5443static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5444 struct drm_plane *plane,
5445 unsigned long possible_crtcs,
5446 const struct dc_plane_cap *plane_cap)
5447{
5448 uint32_t formats[32];
5449 int num_formats;
5450 int res = -EPERM;
5451
5452 num_formats = get_plane_formats(plane, plane_cap, formats,
5453 ARRAY_SIZE(formats));
5454
5455 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5456 &dm_plane_funcs, formats, num_formats,
5457 NULL, plane->type, NULL);
5458 if (res)
5459 return res;
5460
cc1fec57
NK
5461 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5462 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
5463 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5464 BIT(DRM_MODE_BLEND_PREMULTI);
5465
5466 drm_plane_create_alpha_property(plane);
5467 drm_plane_create_blend_mode_property(plane, blend_caps);
5468 }
5469
fc8e5230 5470 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
ea36ad34 5471 plane_cap && plane_cap->pixel_format_support.nv12) {
fc8e5230
NK
5472 /* This only affects YUV formats. */
5473 drm_plane_create_color_properties(
5474 plane,
5475 BIT(DRM_COLOR_YCBCR_BT601) |
5476 BIT(DRM_COLOR_YCBCR_BT709),
5477 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5478 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5479 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5480 }
5481
f180b4bc 5482 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 5483
96719c54 5484 /* Create (reset) the plane state */
f180b4bc
HW
5485 if (plane->funcs->reset)
5486 plane->funcs->reset(plane);
96719c54 5487
37c6a93b 5488 return 0;
e7b07cee
HW
5489}
5490
7578ecda
AD
5491static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5492 struct drm_plane *plane,
5493 uint32_t crtc_index)
e7b07cee
HW
5494{
5495 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 5496 struct drm_plane *cursor_plane;
e7b07cee
HW
5497
5498 int res = -ENOMEM;
5499
5500 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5501 if (!cursor_plane)
5502 goto fail;
5503
f180b4bc 5504 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 5505 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
5506
5507 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5508 if (!acrtc)
5509 goto fail;
5510
5511 res = drm_crtc_init_with_planes(
5512 dm->ddev,
5513 &acrtc->base,
5514 plane,
f180b4bc 5515 cursor_plane,
e7b07cee
HW
5516 &amdgpu_dm_crtc_funcs, NULL);
5517
5518 if (res)
5519 goto fail;
5520
5521 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5522
96719c54
HW
5523 /* Create (reset) the plane state */
5524 if (acrtc->base.funcs->reset)
5525 acrtc->base.funcs->reset(&acrtc->base);
5526
e7b07cee
HW
5527 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5528 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5529
5530 acrtc->crtc_id = crtc_index;
5531 acrtc->base.enabled = false;
c37e2d29 5532 acrtc->otg_inst = -1;
e7b07cee
HW
5533
5534 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
5535 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5536 true, MAX_COLOR_LUT_ENTRIES);
086247a4 5537 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e7b07cee
HW
5538
5539 return 0;
5540
5541fail:
b830ebc9
HW
5542 kfree(acrtc);
5543 kfree(cursor_plane);
e7b07cee
HW
5544 return res;
5545}
5546
5547
5548static int to_drm_connector_type(enum signal_type st)
5549{
5550 switch (st) {
5551 case SIGNAL_TYPE_HDMI_TYPE_A:
5552 return DRM_MODE_CONNECTOR_HDMIA;
5553 case SIGNAL_TYPE_EDP:
5554 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
5555 case SIGNAL_TYPE_LVDS:
5556 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
5557 case SIGNAL_TYPE_RGB:
5558 return DRM_MODE_CONNECTOR_VGA;
5559 case SIGNAL_TYPE_DISPLAY_PORT:
5560 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5561 return DRM_MODE_CONNECTOR_DisplayPort;
5562 case SIGNAL_TYPE_DVI_DUAL_LINK:
5563 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5564 return DRM_MODE_CONNECTOR_DVID;
5565 case SIGNAL_TYPE_VIRTUAL:
5566 return DRM_MODE_CONNECTOR_VIRTUAL;
5567
5568 default:
5569 return DRM_MODE_CONNECTOR_Unknown;
5570 }
5571}
5572
2b4c1c05
DV
5573static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5574{
62afb4ad
JRS
5575 struct drm_encoder *encoder;
5576
5577 /* There is only one encoder per connector */
5578 drm_connector_for_each_possible_encoder(connector, encoder)
5579 return encoder;
5580
5581 return NULL;
2b4c1c05
DV
5582}
5583
e7b07cee
HW
5584static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5585{
e7b07cee
HW
5586 struct drm_encoder *encoder;
5587 struct amdgpu_encoder *amdgpu_encoder;
5588
2b4c1c05 5589 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
5590
5591 if (encoder == NULL)
5592 return;
5593
5594 amdgpu_encoder = to_amdgpu_encoder(encoder);
5595
5596 amdgpu_encoder->native_mode.clock = 0;
5597
5598 if (!list_empty(&connector->probed_modes)) {
5599 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 5600
e7b07cee 5601 list_for_each_entry(preferred_mode,
b830ebc9
HW
5602 &connector->probed_modes,
5603 head) {
5604 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5605 amdgpu_encoder->native_mode = *preferred_mode;
5606
e7b07cee
HW
5607 break;
5608 }
5609
5610 }
5611}
5612
3ee6b26b
AD
5613static struct drm_display_mode *
5614amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5615 char *name,
5616 int hdisplay, int vdisplay)
e7b07cee
HW
5617{
5618 struct drm_device *dev = encoder->dev;
5619 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5620 struct drm_display_mode *mode = NULL;
5621 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5622
5623 mode = drm_mode_duplicate(dev, native_mode);
5624
b830ebc9 5625 if (mode == NULL)
e7b07cee
HW
5626 return NULL;
5627
5628 mode->hdisplay = hdisplay;
5629 mode->vdisplay = vdisplay;
5630 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 5631 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
5632
5633 return mode;
5634
5635}
5636
5637static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 5638 struct drm_connector *connector)
e7b07cee
HW
5639{
5640 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5641 struct drm_display_mode *mode = NULL;
5642 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
5643 struct amdgpu_dm_connector *amdgpu_dm_connector =
5644 to_amdgpu_dm_connector(connector);
e7b07cee
HW
5645 int i;
5646 int n;
5647 struct mode_size {
5648 char name[DRM_DISPLAY_MODE_LEN];
5649 int w;
5650 int h;
b830ebc9 5651 } common_modes[] = {
e7b07cee
HW
5652 { "640x480", 640, 480},
5653 { "800x600", 800, 600},
5654 { "1024x768", 1024, 768},
5655 { "1280x720", 1280, 720},
5656 { "1280x800", 1280, 800},
5657 {"1280x1024", 1280, 1024},
5658 { "1440x900", 1440, 900},
5659 {"1680x1050", 1680, 1050},
5660 {"1600x1200", 1600, 1200},
5661 {"1920x1080", 1920, 1080},
5662 {"1920x1200", 1920, 1200}
5663 };
5664
b830ebc9 5665 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
5666
5667 for (i = 0; i < n; i++) {
5668 struct drm_display_mode *curmode = NULL;
5669 bool mode_existed = false;
5670
5671 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
5672 common_modes[i].h > native_mode->vdisplay ||
5673 (common_modes[i].w == native_mode->hdisplay &&
5674 common_modes[i].h == native_mode->vdisplay))
5675 continue;
e7b07cee
HW
5676
5677 list_for_each_entry(curmode, &connector->probed_modes, head) {
5678 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 5679 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
5680 mode_existed = true;
5681 break;
5682 }
5683 }
5684
5685 if (mode_existed)
5686 continue;
5687
5688 mode = amdgpu_dm_create_common_mode(encoder,
5689 common_modes[i].name, common_modes[i].w,
5690 common_modes[i].h);
5691 drm_mode_probed_add(connector, mode);
c84dec2f 5692 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
5693 }
5694}
5695
3ee6b26b
AD
5696static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5697 struct edid *edid)
e7b07cee 5698{
c84dec2f
HW
5699 struct amdgpu_dm_connector *amdgpu_dm_connector =
5700 to_amdgpu_dm_connector(connector);
e7b07cee
HW
5701
5702 if (edid) {
5703 /* empty probed_modes */
5704 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 5705 amdgpu_dm_connector->num_modes =
e7b07cee
HW
5706 drm_add_edid_modes(connector, edid);
5707
f1e5e913
YMM
 5708 /* Sort the probed modes before calling
 5709 * amdgpu_dm_get_native_mode(), since an EDID can have
5710 * more than one preferred mode. The modes that are
5711 * later in the probed mode list could be of higher
5712 * and preferred resolution. For example, 3840x2160
5713 * resolution in base EDID preferred timing and 4096x2160
5714 * preferred resolution in DID extension block later.
5715 */
5716 drm_mode_sort(&connector->probed_modes);
e7b07cee 5717 amdgpu_dm_get_native_mode(connector);
a8d8d3dc 5718 } else {
c84dec2f 5719 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 5720 }
e7b07cee
HW
5721}
5722
7578ecda 5723static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 5724{
c84dec2f
HW
5725 struct amdgpu_dm_connector *amdgpu_dm_connector =
5726 to_amdgpu_dm_connector(connector);
e7b07cee 5727 struct drm_encoder *encoder;
c84dec2f 5728 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 5729
2b4c1c05 5730 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 5731
85ee15d6 5732 if (!edid || !drm_edid_is_valid(edid)) {
1b369d3c
ML
5733 amdgpu_dm_connector->num_modes =
5734 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
5735 } else {
5736 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5737 amdgpu_dm_connector_add_common_modes(encoder, connector);
5738 }
3e332d3a 5739 amdgpu_dm_fbc_init(connector);
5099114b 5740
c84dec2f 5741 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
5742}
5743
3ee6b26b
AD
5744void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5745 struct amdgpu_dm_connector *aconnector,
5746 int connector_type,
5747 struct dc_link *link,
5748 int link_index)
e7b07cee
HW
5749{
5750 struct amdgpu_device *adev = dm->ddev->dev_private;
5751
f04bee34
NK
5752 /*
5753 * Some of the properties below require access to state, like bpc.
5754 * Allocate some default initial connector state with our reset helper.
5755 */
5756 if (aconnector->base.funcs->reset)
5757 aconnector->base.funcs->reset(&aconnector->base);
5758
e7b07cee
HW
5759 aconnector->connector_id = link_index;
5760 aconnector->dc_link = link;
5761 aconnector->base.interlace_allowed = false;
5762 aconnector->base.doublescan_allowed = false;
5763 aconnector->base.stereo_allowed = false;
5764 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5765 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 5766 aconnector->audio_inst = -1;
e7b07cee
HW
5767 mutex_init(&aconnector->hpd_lock);
5768
1f6010a9
DF
5769 /*
 5770 * Configure HPD hot plug support: connector->polled defaults to 0,
b830ebc9
HW
 5771 * which means HPD hot plug is not supported
5772 */
e7b07cee
HW
5773 switch (connector_type) {
5774 case DRM_MODE_CONNECTOR_HDMIA:
5775 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 5776 aconnector->base.ycbcr_420_allowed =
9ea59d5a 5777 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
5778 break;
5779 case DRM_MODE_CONNECTOR_DisplayPort:
5780 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 5781 aconnector->base.ycbcr_420_allowed =
9ea59d5a 5782 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
5783 break;
5784 case DRM_MODE_CONNECTOR_DVID:
5785 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5786 break;
5787 default:
5788 break;
5789 }
5790
5791 drm_object_attach_property(&aconnector->base.base,
5792 dm->ddev->mode_config.scaling_mode_property,
5793 DRM_MODE_SCALE_NONE);
5794
5795 drm_object_attach_property(&aconnector->base.base,
5796 adev->mode_info.underscan_property,
5797 UNDERSCAN_OFF);
5798 drm_object_attach_property(&aconnector->base.base,
5799 adev->mode_info.underscan_hborder_property,
5800 0);
5801 drm_object_attach_property(&aconnector->base.base,
5802 adev->mode_info.underscan_vborder_property,
5803 0);
1825fd34
NK
5804
5805 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5806
4a8ca46b
RL
5807 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5808 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5809 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 5810
c1ee92f9
DF
5811 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5812 dc_is_dmcu_initialized(adev->dm.dc)) {
5813 drm_object_attach_property(&aconnector->base.base,
5814 adev->mode_info.abm_level_property, 0);
5815 }
bb47de73
NK
5816
5817 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
5818 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5819 connector_type == DRM_MODE_CONNECTOR_eDP) {
88694af9
NK
5820 drm_object_attach_property(
5821 &aconnector->base.base,
5822 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5823
bb47de73
NK
5824 drm_connector_attach_vrr_capable_property(
5825 &aconnector->base);
0c8620d6 5826#ifdef CONFIG_DRM_AMD_DC_HDCP
96a3b32e 5827 if (adev->asic_type >= CHIP_RAVEN)
53e108aa 5828 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 5829#endif
bb47de73 5830 }
e7b07cee
HW
5831}
5832
7578ecda
AD
5833static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5834 struct i2c_msg *msgs, int num)
e7b07cee
HW
5835{
5836 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5837 struct ddc_service *ddc_service = i2c->ddc_service;
5838 struct i2c_command cmd;
5839 int i;
5840 int result = -EIO;
5841
b830ebc9 5842 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
5843
5844 if (!cmd.payloads)
5845 return result;
5846
5847 cmd.number_of_payloads = num;
5848 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5849 cmd.speed = 100;
5850
5851 for (i = 0; i < num; i++) {
5852 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5853 cmd.payloads[i].address = msgs[i].addr;
5854 cmd.payloads[i].length = msgs[i].len;
5855 cmd.payloads[i].data = msgs[i].buf;
5856 }
5857
c85e6e54
DF
5858 if (dc_submit_i2c(
5859 ddc_service->ctx->dc,
5860 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
5861 &cmd))
5862 result = num;
5863
5864 kfree(cmd.payloads);
5865 return result;
5866}
5867
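/*
 * Hypothetical usage of the adapter above: a standard EDID-style read at
 * DDC address 0x50 reaches dc_submit_i2c() as two payloads, one write
 * (the register offset) and one read.
 */
static int example_ddc_read_byte(struct i2c_adapter *adap, u8 *out)
{
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = out },
	};

	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
}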
7578ecda 5868static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
5869{
5870 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
5871}
5872
5873static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
5874 .master_xfer = amdgpu_dm_i2c_xfer,
5875 .functionality = amdgpu_dm_i2c_func,
5876};
5877
3ee6b26b
AD
5878static struct amdgpu_i2c_adapter *
5879create_i2c(struct ddc_service *ddc_service,
5880 int link_index,
5881 int *res)
e7b07cee
HW
5882{
5883 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
5884 struct amdgpu_i2c_adapter *i2c;
5885
b830ebc9 5886 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
5887 if (!i2c)
5888 return NULL;
e7b07cee
HW
5889 i2c->base.owner = THIS_MODULE;
5890 i2c->base.class = I2C_CLASS_DDC;
5891 i2c->base.dev.parent = &adev->pdev->dev;
5892 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 5893 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
5894 i2c_set_adapdata(&i2c->base, i2c);
5895 i2c->ddc_service = ddc_service;
c85e6e54 5896 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
5897
5898 return i2c;
5899}
5900
89fc8d4e 5901
1f6010a9
DF
5902/*
5903 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
5904 * dc_link which will be represented by this aconnector.
5905 */
7578ecda
AD
5906static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
5907 struct amdgpu_dm_connector *aconnector,
5908 uint32_t link_index,
5909 struct amdgpu_encoder *aencoder)
e7b07cee
HW
5910{
5911 int res = 0;
5912 int connector_type;
5913 struct dc *dc = dm->dc;
5914 struct dc_link *link = dc_get_link_at_index(dc, link_index);
5915 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
5916
5917 link->priv = aconnector;
e7b07cee 5918
f1ad2f5e 5919 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
5920
5921 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
5922 if (!i2c) {
5923 DRM_ERROR("Failed to create i2c adapter data\n");
5924 return -ENOMEM;
5925 }
5926
e7b07cee
HW
5927 aconnector->i2c = i2c;
5928 res = i2c_add_adapter(&i2c->base);
5929
5930 if (res) {
5931 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
5932 goto out_free;
5933 }
5934
5935 connector_type = to_drm_connector_type(link->connector_signal);
5936
17165de2 5937 res = drm_connector_init_with_ddc(
e7b07cee
HW
5938 dm->ddev,
5939 &aconnector->base,
5940 &amdgpu_dm_connector_funcs,
17165de2
AP
5941 connector_type,
5942 &i2c->base);
e7b07cee
HW
5943
5944 if (res) {
5945 DRM_ERROR("connector_init failed\n");
5946 aconnector->connector_id = -1;
5947 goto out_free;
5948 }
5949
5950 drm_connector_helper_add(
5951 &aconnector->base,
5952 &amdgpu_dm_connector_helper_funcs);
5953
5954 amdgpu_dm_connector_init_helper(
5955 dm,
5956 aconnector,
5957 connector_type,
5958 link,
5959 link_index);
5960
cde4c44d 5961 drm_connector_attach_encoder(
e7b07cee
HW
5962 &aconnector->base, &aencoder->base);
5963
5964 drm_connector_register(&aconnector->base);
dc38fd9d 5965#if defined(CONFIG_DEBUG_FS)
4be8be78 5966 connector_debugfs_init(aconnector);
f258fee6
DF
5967 aconnector->debugfs_dpcd_address = 0;
5968 aconnector->debugfs_dpcd_size = 0;
dc38fd9d 5969#endif
e7b07cee
HW
5970
5971 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
5972 || connector_type == DRM_MODE_CONNECTOR_eDP)
5973 amdgpu_dm_initialize_dp_connector(dm, aconnector);
5974
e7b07cee
HW
5975out_free:
5976 if (res) {
5977 kfree(i2c);
5978 aconnector->i2c = NULL;
5979 }
5980 return res;
5981}
5982
5983int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
5984{
5985 switch (adev->mode_info.num_crtc) {
5986 case 1:
5987 return 0x1;
5988 case 2:
5989 return 0x3;
5990 case 3:
5991 return 0x7;
5992 case 4:
5993 return 0xf;
5994 case 5:
5995 return 0x1f;
5996 case 6:
5997 default:
5998 return 0x3f;
5999 }
6000}
6001
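/*
 * Illustrative equivalent of the switch above (the example_* name is
 * hypothetical): a low-bit mask over the CRTC count, clamped to the
 * maximum of six CRTCs the display hardware exposes.
 */
static u32 example_encoder_crtc_mask(int num_crtc)
{
	return (1u << min(num_crtc, 6)) - 1;
}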
7578ecda
AD
6002static int amdgpu_dm_encoder_init(struct drm_device *dev,
6003 struct amdgpu_encoder *aencoder,
6004 uint32_t link_index)
e7b07cee
HW
6005{
6006 struct amdgpu_device *adev = dev->dev_private;
6007
6008 int res = drm_encoder_init(dev,
6009 &aencoder->base,
6010 &amdgpu_dm_encoder_funcs,
6011 DRM_MODE_ENCODER_TMDS,
6012 NULL);
6013
6014 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6015
6016 if (!res)
6017 aencoder->encoder_id = link_index;
6018 else
6019 aencoder->encoder_id = -1;
6020
6021 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6022
6023 return res;
6024}
6025
3ee6b26b
AD
6026static void manage_dm_interrupts(struct amdgpu_device *adev,
6027 struct amdgpu_crtc *acrtc,
6028 bool enable)
e7b07cee
HW
6029{
6030 /*
 6031 * This is not a correct translation, but it works as long as the
 6032 * VBLANK constant is the same as PFLIP
6033 */
6034 int irq_type =
734dd01d 6035 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
6036 adev,
6037 acrtc->crtc_id);
6038
6039 if (enable) {
6040 drm_crtc_vblank_on(&acrtc->base);
6041 amdgpu_irq_get(
6042 adev,
6043 &adev->pageflip_irq,
6044 irq_type);
6045 } else {
6046
6047 amdgpu_irq_put(
6048 adev,
6049 &adev->pageflip_irq,
6050 irq_type);
6051 drm_crtc_vblank_off(&acrtc->base);
6052 }
6053}
6054
3ee6b26b
AD
6055static bool
6056is_scaling_state_different(const struct dm_connector_state *dm_state,
6057 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
6058{
6059 if (dm_state->scaling != old_dm_state->scaling)
6060 return true;
6061 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6062 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6063 return true;
6064 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6065 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6066 return true;
b830ebc9
HW
6067 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6068 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6069 return true;
e7b07cee
HW
6070 return false;
6071}
6072
0c8620d6
BL
6073#ifdef CONFIG_DRM_AMD_DC_HDCP
6074static bool is_content_protection_different(struct drm_connector_state *state,
6075 const struct drm_connector_state *old_state,
6076 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6077{
6078 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6079
53e108aa
BL
6080 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6081 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6082 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6083 return true;
6084 }
6085
0c8620d6
BL
6086 /* CP is being re enabled, ignore this */
6087 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6088 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6089 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6090 return false;
6091 }
6092
6093 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6094 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6095 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6096 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6097
 6098 /* Check if something is connected/enabled; otherwise we would start HDCP
 6099 * with nothing connected/enabled (hot-plug, headless S3, DPMS)
6100 */
6101 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6102 aconnector->dc_sink != NULL)
6103 return true;
6104
6105 if (old_state->content_protection == state->content_protection)
6106 return false;
6107
6108 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6109 return true;
6110
6111 return false;
6112}
6113
0c8620d6 6114#endif
3ee6b26b
AD
6115static void remove_stream(struct amdgpu_device *adev,
6116 struct amdgpu_crtc *acrtc,
6117 struct dc_stream_state *stream)
e7b07cee
HW
6118{
6119 /* this is the update mode case */
e7b07cee
HW
6120
6121 acrtc->otg_inst = -1;
6122 acrtc->enabled = false;
6123}
6124
7578ecda
AD
6125static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6126 struct dc_cursor_position *position)
2a8f6ccb 6127{
f4c2cc43 6128 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
6129 int x, y;
6130 int xorigin = 0, yorigin = 0;
6131
e371e19c
NK
6132 position->enable = false;
6133 position->x = 0;
6134 position->y = 0;
6135
6136 if (!crtc || !plane->state->fb)
2a8f6ccb 6137 return 0;
2a8f6ccb
HW
6138
6139 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6140 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6141 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6142 __func__,
6143 plane->state->crtc_w,
6144 plane->state->crtc_h);
6145 return -EINVAL;
6146 }
6147
6148 x = plane->state->crtc_x;
6149 y = plane->state->crtc_y;
c14a005c 6150
e371e19c
NK
6151 if (x <= -amdgpu_crtc->max_cursor_width ||
6152 y <= -amdgpu_crtc->max_cursor_height)
6153 return 0;
6154
c14a005c
NK
6155 if (crtc->primary->state) {
6156 /* avivo cursor are offset into the total surface */
6157 x += crtc->primary->state->src_x >> 16;
6158 y += crtc->primary->state->src_y >> 16;
6159 }
6160
2a8f6ccb
HW
6161 if (x < 0) {
6162 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6163 x = 0;
6164 }
6165 if (y < 0) {
6166 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6167 y = 0;
6168 }
6169 position->enable = true;
6170 position->x = x;
6171 position->y = y;
6172 position->x_hotspot = xorigin;
6173 position->y_hotspot = yorigin;
6174
6175 return 0;
6176}
6177
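/*
 * Worked example for the clamping above (illustrative): a 64x64 cursor
 * placed at crtc_x = -10 yields x = 0 with x_hotspot = 10, so the
 * visible portion of the cursor still lands where userspace put it.
 */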
3ee6b26b
AD
6178static void handle_cursor_update(struct drm_plane *plane,
6179 struct drm_plane_state *old_plane_state)
e7b07cee 6180{
674e78ac 6181 struct amdgpu_device *adev = plane->dev->dev_private;
2a8f6ccb
HW
6182 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6183 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6184 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6185 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6186 uint64_t address = afb ? afb->address : 0;
6187 struct dc_cursor_position position;
6188 struct dc_cursor_attributes attributes;
6189 int ret;
6190
e7b07cee
HW
6191 if (!plane->state->fb && !old_plane_state->fb)
6192 return;
6193
f1ad2f5e 6194 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
c12a7ba5
HW
6195 __func__,
6196 amdgpu_crtc->crtc_id,
6197 plane->state->crtc_w,
6198 plane->state->crtc_h);
2a8f6ccb
HW
6199
6200 ret = get_cursor_position(plane, crtc, &position);
6201 if (ret)
6202 return;
6203
6204 if (!position.enable) {
6205 /* turn off cursor */
674e78ac
NK
6206 if (crtc_state && crtc_state->stream) {
6207 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
6208 dc_stream_set_cursor_position(crtc_state->stream,
6209 &position);
674e78ac
NK
6210 mutex_unlock(&adev->dm.dc_lock);
6211 }
2a8f6ccb 6212 return;
e7b07cee 6213 }
e7b07cee 6214
2a8f6ccb
HW
6215 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6216 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6217
c1cefe11 6218 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
6219 attributes.address.high_part = upper_32_bits(address);
6220 attributes.address.low_part = lower_32_bits(address);
6221 attributes.width = plane->state->crtc_w;
6222 attributes.height = plane->state->crtc_h;
6223 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6224 attributes.rotation_angle = 0;
6225 attributes.attribute_flags.value = 0;
6226
6227 attributes.pitch = attributes.width;
6228
886daac9 6229 if (crtc_state->stream) {
674e78ac 6230 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
6231 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6232 &attributes))
6233 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 6234
2a8f6ccb
HW
6235 if (!dc_stream_set_cursor_position(crtc_state->stream,
6236 &position))
6237 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 6238 mutex_unlock(&adev->dm.dc_lock);
886daac9 6239 }
2a8f6ccb 6240}
e7b07cee
HW
6241
6242static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6243{
6244
6245 assert_spin_locked(&acrtc->base.dev->event_lock);
6246 WARN_ON(acrtc->event);
6247
6248 acrtc->event = acrtc->base.state->event;
6249
6250 /* Set the flip status */
6251 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6252
6253 /* Mark this event as consumed */
6254 acrtc->base.state->event = NULL;
6255
6256 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6257 acrtc->crtc_id);
6258}
6259
bb47de73
NK
6260static void update_freesync_state_on_stream(
6261 struct amdgpu_display_manager *dm,
6262 struct dm_crtc_state *new_crtc_state,
180db303
NK
6263 struct dc_stream_state *new_stream,
6264 struct dc_plane_state *surface,
6265 u32 flip_timestamp_in_us)
bb47de73 6266{
09aef2c4 6267 struct mod_vrr_params vrr_params;
bb47de73 6268 struct dc_info_packet vrr_infopacket = {0};
09aef2c4
MK
6269 struct amdgpu_device *adev = dm->adev;
6270 unsigned long flags;
bb47de73
NK
6271
6272 if (!new_stream)
6273 return;
6274
6275 /*
6276 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6277 * For now it's sufficient to just guard against these conditions.
6278 */
6279
6280 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6281 return;
6282
09aef2c4
MK
6283 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6284 vrr_params = new_crtc_state->vrr_params;
6285
180db303
NK
6286 if (surface) {
6287 mod_freesync_handle_preflip(
6288 dm->freesync_module,
6289 surface,
6290 new_stream,
6291 flip_timestamp_in_us,
6292 &vrr_params);
09aef2c4
MK
6293
6294 if (adev->family < AMDGPU_FAMILY_AI &&
6295 amdgpu_dm_vrr_active(new_crtc_state)) {
6296 mod_freesync_handle_v_update(dm->freesync_module,
6297 new_stream, &vrr_params);
e63e2491
EB
6298
6299 /* Need to call this before the frame ends. */
6300 dc_stream_adjust_vmin_vmax(dm->dc,
6301 new_crtc_state->stream,
6302 &vrr_params.adjust);
09aef2c4 6303 }
180db303 6304 }
bb47de73
NK
6305
6306 mod_freesync_build_vrr_infopacket(
6307 dm->freesync_module,
6308 new_stream,
180db303 6309 &vrr_params,
ecd0136b
HT
6310 PACKET_TYPE_VRR,
6311 TRANSFER_FUNC_UNKNOWN,
bb47de73
NK
6312 &vrr_infopacket);
6313
8a48b44c 6314 new_crtc_state->freesync_timing_changed |=
180db303
NK
6315 (memcmp(&new_crtc_state->vrr_params.adjust,
6316 &vrr_params.adjust,
6317 sizeof(vrr_params.adjust)) != 0);
bb47de73 6318
8a48b44c 6319 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
6320 (memcmp(&new_crtc_state->vrr_infopacket,
6321 &vrr_infopacket,
6322 sizeof(vrr_infopacket)) != 0);
6323
180db303 6324 new_crtc_state->vrr_params = vrr_params;
bb47de73
NK
6325 new_crtc_state->vrr_infopacket = vrr_infopacket;
6326
180db303 6327 new_stream->adjust = new_crtc_state->vrr_params.adjust;
bb47de73
NK
6328 new_stream->vrr_infopacket = vrr_infopacket;
6329
6330 if (new_crtc_state->freesync_vrr_info_changed)
6331 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6332 new_crtc_state->base.crtc->base.id,
6333 (int)new_crtc_state->base.vrr_enabled,
180db303 6334 (int)vrr_params.state);
09aef2c4
MK
6335
6336 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
bb47de73
NK
6337}
6338
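/*
 * Illustrative note: both freesync helpers here snapshot vrr_params and
 * write it back under dev->event_lock, presumably because the same state
 * is also consumed from vblank/vupdate interrupt context.
 */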
e854194c
MK
6339static void pre_update_freesync_state_on_stream(
6340 struct amdgpu_display_manager *dm,
6341 struct dm_crtc_state *new_crtc_state)
6342{
6343 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 6344 struct mod_vrr_params vrr_params;
e854194c 6345 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4
MK
6346 struct amdgpu_device *adev = dm->adev;
6347 unsigned long flags;
e854194c
MK
6348
6349 if (!new_stream)
6350 return;
6351
6352 /*
6353 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6354 * For now it's sufficient to just guard against these conditions.
6355 */
6356 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6357 return;
6358
09aef2c4
MK
6359 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6360 vrr_params = new_crtc_state->vrr_params;
6361
e854194c
MK
6362 if (new_crtc_state->vrr_supported &&
6363 config.min_refresh_in_uhz &&
6364 config.max_refresh_in_uhz) {
6365 config.state = new_crtc_state->base.vrr_enabled ?
6366 VRR_STATE_ACTIVE_VARIABLE :
6367 VRR_STATE_INACTIVE;
6368 } else {
6369 config.state = VRR_STATE_UNSUPPORTED;
6370 }
6371
6372 mod_freesync_build_vrr_params(dm->freesync_module,
6373 new_stream,
6374 &config, &vrr_params);
6375
6376 new_crtc_state->freesync_timing_changed |=
6377 (memcmp(&new_crtc_state->vrr_params.adjust,
6378 &vrr_params.adjust,
6379 sizeof(vrr_params.adjust)) != 0);
6380
6381 new_crtc_state->vrr_params = vrr_params;
09aef2c4 6382 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
e854194c
MK
6383}
6384
66b0c973
MK
6385static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6386 struct dm_crtc_state *new_state)
6387{
6388 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6389 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6390
6391 if (!old_vrr_active && new_vrr_active) {
6392 /* Transition VRR inactive -> active:
6393 * While VRR is active, we must not disable vblank irq, as a
6394 * reenable after disable would compute bogus vblank/pflip
 6395 * timestamps if the reenable happens inside the display front porch.
d2574c33
MK
6396 *
6397 * We also need vupdate irq for the actual core vblank handling
6398 * at end of vblank.
66b0c973 6399 */
d2574c33 6400 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
6401 drm_crtc_vblank_get(new_state->base.crtc);
6402 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6403 __func__, new_state->base.crtc->base.id);
6404 } else if (old_vrr_active && !new_vrr_active) {
6405 /* Transition VRR active -> inactive:
6406 * Allow vblank irq disable again for fixed refresh rate.
6407 */
d2574c33 6408 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
6409 drm_crtc_vblank_put(new_state->base.crtc);
6410 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6411 __func__, new_state->base.crtc->base.id);
6412 }
6413}
6414
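/*
 * Illustrative note: the get/put pair above holds a vblank reference for
 * the entire time VRR is active, so the irq can never be disabled
 * mid-VRR; these two transitions are the only points where the
 * reference count changes.
 */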
8ad27806
NK
6415static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6416{
6417 struct drm_plane *plane;
6418 struct drm_plane_state *old_plane_state, *new_plane_state;
6419 int i;
6420
6421 /*
6422 * TODO: Make this per-stream so we don't issue redundant updates for
6423 * commits with multiple streams.
6424 */
6425 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6426 new_plane_state, i)
6427 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6428 handle_cursor_update(plane, old_plane_state);
6429}
6430
3be5262e 6431static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 6432 struct dc_state *dc_state,
3ee6b26b
AD
6433 struct drm_device *dev,
6434 struct amdgpu_display_manager *dm,
6435 struct drm_crtc *pcrtc,
420cd472 6436 bool wait_for_vblank)
e7b07cee 6437{
570c91d5 6438 uint32_t i;
8a48b44c 6439 uint64_t timestamp_ns;
e7b07cee 6440 struct drm_plane *plane;
0bc9706d 6441 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 6442 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
6443 struct drm_crtc_state *new_pcrtc_state =
6444 drm_atomic_get_new_crtc_state(state, pcrtc);
6445 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
6446 struct dm_crtc_state *dm_old_crtc_state =
6447 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 6448 int planes_count = 0, vpos, hpos;
570c91d5 6449 long r;
e7b07cee 6450 unsigned long flags;
8a48b44c 6451 struct amdgpu_bo *abo;
09e5665a 6452 uint64_t tiling_flags;
fdd1fe57
MK
6453 uint32_t target_vblank, last_flip_vblank;
6454 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 6455 bool pflip_present = false;
8c322309 6456 bool swizzle = true;
bc7f670e
DF
6457 struct {
6458 struct dc_surface_update surface_updates[MAX_SURFACES];
6459 struct dc_plane_info plane_infos[MAX_SURFACES];
6460 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 6461 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 6462 struct dc_stream_update stream_update;
74aa7bd4 6463 } *bundle;
bc7f670e 6464
74aa7bd4 6465 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 6466
74aa7bd4
DF
6467 if (!bundle) {
6468 dm_error("Failed to allocate update bundle\n");
4b510503
NK
6469 goto cleanup;
6470 }
e7b07cee 6471
8ad27806
NK
6472 /*
6473 * Disable the cursor first if we're disabling all the planes.
6474 * It'll remain on the screen after the planes are re-enabled
6475 * if we don't.
6476 */
6477 if (acrtc_state->active_planes == 0)
6478 amdgpu_dm_commit_cursors(state);
6479
e7b07cee 6480 /* update planes when needed */
0bc9706d
LSL
6481 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6482 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 6483 struct drm_crtc_state *new_crtc_state;
0bc9706d 6484 struct drm_framebuffer *fb = new_plane_state->fb;
34bafd27 6485 bool plane_needs_flip;
c7af5f77 6486 struct dc_plane_state *dc_plane;
54d76575 6487 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 6488
80c218d5
NK
6489 /* Cursor plane is handled after stream updates */
6490 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 6491 continue;
e7b07cee 6492
f5ba60fe
DD
6493 if (!fb || !crtc || pcrtc != crtc)
6494 continue;
6495
6496 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6497 if (!new_crtc_state->active)
e7b07cee
HW
6498 continue;
6499
bc7f670e 6500 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 6501
8c322309
RL
6502 if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
6503 swizzle = false;
6504
74aa7bd4 6505 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 6506 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
6507 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6508 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
bc7f670e 6509 }
8a48b44c 6510
695af5f9
NK
6511 fill_dc_scaling_info(new_plane_state,
6512 &bundle->scaling_infos[planes_count]);
8a48b44c 6513
695af5f9
NK
6514 bundle->surface_updates[planes_count].scaling_info =
6515 &bundle->scaling_infos[planes_count];
8a48b44c 6516
f5031000 6517 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 6518
f5031000 6519 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 6520
f5031000
DF
6521 if (!plane_needs_flip) {
6522 planes_count += 1;
6523 continue;
6524 }
8a48b44c 6525
2fac0f53
CK
6526 abo = gem_to_amdgpu_bo(fb->obj[0]);
6527
f8308898
AG
6528 /*
6529 * Wait for all fences on this FB. Do limited wait to avoid
6530 * deadlock during GPU reset when this fence will not signal
6531 * but we hold reservation lock for the BO.
6532 */
52791eee 6533 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 6534 false,
f8308898
AG
6535 msecs_to_jiffies(5000));
6536 if (unlikely(r <= 0))
ed8a5fb2 6537 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 6538
f5031000
DF
6539 /*
6540 * TODO This might fail and hence better not used, wait
6541 * explicitly on fences instead
6542 * and in general should be called for
6543 * blocking commit to as per framework helpers
6544 */
f5031000 6545 r = amdgpu_bo_reserve(abo, true);
f8308898 6546 if (unlikely(r != 0))
f5031000 6547 DRM_ERROR("failed to reserve buffer before flip\n");
8a48b44c 6548
f5031000 6549 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
8a48b44c 6550
f5031000 6551 amdgpu_bo_unreserve(abo);
8a48b44c 6552
695af5f9
NK
6553 fill_dc_plane_info_and_addr(
6554 dm->adev, new_plane_state, tiling_flags,
6555 &bundle->plane_infos[planes_count],
6556 &bundle->flip_addrs[planes_count].address);
6557
6558 bundle->surface_updates[planes_count].plane_info =
6559 &bundle->plane_infos[planes_count];
8a48b44c 6560
caff0e66
NK
6561 /*
6562 * Only allow immediate flips for fast updates that don't
 6563 * change FB pitch, DCC state, rotation or mirroring.
6564 */
f5031000 6565 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 6566 crtc->state->async_flip &&
caff0e66 6567 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 6568
f5031000
DF
6569 timestamp_ns = ktime_get_ns();
6570 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6571 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6572 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 6573
f5031000
DF
6574 if (!bundle->surface_updates[planes_count].surface) {
6575 DRM_ERROR("No surface for CRTC: id=%d\n",
6576 acrtc_attach->crtc_id);
6577 continue;
bc7f670e
DF
6578 }
6579
f5031000
DF
6580 if (plane == pcrtc->primary)
6581 update_freesync_state_on_stream(
6582 dm,
6583 acrtc_state,
6584 acrtc_state->stream,
6585 dc_plane,
6586 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 6587
f5031000
DF
6588 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6589 __func__,
6590 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6591 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
6592
6593 planes_count += 1;
6594
8a48b44c
DF
6595 }
6596
74aa7bd4 6597 if (pflip_present) {
634092b1
MK
6598 if (!vrr_active) {
6599 /* Use old throttling in non-vrr fixed refresh rate mode
6600 * to keep flip scheduling based on target vblank counts
6601 * working in a backwards compatible way, e.g., for
6602 * clients using the GLX_OML_sync_control extension or
6603 * DRI3/Present extension with defined target_msc.
6604 */
fdd1fe57 6605 last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
634092b1
MK
6606 }
6607 else {
6608 /* For variable refresh rate mode only:
6609 * Get vblank of last completed flip to avoid > 1 vrr
6610 * flips per video frame by use of throttling, but allow
6611 * flip programming anywhere in the possibly large
6612 * variable vrr vblank interval for fine-grained flip
6613 * timing control and more opportunity to avoid stutter
6614 * on late submission of flips.
6615 */
6616 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6617 last_flip_vblank = acrtc_attach->last_flip_vblank;
6618 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6619 }
6620
fdd1fe57 6621 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
6622
6623 /*
6624 * Wait until we're out of the vertical blank period before the one
6625 * targeted by the flip
6626 */
6627 while ((acrtc_attach->enabled &&
6628 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6629 0, &vpos, &hpos, NULL,
6630 NULL, &pcrtc->hwmode)
6631 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6632 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6633 (int)(target_vblank -
6634 amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
6635 usleep_range(1000, 1100);
6636 }
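		/*
		 * Illustrative note: with wait_for_vblank == 1 the loop above
		 * busy-waits in ~1 ms steps until scanout has left the vblank
		 * preceding the target, throttling flips to at most one per
		 * refresh cycle in fixed refresh rate mode.
		 */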
6637
6638 if (acrtc_attach->base.state->event) {
6639 drm_crtc_vblank_get(pcrtc);
6640
6641 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6642
6643 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6644 prepare_flip_isr(acrtc_attach);
6645
6646 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6647 }
6648
6649 if (acrtc_state->stream) {
8a48b44c 6650 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 6651 bundle->stream_update.vrr_infopacket =
8a48b44c 6652 &acrtc_state->stream->vrr_infopacket;
e7b07cee 6653 }
e7b07cee
HW
6654 }
6655
bc92c065 6656 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
6657 if ((planes_count || acrtc_state->active_planes == 0) &&
6658 acrtc_state->stream) {
b6e881c9 6659 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 6660 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
6661 bundle->stream_update.src = acrtc_state->stream->src;
6662 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
6663 }
6664
cf020d49
NK
6665 if (new_pcrtc_state->color_mgmt_changed) {
6666 /*
6667 * TODO: This isn't fully correct since we've actually
6668 * already modified the stream in place.
6669 */
6670 bundle->stream_update.gamut_remap =
6671 &acrtc_state->stream->gamut_remap_matrix;
6672 bundle->stream_update.output_csc_transform =
6673 &acrtc_state->stream->csc_color_matrix;
6674 bundle->stream_update.out_transfer_func =
6675 acrtc_state->stream->out_transfer_func;
6676 }
bc7f670e 6677
8a48b44c 6678 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 6679 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 6680 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 6681
e63e2491
EB
6682 /*
6683 * If FreeSync state on the stream has changed then we need to
6684 * re-adjust the min/max bounds now that DC doesn't handle this
6685 * as part of commit.
6686 */
6687 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6688 amdgpu_dm_vrr_active(acrtc_state)) {
6689 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6690 dc_stream_adjust_vmin_vmax(
6691 dm->dc, acrtc_state->stream,
6692 &acrtc_state->vrr_params.adjust);
6693 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6694 }
6695 mutex_lock(&dm->dc_lock);
6696 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6697 acrtc_state->stream->link->psr_allow_active)
6698 amdgpu_dm_psr_disable(acrtc_state->stream);
6699
6700 dc_commit_updates_for_stream(dm->dc,
6701 bundle->surface_updates,
6702 planes_count,
6703 acrtc_state->stream,
6704 &bundle->stream_update,
6705 dc_state);
6706
6707 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6708 acrtc_state->stream->psr_version &&
6709 !acrtc_state->stream->link->psr_feature_enabled)
6710 amdgpu_dm_link_setup_psr(acrtc_state->stream);
6711 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6712 acrtc_state->stream->link->psr_feature_enabled &&
6713 !acrtc_state->stream->link->psr_allow_active &&
6714 swizzle) {
6715 amdgpu_dm_psr_enable(acrtc_state->stream);
6716 }
6717
6718 mutex_unlock(&dm->dc_lock);
6719 }
6720
6721 /*
6722 * Update cursor state *after* programming all the planes.
6723 * This avoids redundant programming in the case where we're going
6724 * to be disabling a single plane - those pipes are being disabled.
6725 */
6726 if (acrtc_state->active_planes)
6727 amdgpu_dm_commit_cursors(state);
6728
6729cleanup:
6730 kfree(bundle);
6731}
6732
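/*
 * amdgpu_dm_commit_audio() below walks the atomic state twice: first to
 * notify the audio component about connectors whose CRTC routing went
 * away (their audio_inst is reset to -1), then to report the audio
 * instance of every newly routed stream. Both passes funnel into
 * amdgpu_dm_audio_eld_notify(), with audio_inst updated under
 * dm.audio_lock.
 */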
6733static void amdgpu_dm_commit_audio(struct drm_device *dev,
6734 struct drm_atomic_state *state)
6735{
6736 struct amdgpu_device *adev = dev->dev_private;
6737 struct amdgpu_dm_connector *aconnector;
6738 struct drm_connector *connector;
6739 struct drm_connector_state *old_con_state, *new_con_state;
6740 struct drm_crtc_state *new_crtc_state;
6741 struct dm_crtc_state *new_dm_crtc_state;
6742 const struct dc_stream_status *status;
6743 int i, inst;
6744
6745 /* Notify device removals. */
6746 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6747 if (old_con_state->crtc != new_con_state->crtc) {
6748 /* CRTC changes require notification. */
6749 goto notify;
6750 }
6751
6752 if (!new_con_state->crtc)
6753 continue;
6754
6755 new_crtc_state = drm_atomic_get_new_crtc_state(
6756 state, new_con_state->crtc);
6757
6758 if (!new_crtc_state)
6759 continue;
6760
6761 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6762 continue;
6763
6764 notify:
6765 aconnector = to_amdgpu_dm_connector(connector);
6766
6767 mutex_lock(&adev->dm.audio_lock);
6768 inst = aconnector->audio_inst;
6769 aconnector->audio_inst = -1;
6770 mutex_unlock(&adev->dm.audio_lock);
6771
6772 amdgpu_dm_audio_eld_notify(adev, inst);
6773 }
6774
6775 /* Notify audio device additions. */
6776 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6777 if (!new_con_state->crtc)
6778 continue;
6779
6780 new_crtc_state = drm_atomic_get_new_crtc_state(
6781 state, new_con_state->crtc);
6782
6783 if (!new_crtc_state)
6784 continue;
6785
6786 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6787 continue;
6788
6789 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6790 if (!new_dm_crtc_state->stream)
6791 continue;
6792
6793 status = dc_stream_get_status(new_dm_crtc_state->stream);
6794 if (!status)
6795 continue;
6796
6797 aconnector = to_amdgpu_dm_connector(connector);
6798
6799 mutex_lock(&adev->dm.audio_lock);
6800 inst = status->audio_inst;
6801 aconnector->audio_inst = inst;
6802 mutex_unlock(&adev->dm.audio_lock);
6803
6804 amdgpu_dm_audio_eld_notify(adev, inst);
6805 }
6806}
6807
6808/*
6809 * Enable interrupts on CRTCs that are newly active, have undergone
6810 * a modeset, or have active planes again.
6811 *
6812 * Done in two passes, based on the for_modeset flag:
6813 * Pass 1: For CRTCs going through modeset
6814 * Pass 2: For CRTCs going from 0 to n active planes
6815 *
6816 * Interrupts can only be enabled after the planes are programmed,
6817 * so this requires a two-pass approach since we don't want to
6818 * just defer the interrupts until after commit planes every time.
6819 */
6820static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6821 struct drm_atomic_state *state,
6822 bool for_modeset)
6823{
6824 struct amdgpu_device *adev = dev->dev_private;
6825 struct drm_crtc *crtc;
6826 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6827 int i;
6828#ifdef CONFIG_DEBUG_FS
6829 enum amdgpu_dm_pipe_crc_source source;
6830#endif
6831
6832 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6833 new_crtc_state, i) {
6834 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6835 struct dm_crtc_state *dm_new_crtc_state =
6836 to_dm_crtc_state(new_crtc_state);
6837 struct dm_crtc_state *dm_old_crtc_state =
6838 to_dm_crtc_state(old_crtc_state);
6839 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6840 bool run_pass;
6841
6842 run_pass = (for_modeset && modeset) ||
6843 (!for_modeset && !modeset &&
6844 !dm_old_crtc_state->interrupts_enabled);
6845
6846 if (!run_pass)
6847 continue;
6848
6849 if (!dm_new_crtc_state->interrupts_enabled)
6850 continue;
6851
6852 manage_dm_interrupts(adev, acrtc, true);
6853
6854#ifdef CONFIG_DEBUG_FS
6855 /* The stream has changed so CRC capture needs to be re-enabled. */
6856 source = dm_new_crtc_state->crc_src;
6857 if (amdgpu_dm_is_valid_crc_source(source)) {
6858 amdgpu_dm_crtc_configure_crc_source(
6859 crtc, dm_new_crtc_state,
6860 dm_new_crtc_state->crc_src);
6861 }
6862#endif
6863 }
6864}
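/*
 * A sketch of the intended call pattern for the two passes, as they
 * appear later in amdgpu_dm_atomic_commit_tail():
 *
 *   amdgpu_dm_enable_crtc_interrupts(dev, state, true);  // modeset CRTCs
 *   ... program planes via amdgpu_dm_commit_planes() ...
 *   amdgpu_dm_enable_crtc_interrupts(dev, state, false); // 0 -> n planes
 */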
6865
6866/*
6867 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6868 * @crtc_state: the DRM CRTC state
6869 * @stream_state: the DC stream state.
6870 *
6871 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
6872 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6873 */
6874static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
6875 struct dc_stream_state *stream_state)
6876{
6877 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
6878}
6879
6880static int amdgpu_dm_atomic_commit(struct drm_device *dev,
6881 struct drm_atomic_state *state,
6882 bool nonblock)
6883{
6884 struct drm_crtc *crtc;
6885 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6886 struct amdgpu_device *adev = dev->dev_private;
6887 int i;
6888
6889 /*
6890 * We evade vblank and pflip interrupts on CRTCs that are undergoing
6891 * a modeset, being disabled, or have no active planes.
6892 *
6893 * It's done in atomic commit rather than commit tail for now since
6894 * some of these interrupt handlers access the current CRTC state and
6895 * potentially the stream pointer itself.
6896 *
6897 * Since the atomic state is swapped within atomic commit and not within
6898 * commit tail, this would lead to the new state (that hasn't been committed yet)
6899 * being accessed from within the handlers.
6900 *
6901 * TODO: Fix this so we can do this in commit tail and not have to block
6902 * in atomic check.
6903 */
6904 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6905 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6906 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6907 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6908
6909 if (dm_old_crtc_state->interrupts_enabled &&
6910 (!dm_new_crtc_state->interrupts_enabled ||
6911 drm_atomic_crtc_needs_modeset(new_crtc_state)))
6912 manage_dm_interrupts(adev, acrtc, false);
6913 }
6914 /*
6915 * Add check here for SoC's that support hardware cursor plane, to
6916 * unset legacy_cursor_update
6917 */
6918
6919 return drm_atomic_helper_commit(dev, state, nonblock);
6920
6921 /* TODO: Handle EINTR, re-enable IRQ */
6922}
6923
6924/**
6925 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
6926 * @state: The atomic state to commit
6927 *
6928 * This will tell DC to commit the constructed DC state from atomic_check,
6929 * programming the hardware. Any failure here implies a hardware failure, since
6930 * atomic check should have filtered anything non-kosher.
6931 */
6932static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
6933{
6934 struct drm_device *dev = state->dev;
6935 struct amdgpu_device *adev = dev->dev_private;
6936 struct amdgpu_display_manager *dm = &adev->dm;
6937 struct dm_atomic_state *dm_state;
6938 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
6939 uint32_t i, j;
6940 struct drm_crtc *crtc;
6941 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6942 unsigned long flags;
6943 bool wait_for_vblank = true;
6944 struct drm_connector *connector;
6945 struct drm_connector_state *old_con_state, *new_con_state;
6946 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
6947 int crtc_disable_count = 0;
6948
6949 drm_atomic_helper_update_legacy_modeset_state(dev, state);
6950
6951 dm_state = dm_atomic_get_new_state(state);
6952 if (dm_state && dm_state->context) {
6953 dc_state = dm_state->context;
6954 } else {
6955 /* No state changes, retain current state. */
6956 dc_state_temp = dc_create_state(dm->dc);
6957 ASSERT(dc_state_temp);
6958 dc_state = dc_state_temp;
6959 dc_resource_state_copy_construct_current(dm->dc, dc_state);
6960 }
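/*
 * When the commit carries no DM private state (a fast update), a scratch
 * dc_state is constructed from the current resource state purely so the
 * code below has a context to read; dc_state_temp is released again at
 * the bottom of this function.
 */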
6961
6962 /* update changed items */
6963 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6964 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6965
6966 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6967 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6968
6969 DRM_DEBUG_DRIVER(
6970 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
6971 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
6972 "connectors_changed:%d\n",
6973 acrtc->crtc_id,
6974 new_crtc_state->enable,
6975 new_crtc_state->active,
6976 new_crtc_state->planes_changed,
6977 new_crtc_state->mode_changed,
6978 new_crtc_state->active_changed,
6979 new_crtc_state->connectors_changed);
6980
6981 /* Copy all transient state flags into dc state */
6982 if (dm_new_crtc_state->stream) {
6983 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
6984 dm_new_crtc_state->stream);
6985 }
6986
6987 /* handles headless hotplug case, updating new_state and
6988 * aconnector as needed
6989 */
6990
6991 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
6992
6993 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
6994
6995 if (!dm_new_crtc_state->stream) {
6996 /*
6997 * This could happen because of issues with
6998 * userspace notification delivery.
6999 * In that case userspace tries to set a mode on
7000 * a display which is in fact disconnected.
7001 * dc_sink is NULL on the aconnector in this case.
7002 * We expect a mode-reset call to come soon.
7003 *
7004 * This can also happen when an unplug is done
7005 * while the resume sequence is still completing.
7006 *
7007 * In these cases, we want to pretend we still
7008 * have a sink to keep the pipe running so that
7009 * hw state is consistent with the sw state.
7010 */
7011 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7012 __func__, acrtc->base.base.id);
7013 continue;
7014 }
7015
7016 if (dm_old_crtc_state->stream)
7017 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7018
7019 pm_runtime_get_noresume(dev->dev);
7020
7021 acrtc->enabled = true;
7022 acrtc->hw_mode = new_crtc_state->mode;
7023 crtc->hwmode = new_crtc_state->mode;
7024 } else if (modereset_required(new_crtc_state)) {
7025 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7026 /* i.e. reset mode */
7027 if (dm_old_crtc_state->stream) {
7028 if (dm_old_crtc_state->stream->link->psr_allow_active)
7029 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7030
7031 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7032 }
7033 }
7034 } /* for_each_crtc_in_state() */
7035
7036 if (dc_state) {
7037 dm_enable_per_frame_crtc_master_sync(dc_state);
7038 mutex_lock(&dm->dc_lock);
7039 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7040 mutex_unlock(&dm->dc_lock);
7041 }
7042
7043 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7044 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7045
7046 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7047
7048 if (dm_new_crtc_state->stream != NULL) {
7049 const struct dc_stream_status *status =
7050 dc_stream_get_status(dm_new_crtc_state->stream);
7051
7052 if (!status)
7053 status = dc_stream_get_status_from_state(dc_state,
7054 dm_new_crtc_state->stream);
7055
7056 if (!status)
7057 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7058 else
7059 acrtc->otg_inst = status->primary_otg_inst;
7060 }
7061 }
7062#ifdef CONFIG_DRM_AMD_DC_HDCP
7063 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7064 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7065 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7066 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7067
7068 new_crtc_state = NULL;
7069
7070 if (acrtc)
7071 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7072
7073 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7074
7075 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7076 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7077 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7078 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7079 continue;
7080 }
7081
7082 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7083 hdcp_update_display(
7084 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7085 new_con_state->hdcp_content_type,
7086 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7087 : false);
7088 }
7089#endif
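/*
 * Note on the HDCP block above: DRM's content protection property is
 * tri-state (UNDESIRED / DESIRED / ENABLED). When the stream backing an
 * ENABLED connector goes away, the property is demoted to DESIRED, since
 * ENABLED may only be reported by the driver again after a successful
 * re-authentication on the next enable.
 */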
7090
7091 /* Handle connector state changes */
7092 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7093 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7094 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7095 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7096 struct dc_surface_update dummy_updates[MAX_SURFACES];
7097 struct dc_stream_update stream_update;
7098 struct dc_info_packet hdr_packet;
7099 struct dc_stream_status *status = NULL;
7100 bool abm_changed, hdr_changed, scaling_changed;
7101
7102 memset(&dummy_updates, 0, sizeof(dummy_updates));
7103 memset(&stream_update, 0, sizeof(stream_update));
7104
7105 if (acrtc) {
7106 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7107 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7108 }
7109
7110 /* Skip any modesets/resets */
7111 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7112 continue;
7113
7114 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7115 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7116
7117 scaling_changed = is_scaling_state_different(dm_new_con_state,
7118 dm_old_con_state);
7119
7120 abm_changed = dm_new_crtc_state->abm_level !=
7121 dm_old_crtc_state->abm_level;
7122
7123 hdr_changed =
7124 is_hdr_metadata_different(old_con_state, new_con_state);
7125
7126 if (!scaling_changed && !abm_changed && !hdr_changed)
7127 continue;
7128
7129 stream_update.stream = dm_new_crtc_state->stream;
7130 if (scaling_changed) {
7131 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7132 dm_new_con_state, dm_new_crtc_state->stream);
7133
7134 stream_update.src = dm_new_crtc_state->stream->src;
7135 stream_update.dst = dm_new_crtc_state->stream->dst;
7136 }
7137
7138 if (abm_changed) {
7139 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7140
7141 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7142 }
7143
7144 if (hdr_changed) {
7145 fill_hdr_info_packet(new_con_state, &hdr_packet);
7146 stream_update.hdr_static_metadata = &hdr_packet;
7147 }
7148
7149 status = dc_stream_get_status(dm_new_crtc_state->stream);
7150 WARN_ON(!status);
7151 WARN_ON(!status->plane_count);
7152
7153 /*
7154 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7155 * Here we create an empty update on each plane.
7156 * To fix this, DC should permit updating only stream properties.
7157 */
7158 for (j = 0; j < status->plane_count; j++)
7159 dummy_updates[j].surface = status->plane_states[0];
7160
7161
7162 mutex_lock(&dm->dc_lock);
7163 dc_commit_updates_for_stream(dm->dc,
7164 dummy_updates,
7165 status->plane_count,
7166 dm_new_crtc_state->stream,
7167 &stream_update,
7168 dc_state);
7169 mutex_unlock(&dm->dc_lock);
7170 }
7171
7172 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7173 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7174 new_crtc_state, i) {
7175 if (old_crtc_state->active && !new_crtc_state->active)
7176 crtc_disable_count++;
7177
7178 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7179 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7180
7181 /* Update freesync active state. */
7182 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7183
7184 /* Handle vrr on->off / off->on transitions */
7185 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7186 dm_new_crtc_state);
7187 }
7188
7189 /* Enable interrupts for CRTCs going through a modeset. */
7190 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7191
7192 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7193 if (new_crtc_state->async_flip)
7194 wait_for_vblank = false;
7195
7196 /* update planes when needed per CRTC */
7197 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7198 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7199
7200 if (dm_new_crtc_state->stream)
7201 amdgpu_dm_commit_planes(state, dc_state, dev,
7202 dm, crtc, wait_for_vblank);
7203 }
7204
7205 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7206 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7207
7208 /* Update audio instances for each connector. */
7209 amdgpu_dm_commit_audio(dev, state);
7210
7211 /*
7212 * send vblank event on all events not handled in flip and
7213 * mark consumed event for drm_atomic_helper_commit_hw_done
7214 */
7215 spin_lock_irqsave(&adev->ddev->event_lock, flags);
7216 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7217
7218 if (new_crtc_state->event)
7219 drm_send_event_locked(dev, &new_crtc_state->event->base);
7220
7221 new_crtc_state->event = NULL;
7222 }
7223 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7224
7225 /* Signal HW programming completion */
7226 drm_atomic_helper_commit_hw_done(state);
7227
7228 if (wait_for_vblank)
7229 drm_atomic_helper_wait_for_flip_done(dev, state);
7230
7231 drm_atomic_helper_cleanup_planes(dev, state);
7232
7233 /*
7234 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7235 * so we can put the GPU into runtime suspend if we're not driving any
7236 * displays anymore
7237 */
7238 for (i = 0; i < crtc_disable_count; i++)
7239 pm_runtime_put_autosuspend(dev->dev);
7240 pm_runtime_mark_last_busy(dev->dev);
7241
7242 if (dc_state_temp)
7243 dc_release_state(dc_state_temp);
7244}
7245
7246
7247static int dm_force_atomic_commit(struct drm_connector *connector)
7248{
7249 int ret = 0;
7250 struct drm_device *ddev = connector->dev;
7251 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7252 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7253 struct drm_plane *plane = disconnected_acrtc->base.primary;
7254 struct drm_connector_state *conn_state;
7255 struct drm_crtc_state *crtc_state;
7256 struct drm_plane_state *plane_state;
7257
7258 if (!state)
7259 return -ENOMEM;
7260
7261 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7262
7263 /* Construct an atomic state to restore previous display setting */
7264
7265 /*
7266 * Attach connectors to drm_atomic_state
7267 */
7268 conn_state = drm_atomic_get_connector_state(state, connector);
7269
7270 ret = PTR_ERR_OR_ZERO(conn_state);
7271 if (ret)
7272 goto err;
7273
7274 /* Attach crtc to drm_atomic_state*/
7275 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7276
7277 ret = PTR_ERR_OR_ZERO(crtc_state);
7278 if (ret)
7279 goto err;
7280
7281 /* force a restore */
7282 crtc_state->mode_changed = true;
7283
7284 /* Attach plane to drm_atomic_state */
7285 plane_state = drm_atomic_get_plane_state(state, plane);
7286
7287 ret = PTR_ERR_OR_ZERO(plane_state);
7288 if (ret)
7289 goto err;
7290
7291
7292 /* Call commit internally with the state we just constructed */
7293 ret = drm_atomic_commit(state);
7294 if (!ret)
7295 return 0;
7296
7297err:
7298 DRM_ERROR("Restoring old state failed with %i\n", ret);
7299 drm_atomic_state_put(state);
7300
7301 return ret;
7302}
7303
7304/*
7305 * This function handles all cases when set mode does not come upon hotplug.
7306 * This includes when a display is unplugged then plugged back into the
7307 * same port and when running without usermode desktop manager support.
7308 */
7309void dm_restore_drm_connector_state(struct drm_device *dev,
7310 struct drm_connector *connector)
7311{
7312 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7313 struct amdgpu_crtc *disconnected_acrtc;
7314 struct dm_crtc_state *acrtc_state;
7315
7316 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7317 return;
7318
7319 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
7320 if (!disconnected_acrtc)
7321 return;
7322
7323 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7324 if (!acrtc_state->stream)
7325 return;
7326
7327 /*
7328 * If the previous sink is not released and different from the current,
7329 * we deduce we are in a state where we can not rely on usermode call
7330 * to turn on the display, so we do it here
7331 */
7332 if (acrtc_state->stream->sink != aconnector->dc_sink)
7333 dm_force_atomic_commit(&aconnector->base);
7334}
7335
7336/*
7337 * Grabs all modesetting locks to serialize against any blocking commits,
7338 * and waits for completion of all non-blocking commits.
7339 */
7340static int do_aquire_global_lock(struct drm_device *dev,
7341 struct drm_atomic_state *state)
7342{
7343 struct drm_crtc *crtc;
7344 struct drm_crtc_commit *commit;
7345 long ret;
7346
7347 /*
7348 * Adding all modeset locks to acquire_ctx will
7349 * ensure that when the framework releases it, the
7350 * extra locks we are taking here will get released too.
7351 */
7352 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7353 if (ret)
7354 return ret;
7355
7356 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7357 spin_lock(&crtc->commit_lock);
7358 commit = list_first_entry_or_null(&crtc->commit_list,
7359 struct drm_crtc_commit, commit_entry);
7360 if (commit)
7361 drm_crtc_commit_get(commit);
7362 spin_unlock(&crtc->commit_lock);
7363
7364 if (!commit)
7365 continue;
7366
7367 /*
7368 * Make sure all pending HW programming completed and
7369 * page flips done
7370 */
7371 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7372
7373 if (ret > 0)
7374 ret = wait_for_completion_interruptible_timeout(
7375 &commit->flip_done, 10*HZ);
7376
7377 if (ret == 0)
7378 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7379 "timed out\n", crtc->base.id, crtc->name);
7380
7381 drm_crtc_commit_put(commit);
7382 }
7383
7384 return ret < 0 ? ret : 0;
7385}
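/*
 * For reference, wait_for_completion_interruptible_timeout() returns a
 * negative error if interrupted by a signal, 0 on timeout, and the
 * remaining jiffies (> 0) on success; hence only ret == 0 is reported as
 * a timeout above, and the function maps ret < 0 to an error return.
 */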
7386
7387static void get_freesync_config_for_crtc(
7388 struct dm_crtc_state *new_crtc_state,
7389 struct dm_connector_state *new_con_state)
7390{
7391 struct mod_freesync_config config = {0};
7392 struct amdgpu_dm_connector *aconnector =
7393 to_amdgpu_dm_connector(new_con_state->base.connector);
7394 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7395 int vrefresh = drm_mode_vrefresh(mode);
7396
7397 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7398 vrefresh >= aconnector->min_vfreq &&
7399 vrefresh <= aconnector->max_vfreq;
7400
7401 if (new_crtc_state->vrr_supported) {
7402 new_crtc_state->stream->ignore_msa_timing_param = true;
7403 config.state = new_crtc_state->base.vrr_enabled ?
7404 VRR_STATE_ACTIVE_VARIABLE :
7405 VRR_STATE_INACTIVE;
7406 config.min_refresh_in_uhz =
7407 aconnector->min_vfreq * 1000000;
7408 config.max_refresh_in_uhz =
7409 aconnector->max_vfreq * 1000000;
7410 config.vsif_supported = true;
7411 config.btr = true;
7412 }
7413
7414 new_crtc_state->freesync_config = config;
7415}
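/*
 * The refresh bounds above are handed to the freesync module in
 * micro-Hz. A sketch with hypothetical panel limits of 48-144 Hz:
 *
 *   config.min_refresh_in_uhz =  48 * 1000000;  //  48,000,000 uHz
 *   config.max_refresh_in_uhz = 144 * 1000000;  // 144,000,000 uHz
 */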
7416
7417static void reset_freesync_config_for_crtc(
7418 struct dm_crtc_state *new_crtc_state)
7419{
7420 new_crtc_state->vrr_supported = false;
7421
7422 memset(&new_crtc_state->vrr_params, 0,
7423 sizeof(new_crtc_state->vrr_params));
7424 memset(&new_crtc_state->vrr_infopacket, 0,
7425 sizeof(new_crtc_state->vrr_infopacket));
7426}
7427
7428static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7429 struct drm_atomic_state *state,
7430 struct drm_crtc *crtc,
7431 struct drm_crtc_state *old_crtc_state,
7432 struct drm_crtc_state *new_crtc_state,
7433 bool enable,
7434 bool *lock_and_validation_needed)
7435{
7436 struct dm_atomic_state *dm_state = NULL;
7437 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7438 struct dc_stream_state *new_stream;
7439 int ret = 0;
7440
7441 /*
7442 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7443 * update changed items
7444 */
7445 struct amdgpu_crtc *acrtc = NULL;
7446 struct amdgpu_dm_connector *aconnector = NULL;
7447 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7448 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7449
7450 new_stream = NULL;
7451
7452 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7453 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7454 acrtc = to_amdgpu_crtc(crtc);
7455 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7456
7457 /* TODO This hack should go away */
7458 if (aconnector && enable) {
7459 /* Make sure fake sink is created in plug-in scenario */
7460 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7461 &aconnector->base);
7462 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7463 &aconnector->base);
7464
7465 if (IS_ERR(drm_new_conn_state)) {
7466 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7467 goto fail;
7468 }
7469
7470 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7471 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7472
7473 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7474 goto skip_modeset;
7475
7476 new_stream = create_stream_for_sink(aconnector,
7477 &new_crtc_state->mode,
7478 dm_new_conn_state,
7479 dm_old_crtc_state->stream);
7480
7481 /*
7482 * we can have no stream on ACTION_SET if a display
7483 * was disconnected during S3, in this case it is not an
7484 * error, the OS will be updated after detection, and
7485 * will do the right thing on next atomic commit
7486 */
7487
7488 if (!new_stream) {
7489 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7490 __func__, acrtc->base.base.id);
7491 ret = -ENOMEM;
7492 goto fail;
7493 }
7494
7495 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7496
7497 ret = fill_hdr_info_packet(drm_new_conn_state,
7498 &new_stream->hdr_static_metadata);
7499 if (ret)
7500 goto fail;
7501
7502 /*
7503 * If we already removed the old stream from the context
7504 * (and set the new stream to NULL) then we can't reuse
7505 * the old stream even if the stream and scaling are unchanged.
7506 * We'll hit the BUG_ON and black screen.
7507 *
7508 * TODO: Refactor this function to allow this check to work
7509 * in all conditions.
7510 */
7511 if (dm_new_crtc_state->stream &&
7512 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7513 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7514 new_crtc_state->mode_changed = false;
7515 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7516 new_crtc_state->mode_changed);
7517 }
7518 }
7519
7520 /* mode_changed flag may get updated above, need to check again */
7521 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7522 goto skip_modeset;
7523
7524 DRM_DEBUG_DRIVER(
7525 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7526 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7527 "connectors_changed:%d\n",
7528 acrtc->crtc_id,
7529 new_crtc_state->enable,
7530 new_crtc_state->active,
7531 new_crtc_state->planes_changed,
7532 new_crtc_state->mode_changed,
7533 new_crtc_state->active_changed,
7534 new_crtc_state->connectors_changed);
7535
7536 /* Remove stream for any changed/disabled CRTC */
7537 if (!enable) {
7538
7539 if (!dm_old_crtc_state->stream)
7540 goto skip_modeset;
7541
7542 ret = dm_atomic_get_state(state, &dm_state);
7543 if (ret)
7544 goto fail;
7545
7546 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7547 crtc->base.id);
7548
7549 /* i.e. reset mode */
7550 if (dc_remove_stream_from_ctx(
7551 dm->dc,
7552 dm_state->context,
7553 dm_old_crtc_state->stream) != DC_OK) {
7554 ret = -EINVAL;
7555 goto fail;
7556 }
7557
7558 dc_stream_release(dm_old_crtc_state->stream);
7559 dm_new_crtc_state->stream = NULL;
7560
7561 reset_freesync_config_for_crtc(dm_new_crtc_state);
7562
7563 *lock_and_validation_needed = true;
7564
7565 } else {/* Add stream for any updated/enabled CRTC */
7566 /*
7567 * Quick fix to prevent a NULL pointer dereference on new_stream when newly
7568 * added MST connectors are not found in the existing crtc_state in chained mode.
7569 * TODO: need to dig out the root cause of this.
7570 */
7571 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7572 goto skip_modeset;
7573
7574 if (modereset_required(new_crtc_state))
7575 goto skip_modeset;
7576
7577 if (modeset_required(new_crtc_state, new_stream,
7578 dm_old_crtc_state->stream)) {
7579
7580 WARN_ON(dm_new_crtc_state->stream);
7581
7582 ret = dm_atomic_get_state(state, &dm_state);
7583 if (ret)
7584 goto fail;
7585
7586 dm_new_crtc_state->stream = new_stream;
7587
7588 dc_stream_retain(new_stream);
7589
7590 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7591 crtc->base.id);
7592
7593 if (dc_add_stream_to_ctx(
7594 dm->dc,
7595 dm_state->context,
7596 dm_new_crtc_state->stream) != DC_OK) {
7597 ret = -EINVAL;
7598 goto fail;
7599 }
7600
7601 *lock_and_validation_needed = true;
7602 }
7603 }
7604
7605skip_modeset:
7606 /* Release extra reference */
7607 if (new_stream)
7608 dc_stream_release(new_stream);
7609
7610 /*
7611 * We want to do dc stream updates that do not require a
7612 * full modeset below.
7613 */
7614 if (!(enable && aconnector && new_crtc_state->enable &&
7615 new_crtc_state->active))
7616 return 0;
7617 /*
7618 * Given above conditions, the dc state cannot be NULL because:
7619 * 1. We're in the process of enabling CRTCs (the stream has just been
7620 * added to the dc context, or is already on the context),
7621 * 2. It has a valid connector attached, and
7622 * 3. It is currently active and enabled.
7623 * => The dc stream state currently exists.
7624 */
7625 BUG_ON(dm_new_crtc_state->stream == NULL);
7626
7627 /* Scaling or underscan settings */
7628 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7629 update_stream_scaling_settings(
7630 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7631
7632 /* ABM settings */
7633 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7634
7635 /*
7636 * Color management settings. We also update color properties
7637 * when a modeset is needed, to ensure it gets reprogrammed.
7638 */
7639 if (dm_new_crtc_state->base.color_mgmt_changed ||
7640 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7641 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7642 if (ret)
7643 goto fail;
7644 }
7645
7646 /* Update Freesync settings. */
7647 get_freesync_config_for_crtc(dm_new_crtc_state,
7648 dm_new_conn_state);
7649
7650 return ret;
7651
7652fail:
7653 if (new_stream)
7654 dc_stream_release(new_stream);
7655 return ret;
7656}
7657
7658static bool should_reset_plane(struct drm_atomic_state *state,
7659 struct drm_plane *plane,
7660 struct drm_plane_state *old_plane_state,
7661 struct drm_plane_state *new_plane_state)
7662{
7663 struct drm_plane *other;
7664 struct drm_plane_state *old_other_state, *new_other_state;
7665 struct drm_crtc_state *new_crtc_state;
7666 int i;
7667
7668 /*
7669 * TODO: Remove this hack once the checks below are sufficient
7670 * to determine when we need to reset all the planes on
7671 * the stream.
7672 */
7673 if (state->allow_modeset)
7674 return true;
7675
7676 /* Exit early if we know that we're adding or removing the plane. */
7677 if (old_plane_state->crtc != new_plane_state->crtc)
7678 return true;
7679
7680 /* old crtc == new_crtc == NULL, plane not in context. */
7681 if (!new_plane_state->crtc)
7682 return false;
7683
7684 new_crtc_state =
7685 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7686
7687 if (!new_crtc_state)
7688 return true;
7689
7690 /* CRTC Degamma changes currently require us to recreate planes. */
7691 if (new_crtc_state->color_mgmt_changed)
7692 return true;
7693
7694 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7695 return true;
7696
7697 /*
7698 * If there are any new primary or overlay planes being added or
7699 * removed then the z-order can potentially change. To ensure
7700 * correct z-order and pipe acquisition the current DC architecture
7701 * requires us to remove and recreate all existing planes.
7702 *
7703 * TODO: Come up with a more elegant solution for this.
7704 */
7705 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7706 if (other->type == DRM_PLANE_TYPE_CURSOR)
7707 continue;
7708
7709 if (old_other_state->crtc != new_plane_state->crtc &&
7710 new_other_state->crtc != new_plane_state->crtc)
7711 continue;
7712
7713 if (old_other_state->crtc != new_other_state->crtc)
7714 return true;
7715
7716 /* TODO: Remove this once we can handle fast format changes. */
7717 if (old_other_state->fb && new_other_state->fb &&
7718 old_other_state->fb->format != new_other_state->fb->format)
7719 return true;
7720 }
7721
7722 return false;
7723}
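/*
 * In short, should_reset_plane() answers "does DC need to recreate every
 * plane on this stream?": true for allow_modeset commits, CRTC
 * reassignment, color management changes, full modesets, and any z-order
 * or format change among the non-cursor planes; false only for isolated
 * plane updates.
 */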
7724
7725static int dm_update_plane_state(struct dc *dc,
7726 struct drm_atomic_state *state,
7727 struct drm_plane *plane,
7728 struct drm_plane_state *old_plane_state,
7729 struct drm_plane_state *new_plane_state,
7730 bool enable,
7731 bool *lock_and_validation_needed)
7732{
7733
7734 struct dm_atomic_state *dm_state = NULL;
7735 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7736 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7737 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7738 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7739 bool needs_reset;
7740 int ret = 0;
7741
7742
7743 new_plane_crtc = new_plane_state->crtc;
7744 old_plane_crtc = old_plane_state->crtc;
7745 dm_new_plane_state = to_dm_plane_state(new_plane_state);
7746 dm_old_plane_state = to_dm_plane_state(old_plane_state);
7747
7748 /* TODO: Implement atomic check for cursor plane */
7749 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7750 return 0;
7751
7752 needs_reset = should_reset_plane(state, plane, old_plane_state,
7753 new_plane_state);
7754
7755 /* Remove any changed/removed planes */
7756 if (!enable) {
7757 if (!needs_reset)
7758 return 0;
7759
7760 if (!old_plane_crtc)
7761 return 0;
7762
7763 old_crtc_state = drm_atomic_get_old_crtc_state(
7764 state, old_plane_crtc);
7765 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7766
7767 if (!dm_old_crtc_state->stream)
7768 return 0;
7769
7770 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7771 plane->base.id, old_plane_crtc->base.id);
7772
7773 ret = dm_atomic_get_state(state, &dm_state);
7774 if (ret)
7775 return ret;
7776
7777 if (!dc_remove_plane_from_context(
7778 dc,
7779 dm_old_crtc_state->stream,
7780 dm_old_plane_state->dc_state,
7781 dm_state->context)) {
7782
7783 ret = -EINVAL;
7784 return ret;
7785 }
7786
7787
7788 dc_plane_state_release(dm_old_plane_state->dc_state);
7789 dm_new_plane_state->dc_state = NULL;
7790
7791 *lock_and_validation_needed = true;
7792
7793 } else { /* Add new planes */
7794 struct dc_plane_state *dc_new_plane_state;
7795
7796 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7797 return 0;
7798
7799 if (!new_plane_crtc)
7800 return 0;
7801
7802 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7803 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7804
7805 if (!dm_new_crtc_state->stream)
7806 return 0;
7807
7808 if (!needs_reset)
7809 return 0;
7810
7811 WARN_ON(dm_new_plane_state->dc_state);
7812
7813 dc_new_plane_state = dc_create_plane_state(dc);
7814 if (!dc_new_plane_state)
7815 return -ENOMEM;
7816
7817 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7818 plane->base.id, new_plane_crtc->base.id);
7819
7820 ret = fill_dc_plane_attributes(
7821 new_plane_crtc->dev->dev_private,
7822 dc_new_plane_state,
7823 new_plane_state,
7824 new_crtc_state);
7825 if (ret) {
7826 dc_plane_state_release(dc_new_plane_state);
7827 return ret;
7828 }
7829
7830 ret = dm_atomic_get_state(state, &dm_state);
7831 if (ret) {
7832 dc_plane_state_release(dc_new_plane_state);
7833 return ret;
7834 }
7835
7836 /*
7837 * Any atomic check errors that occur after this will
7838 * not need a release. The plane state will be attached
7839 * to the stream, and therefore part of the atomic
7840 * state. It'll be released when the atomic state is
7841 * cleaned.
7842 */
7843 if (!dc_add_plane_to_context(
7844 dc,
7845 dm_new_crtc_state->stream,
7846 dc_new_plane_state,
7847 dm_state->context)) {
7848
7849 dc_plane_state_release(dc_new_plane_state);
7850 return -EINVAL;
7851 }
7852
7853 dm_new_plane_state->dc_state = dc_new_plane_state;
7854
7855 /* Tell DC to do a full surface update every time there
7856 * is a plane change. Inefficient, but works for now.
7857 */
7858 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7859
7860 *lock_and_validation_needed = true;
7861 }
7862
7863
7864 return ret;
7865}
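/*
 * dm_update_plane_state() is invoked twice per plane from
 * amdgpu_dm_atomic_check(): first with enable == false (iterating planes
 * in reverse) to drop removed planes from the DC context, then with
 * enable == true to add the new ones, mirroring the two
 * dm_update_crtc_state() passes.
 */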
7866
7867static int
7868dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
7869 struct drm_atomic_state *state,
7870 enum surface_update_type *out_type)
7871{
7872 struct dc *dc = dm->dc;
7873 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
7874 int i, j, num_plane, ret = 0;
7875 struct drm_plane_state *old_plane_state, *new_plane_state;
7876 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
7877 struct drm_crtc *new_plane_crtc;
7878 struct drm_plane *plane;
7879
7880 struct drm_crtc *crtc;
7881 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
7882 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
7883 struct dc_stream_status *status = NULL;
7884 enum surface_update_type update_type = UPDATE_TYPE_FAST;
7885 struct surface_info_bundle {
7886 struct dc_surface_update surface_updates[MAX_SURFACES];
7887 struct dc_plane_info plane_infos[MAX_SURFACES];
7888 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7889 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7890 struct dc_stream_update stream_update;
7891 } *bundle;
7892
7893 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7894
7895 if (!bundle) {
7896 DRM_ERROR("Failed to allocate update bundle\n");
7897 /* Set type to FULL to avoid crashing in DC */
7898 update_type = UPDATE_TYPE_FULL;
7899 goto cleanup;
7900 }
7901
7902 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7903
7904 memset(bundle, 0, sizeof(struct surface_info_bundle));
7905
7906 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7907 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
7908 num_plane = 0;
7909
7910 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
7911 update_type = UPDATE_TYPE_FULL;
7912 goto cleanup;
7913 }
7914
7915 if (!new_dm_crtc_state->stream)
7916 continue;
7917
7918 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
7919 const struct amdgpu_framebuffer *amdgpu_fb =
7920 to_amdgpu_framebuffer(new_plane_state->fb);
7921 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
7922 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
7923 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
7924 uint64_t tiling_flags;
7925
7926 new_plane_crtc = new_plane_state->crtc;
7927 new_dm_plane_state = to_dm_plane_state(new_plane_state);
7928 old_dm_plane_state = to_dm_plane_state(old_plane_state);
7929
7930 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7931 continue;
7932
7933 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
7934 update_type = UPDATE_TYPE_FULL;
7935 goto cleanup;
7936 }
7937
7938 if (crtc != new_plane_crtc)
7939 continue;
7940
7941 bundle->surface_updates[num_plane].surface =
7942 new_dm_plane_state->dc_state;
7943
7944 if (new_crtc_state->mode_changed) {
7945 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
7946 bundle->stream_update.src = new_dm_crtc_state->stream->src;
7947 }
7948
7949 if (new_crtc_state->color_mgmt_changed) {
7950 bundle->surface_updates[num_plane].gamma =
7951 new_dm_plane_state->dc_state->gamma_correction;
7952 bundle->surface_updates[num_plane].in_transfer_func =
7953 new_dm_plane_state->dc_state->in_transfer_func;
7954 bundle->stream_update.gamut_remap =
7955 &new_dm_crtc_state->stream->gamut_remap_matrix;
7956 bundle->stream_update.output_csc_transform =
7957 &new_dm_crtc_state->stream->csc_color_matrix;
7958 bundle->stream_update.out_transfer_func =
7959 new_dm_crtc_state->stream->out_transfer_func;
7960 }
7961
7962 ret = fill_dc_scaling_info(new_plane_state,
7963 scaling_info);
7964 if (ret)
7965 goto cleanup;
7966
7967 bundle->surface_updates[num_plane].scaling_info = scaling_info;
7968
7969 if (amdgpu_fb) {
7970 ret = get_fb_info(amdgpu_fb, &tiling_flags);
7971 if (ret)
7972 goto cleanup;
7973
7974 ret = fill_dc_plane_info_and_addr(
7975 dm->adev, new_plane_state, tiling_flags,
7976 plane_info,
7977 &flip_addr->address);
7978 if (ret)
7979 goto cleanup;
7980
7981 bundle->surface_updates[num_plane].plane_info = plane_info;
7982 bundle->surface_updates[num_plane].flip_addr = flip_addr;
7983 }
7984
7985 num_plane++;
7986 }
7987
7988 if (num_plane == 0)
7989 continue;
7990
7991 ret = dm_atomic_get_state(state, &dm_state);
7992 if (ret)
7993 goto cleanup;
7994
7995 old_dm_state = dm_atomic_get_old_state(state);
7996 if (!old_dm_state) {
7997 ret = -EINVAL;
7998 goto cleanup;
7999 }
8000
8001 status = dc_stream_get_status_from_state(old_dm_state->context,
8002 new_dm_crtc_state->stream);
8003 bundle->stream_update.stream = new_dm_crtc_state->stream;
8004 /*
8005 * TODO: DC modifies the surface during this call so we need
8006 * to lock here - find a way to do this without locking.
8007 */
8008 mutex_lock(&dm->dc_lock);
8009 update_type = dc_check_update_surfaces_for_stream(
8010 dc, bundle->surface_updates, num_plane,
8011 &bundle->stream_update, status);
8012 mutex_unlock(&dm->dc_lock);
8013
8014 if (update_type > UPDATE_TYPE_MED) {
8015 update_type = UPDATE_TYPE_FULL;
8016 goto cleanup;
8017 }
8018 }
8019
8020cleanup:
8021 kfree(bundle);
8022
8023 *out_type = update_type;
8024 return ret;
8025}
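/*
 * surface_update_type is an ordered severity scale (FAST < MED < FULL).
 * dc_check_update_surfaces_for_stream() classifies the accumulated
 * updates per stream; anything above MED is clamped to FULL here, and a
 * FULL result is what later forces global validation and the global
 * modeset lock in amdgpu_dm_atomic_check().
 */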
8026
8027static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8028{
8029 struct drm_connector *connector;
8030 struct drm_connector_state *conn_state;
8031 struct amdgpu_dm_connector *aconnector = NULL;
8032 int i;
8033 for_each_new_connector_in_state(state, connector, conn_state, i) {
8034 if (conn_state->crtc != crtc)
8035 continue;
8036
8037 aconnector = to_amdgpu_dm_connector(connector);
8038 if (!aconnector->port || !aconnector->mst_port)
8039 aconnector = NULL;
8040 else
8041 break;
8042 }
8043
8044 if (!aconnector)
8045 return 0;
8046
8047 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8048}
8049
8050/**
8051 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8052 * @dev: The DRM device
8053 * @state: The atomic state to commit
8054 *
8055 * Validate that the given atomic state is programmable by DC into hardware.
8056 * This involves constructing a &struct dc_state reflecting the new hardware
8057 * state we wish to commit, then querying DC to see if it is programmable. It's
8058 * important not to modify the existing DC state. Otherwise, atomic_check
8059 * may unexpectedly commit hardware changes.
8060 *
8061 * When validating the DC state, it's important that the right locks are
8062 * acquired. For full updates case which removes/adds/updates streams on one
8063 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8064 * that any such full update commit will wait for completion of any outstanding
8065 * flip using DRMs synchronization events. See
8066 * dm_determine_update_type_for_commit()
8067 *
8068 * Note that DM adds the affected connectors for all CRTCs in state, when that
8069 * might not seem necessary. This is because DC stream creation requires the
8070 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8071 * be possible but non-trivial - a possible TODO item.
8072 *
8073 * Return: negative error code if validation failed, 0 otherwise.
8074 */
8075static int amdgpu_dm_atomic_check(struct drm_device *dev,
8076 struct drm_atomic_state *state)
8077{
8078 struct amdgpu_device *adev = dev->dev_private;
8079 struct dm_atomic_state *dm_state = NULL;
8080 struct dc *dc = adev->dm.dc;
8081 struct drm_connector *connector;
8082 struct drm_connector_state *old_con_state, *new_con_state;
8083 struct drm_crtc *crtc;
8084 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8085 struct drm_plane *plane;
8086 struct drm_plane_state *old_plane_state, *new_plane_state;
8087 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8088 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8089
8090 int ret, i;
8091
8092 /*
8093 * This bool will be set to true for any modeset/reset
8094 * or plane update which implies a non-fast surface update.
8095 */
8096 bool lock_and_validation_needed = false;
8097
8098 ret = drm_atomic_helper_check_modeset(dev, state);
8099 if (ret)
8100 goto fail;
8101
8102 if (adev->asic_type >= CHIP_NAVI10) {
8103 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8104 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8105 ret = add_affected_mst_dsc_crtcs(state, crtc);
8106 if (ret)
8107 goto fail;
8108 }
8109 }
8110 }
8111
8112 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8113 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8114 !new_crtc_state->color_mgmt_changed &&
8115 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8116 continue;
8117
8118 if (!new_crtc_state->enable)
8119 continue;
8120
8121 ret = drm_atomic_add_affected_connectors(state, crtc);
8122 if (ret)
8123 return ret;
8124
8125 ret = drm_atomic_add_affected_planes(state, crtc);
8126 if (ret)
8127 goto fail;
8128 }
8129
8130 /*
8131 * Add all primary and overlay planes on the CRTC to the state
8132 * whenever a plane is enabled to maintain correct z-ordering
8133 * and to enable fast surface updates.
8134 */
8135 drm_for_each_crtc(crtc, dev) {
8136 bool modified = false;
8137
8138 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8139 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8140 continue;
8141
8142 if (new_plane_state->crtc == crtc ||
8143 old_plane_state->crtc == crtc) {
8144 modified = true;
8145 break;
8146 }
8147 }
8148
8149 if (!modified)
8150 continue;
8151
8152 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8153 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8154 continue;
8155
8156 new_plane_state =
8157 drm_atomic_get_plane_state(state, plane);
8158
8159 if (IS_ERR(new_plane_state)) {
8160 ret = PTR_ERR(new_plane_state);
8161 goto fail;
8162 }
8163 }
8164 }
8165
8166 /* Remove existing planes if they are modified */
8167 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8168 ret = dm_update_plane_state(dc, state, plane,
8169 old_plane_state,
8170 new_plane_state,
8171 false,
8172 &lock_and_validation_needed);
8173 if (ret)
8174 goto fail;
8175 }
8176
8177 /* Disable all crtcs which require disable */
8178 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8179 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8180 old_crtc_state,
8181 new_crtc_state,
8182 false,
8183 &lock_and_validation_needed);
8184 if (ret)
8185 goto fail;
8186 }
8187
8188 /* Enable all crtcs which require enable */
8189 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8190 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8191 old_crtc_state,
8192 new_crtc_state,
8193 true,
8194 &lock_and_validation_needed);
8195 if (ret)
8196 goto fail;
8197 }
8198
8199 /* Add new/modified planes */
8200 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8201 ret = dm_update_plane_state(dc, state, plane,
8202 old_plane_state,
8203 new_plane_state,
8204 true,
8205 &lock_and_validation_needed);
8206 if (ret)
8207 goto fail;
8208 }
8209
8210 /* Run this here since we want to validate the streams we created */
8211 ret = drm_atomic_helper_check_planes(dev, state);
8212 if (ret)
8213 goto fail;
8214
8215 if (state->legacy_cursor_update) {
8216 /*
8217 * This is a fast cursor update coming from the plane update
8218 * helper, check if it can be done asynchronously for better
8219 * performance.
8220 */
8221 state->async_update =
8222 !drm_atomic_helper_async_check(dev, state);
8223
8224 /*
8225 * Skip the remaining global validation if this is an async
8226 * update. Cursor updates can be done without affecting
8227 * state or bandwidth calcs and this avoids the performance
8228 * penalty of locking the private state object and
8229 * allocating a new dc_state.
8230 */
8231 if (state->async_update)
8232 return 0;
8233 }
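/*
 * The legacy cursor fast path above relies on
 * drm_atomic_helper_async_check(); when the helper reports that the
 * update can be done asynchronously, DM skips global validation
 * entirely, since a cursor move touches neither bandwidth calculations
 * nor the DC context.
 */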
8234
8235 /* Check scaling and underscan changes */
8236 /* TODO Removed scaling changes validation due to inability to commit
8237 * a new stream into context w/o causing full reset. Need to
8238 * decide how to handle.
8239 */
8240 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8241 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8242 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8243 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8244
8245 /* Skip any modesets/resets */
8246 if (!acrtc || drm_atomic_crtc_needs_modeset(
8247 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
8248 continue;
8249
b830ebc9 8250 /* Skip anything that is not a scaling or underscan change */
54d76575 8251 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
8252 continue;
8253
a87fa993 8254 overall_update_type = UPDATE_TYPE_FULL;
e7b07cee
HW
8255 lock_and_validation_needed = true;
8256 }
8257
f843b308 8258 ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
eb3dc897
NK
8259 if (ret)
8260 goto fail;
a87fa993
BL
8261
8262 if (overall_update_type < update_type)
8263 overall_update_type = update_type;
8264
8265 /*
8266 * lock_and_validation_needed was an old way to determine if we need to set
8267 * the global lock. Leaving it in to check if we broke any corner cases:
8268 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8269 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8270 */
8271 if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8272 WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
e7b07cee 8273
a87fa993 8274 if (overall_update_type > UPDATE_TYPE_FAST) {
eb3dc897
NK
8275 ret = dm_atomic_get_state(state, &dm_state);
8276 if (ret)
8277 goto fail;
e7b07cee
HW
8278
8279 ret = do_aquire_global_lock(dev, state);
8280 if (ret)
8281 goto fail;
1dc90497 8282
d9fe1a4c 8283#if defined(CONFIG_DRM_AMD_DC_DCN)
8c20a1ed
DF
8284 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8285 goto fail;
8286
29b9ba74
ML
8287 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8288 if (ret)
8289 goto fail;
d9fe1a4c 8290#endif
29b9ba74 8291
ded58c7b
ZL
8292 /*
8293 * Perform validation of MST topology in the state:
8294 * We need to perform MST atomic check before calling
8295 * dc_validate_global_state(), or there is a chance
8296 * to get stuck in an infinite loop and hang eventually.
8297 */
8298 ret = drm_dp_mst_atomic_check(state);
8299 if (ret)
8300 goto fail;
8301
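/*
 * An example of what this catches: an MST branch driving two
 * high-bandwidth streams whose combined VCPI time-slot demand exceeds
 * what the link can carry. drm_dp_mst_atomic_check() fails the commit
 * here rather than letting the DC global validation below loop on an
 * unsatisfiable topology.
 */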
afcd526b 8302 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
e7b07cee
HW
8303 ret = -EINVAL;
8304 goto fail;
8305 }
bd200d19 8306 } else {
674e78ac 8307 /*
bd200d19
NK
8308 * The commit is a fast update. Fast updates shouldn't change
8309 * the DC context, affect global validation, and can have their
8310 * commit work done in parallel with other commits not touching
8311 * the same resource. If we have a new DC context as part of
8312 * the DM atomic state from validation we need to free it and
8313 * retain the existing one instead.
674e78ac 8314 */
bd200d19
NK
8315 struct dm_atomic_state *new_dm_state, *old_dm_state;
8316
8317 new_dm_state = dm_atomic_get_new_state(state);
8318 old_dm_state = dm_atomic_get_old_state(state);
8319
8320 if (new_dm_state && old_dm_state) {
8321 if (new_dm_state->context)
8322 dc_release_state(new_dm_state->context);
8323
8324 new_dm_state->context = old_dm_state->context;
8325
8326 if (old_dm_state->context)
8327 dc_retain_state(old_dm_state->context);
8328 }
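/*
 * Note on the swap above: dc_state is reference counted.
 * dc_release_state() drops the reference on the context that validation
 * created for this commit, while dc_retain_state() takes a reference on
 * the context shared with the old state, so the fast update commits
 * against the unchanged global context.
 */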
e7b07cee
HW
8329 }
8330
caff0e66
NK
8331 /* Store the overall update type for use later in atomic check. */
8332 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8333 struct dm_crtc_state *dm_new_crtc_state =
8334 to_dm_crtc_state(new_crtc_state);
8335
8336 dm_new_crtc_state->update_type = (int)overall_update_type;
e7b07cee
HW
8337 }
8338
8339 /* Must have succeeded at this point */
8340 WARN_ON(ret);
8341 return ret;
8342
8343fail:
8344 if (ret == -EDEADLK)
01e28f9c 8345 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 8346 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 8347 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 8348 else
01e28f9c 8349 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee
HW
8350
8351 return ret;
8352}
8353
3ee6b26b
AD
8354static bool is_dp_capable_without_timing_msa(struct dc *dc,
8355 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
8356{
8357 uint8_t dpcd_data;
8358 bool capable = false;
8359
c84dec2f 8360 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
8361 dm_helpers_dp_read_dpcd(
8362 NULL,
c84dec2f 8363 amdgpu_dm_connector->dc_link,
e7b07cee
HW
8364 DP_DOWN_STREAM_PORT_COUNT,
8365 &dpcd_data,
8366 sizeof(dpcd_data))) {
8367 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8368 }
8369
8370 return capable;
8371}
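/*
 * For reference, a minimal sketch of the same DPCD probe written against
 * the generic DRM DP helpers (hypothetical standalone use; the function
 * above goes through dm_helpers so that DC owns the AUX transaction):
 *
 *	u8 val;
 *
 *	if (drm_dp_dpcd_readb(aux, DP_DOWN_STREAM_PORT_COUNT, &val) == 1)
 *		capable = val & DP_MSA_TIMING_PAR_IGNORED;
 *
 * DP_MSA_TIMING_PAR_IGNORED is bit 6 of the DOWN_STREAM_PORT_COUNT
 * register (DPCD 0x007); a sink that sets it can ignore MSA timing
 * parameters, which is a prerequisite for FreeSync over DP.
 */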
98e6436d
AK
8372void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8373 struct edid *edid)
e7b07cee
HW
8374{
8375 int i;
e7b07cee
HW
8376 bool edid_check_required;
8377 struct detailed_timing *timing;
8378 struct detailed_non_pixel *data;
8379 struct detailed_data_monitor_range *range;
c84dec2f
HW
8380 struct amdgpu_dm_connector *amdgpu_dm_connector =
8381 to_amdgpu_dm_connector(connector);
bb47de73 8382 struct dm_connector_state *dm_con_state = NULL;
e7b07cee
HW
8383
8384 struct drm_device *dev = connector->dev;
8385 struct amdgpu_device *adev = dev->dev_private;
bb47de73 8386 bool freesync_capable = false;
b830ebc9 8387
8218d7f1
HW
8388 if (!connector->state) {
8389 DRM_ERROR("%s - Connector has no state\n", __func__);
bb47de73 8390 goto update;
8218d7f1
HW
8391 }
8392
98e6436d
AK
8393 if (!edid) {
8394 dm_con_state = to_dm_connector_state(connector->state);
8395
8396 amdgpu_dm_connector->min_vfreq = 0;
8397 amdgpu_dm_connector->max_vfreq = 0;
8398 amdgpu_dm_connector->pixel_clock_mhz = 0;
8399
bb47de73 8400 goto update;
98e6436d
AK
8401 }
8402
8218d7f1
HW
8403 dm_con_state = to_dm_connector_state(connector->state);
8404
e7b07cee 8405 edid_check_required = false;
c84dec2f 8406 if (!amdgpu_dm_connector->dc_sink) {
e7b07cee 8407 DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
bb47de73 8408 goto update;
e7b07cee
HW
8409 }
8410 if (!adev->dm.freesync_module)
bb47de73 8411 goto update;
e7b07cee
HW
8412 /*
8413 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP only.
8414 */
8415 if (edid) {
c84dec2f
HW
8416 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8417 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
e7b07cee
HW
8418 edid_check_required = is_dp_capable_without_timing_msa(
8419 adev->dm.dc,
c84dec2f 8420 amdgpu_dm_connector);
e7b07cee
HW
8421 }
8422 }
e7b07cee
HW
8423 if (edid_check_required && (edid->version > 1 ||
8424 (edid->version == 1 && edid->revision > 1))) {
8425 for (i = 0; i < 4; i++) {
8426
8427 timing = &edid->detailed_timings[i];
8428 data = &timing->data.other_data;
8429 range = &data->data.range;
8430 /*
8431 * Check if monitor has continuous frequency mode
8432 */
8433 if (data->type != EDID_DETAIL_MONITOR_RANGE)
8434 continue;
8435 /*
8436 * Check for flag range limits only. If flag == 1 then
8437 * no additional timing information provided.
8438 * Default GTF, GTF Secondary curve and CVT are not
8439 * supported
8440 */
8441 if (range->flags != 1)
8442 continue;
8443
c84dec2f
HW
8444 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8445 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8446 amdgpu_dm_connector->pixel_clock_mhz =
e7b07cee
HW
8447 range->pixel_clock_mhz * 10;
8448 break;
8449 }
8450
c84dec2f 8451 if (amdgpu_dm_connector->max_vfreq -
98e6436d
AK
8452 amdgpu_dm_connector->min_vfreq > 10) {
8453
bb47de73 8454 freesync_capable = true;
e7b07cee
HW
8455 }
8456 }
bb47de73
NK
8457
8458update:
8459 if (dm_con_state)
8460 dm_con_state->freesync_capable = freesync_capable;
8461
8462 if (connector->vrr_capable_property)
8463 drm_connector_set_vrr_capable_property(connector,
8464 freesync_capable);
e7b07cee
HW
8465}
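/*
 * For reference, the monitor range descriptor parsed above occupies one
 * of the four 18-byte detailed timing slots, tagged
 * EDID_DETAIL_MONITOR_RANGE (0xFD). Its payload carries the minimum and
 * maximum vertical refresh in Hz and the maximum pixel clock in units
 * of 10 MHz, which is why pixel_clock_mhz is scaled by 10 above. A
 * panel advertising a 48-75 Hz range yields max_vfreq - min_vfreq =
 * 27 > 10 and is therefore reported as freesync_capable.
 */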
8466
8c322309
RL
8467static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8468{
8469 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8470
8471 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8472 return;
8473 if (link->type == dc_connection_none)
8474 return;
8475 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8476 dpcd_data, sizeof(dpcd_data))) {
8477 link->psr_feature_enabled = dpcd_data[0] ? true : false;
8478 DRM_INFO("PSR support: %d\n", link->psr_feature_enabled);
8479 }
8480}
8481
8482/*
8483 * amdgpu_dm_link_setup_psr() - configure psr link
8484 * @stream: stream state
8485 *
8486 * Return: true on success
8487 */
8488static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8489{
8490 struct dc_link *link = NULL;
8491 struct psr_config psr_config = {0};
8492 struct psr_context psr_context = {0};
8493 struct dc *dc = NULL;
8494 bool ret = false;
8495
8496 if (stream == NULL)
8497 return false;
8498
8499 link = stream->link;
8500 dc = link->ctx->dc;
8501
8502 psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
8503
8504 if (psr_config.psr_version > 0) {
8505 psr_config.psr_exit_link_training_required = 0x1;
8506 psr_config.psr_frame_capture_indication_req = 0;
8507 psr_config.psr_rfb_setup_time = 0x37;
8508 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8509 psr_config.allow_smu_optimizations = 0x0;
8510
8511 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8512
8513 }
8514 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
8515
8516 return ret;
8517}
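/*
 * Note: psr_config.psr_version above comes from the DMCU firmware
 * version block, not from the panel. A value of 0 means the loaded
 * firmware implements no PSR support, in which case dc_link_setup_psr()
 * is never called and this function returns false.
 */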
8518
8519/*
8520 * amdgpu_dm_psr_enable() - enable psr f/w
8521 * @stream: stream state
8522 *
8523 * Return: true on success
8524 */
8525bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8526{
8527 struct dc_link *link = stream->link;
5b5abe95
AK
8528 unsigned int vsync_rate_hz = 0;
8529 struct dc_static_screen_params params = {0};
8530 /* Calculate number of static frames before generating interrupt to
8531 * enter PSR.
8532 */
5b5abe95
AK
8533 /* Initialize with a fail-safe of 2 static frames */
8534 unsigned int num_frames_static = 2;
8c322309
RL
8535
8536 DRM_DEBUG_DRIVER("Enabling psr...\n");
8537
5b5abe95
AK
8538 vsync_rate_hz = div64_u64(div64_u64((
8539 stream->timing.pix_clk_100hz * 100),
8540 stream->timing.v_total),
8541 stream->timing.h_total);
8542
8543 /* Round up:
8544 * calculate the number of frames such that at least 30 ms of time has
8545 * passed.
8546 */
2286d2f9
RL
8547 if (vsync_rate_hz != 0) {
8548 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
5b5abe95 8549 num_frames_static = (30000 / frame_time_microsec) + 1;
2286d2f9 8550 }
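/*
 * Worked example: on a 60 Hz panel, frame_time_microsec is
 * 1000000 / 60 = 16666 us, so num_frames_static evaluates to
 * 30000 / 16666 + 1 = 2 frames; on a 144 Hz panel it becomes
 * 30000 / 6944 + 1 = 5 frames of static content before the PSR-entry
 * interrupt is generated.
 */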
5b5abe95
AK
8551
8552 params.triggers.cursor_update = true;
8553 params.triggers.overlay_update = true;
8554 params.triggers.surface_update = true;
8555 params.num_frames = num_frames_static;
8c322309 8556
5b5abe95 8557 dc_stream_set_static_screen_params(link->ctx->dc,
8c322309 8558 &stream, 1,
5b5abe95 8559 &params);
8c322309
RL
8560
8561 return dc_link_set_psr_allow_active(link, true, false);
8562}
8563
8564/*
8565 * amdgpu_dm_psr_disable() - disable psr f/w
8566 * @stream: stream state
8567 *
8568 * Return: true on success
8569 */
8570static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8571{
8572
8573 DRM_DEBUG_DRIVER("Disabling psr...\n");
8574
8575 return dc_link_set_psr_allow_active(stream->link, false, true);
8576}