drm/amd/display: Register DMUB service with DC
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#ifdef CONFIG_DRM_AMD_DC_DMUB
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#endif

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#ifdef CONFIG_DRM_AMD_DC_DMUB
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#endif
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
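
/*
 * Hedged sketch (illustration, not driver code): roughly how a DRM
 * request travels through dm. All names below except dc_commit_state()
 * are defined in this file; dc_commit_state() lives in DC proper, and
 * the arrows simplify the real call chain.
 *
 *   userspace atomic ioctl
 *     -> amdgpu_dm_atomic_check()          validate, build DC state
 *     -> amdgpu_dm_atomic_commit()         swap in the new state
 *       -> amdgpu_dm_atomic_commit_tail()
 *         -> dc_commit_state()             DC programs the hardware
 */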

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc_state->stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc_state->stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}
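
/*
 * Usage note (abbreviated, hedged): the two helpers above back the base
 * driver's display-function table, roughly:
 *
 *   static const struct amdgpu_display_funcs dm_display_funcs = {
 *           .vblank_get_counter = dm_vblank_get_counter,
 *           ...
 *   };
 *
 * The real table is defined further down in this file; the fragment here
 * is illustrative only.
 */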

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev->ddev;
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        struct dm_crtc_state *acrtc_state;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
                                 amdgpu_crtc->pflip_status,
                                 AMDGPU_FLIP_SUBMITTED,
                                 amdgpu_crtc->crtc_id,
                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        if (!e)
                WARN_ON(1);

        acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
        vrr_active = amdgpu_dm_vrr_active(acrtc_state);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
                                                        amdgpu_crtc->crtc_id);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                acrtc_state = to_dm_crtc_state(acrtc->base.state);

                DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
                                 amdgpu_dm_vrr_active(acrtc_state));

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (amdgpu_dm_vrr_active(acrtc_state)) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc_state->stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev->ddev->event_lock, flags);
                                mod_freesync_handle_v_update(
                                        adev->dm.freesync_module,
                                        acrtc_state->stream,
                                        &acrtc_state->vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                        adev->dm.dc,
                                        acrtc_state->stream,
                                        &acrtc_state->vrr_params.adjust);
                                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: ignored
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;
        unsigned long flags;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

        if (acrtc) {
                acrtc_state = to_dm_crtc_state(acrtc->base.state);

                DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
                                 amdgpu_dm_vrr_active(acrtc_state));

                /* Core vblank handling at start of front-porch is only possible
                 * in non-vrr mode, as only there vblank timestamping will give
                 * valid results while done in front-porch. Otherwise defer it
                 * to dm_vupdate_high_irq after end of front-porch.
                 */
                if (!amdgpu_dm_vrr_active(acrtc_state))
                        drm_crtc_handle_vblank(&acrtc->base);

                /* Following stuff must happen at start of vblank, for crc
                 * computation and below-the-range btr support in vrr mode.
                 */
                amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

                if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
                    acrtc_state->vrr_params.supported &&
                    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
                        spin_lock_irqsave(&adev->ddev->event_lock, flags);
                        mod_freesync_handle_v_update(
                                adev->dm.freesync_module,
                                acrtc_state->stream,
                                &acrtc_state->vrr_params);

                        dc_stream_adjust_vmin_vmax(
                                adev->dm.dc,
                                acrtc_state->stream,
                                &acrtc_state->vrr_params.adjust);
                        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                }
        }
}

static int dm_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct dm_comressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

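/*
 * Note (hedged): the consumer on the other side of this component
 * interface is expected to be the HDA audio driver, which binds through
 * the same component framework and then pulls ELDs via
 * amdgpu_dm_audio_component_get_eld() above; the eld_notify callback
 * tells it that a pin's ELD changed.
 */
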
#ifdef CONFIG_DRM_AMD_DC_DMUB
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const unsigned int psp_header_bytes = 0x100;
        const unsigned int psp_footer_bytes = 0x100;
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_region_params region_params;
        struct dmub_srv_region_info region_info;
        struct dmub_srv_fb_params fb_params;
        struct dmub_srv_fb_info fb_info;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i;
        int r;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        /* Calculate the size of all the regions for the DMUB service. */
        memset(&region_params, 0, sizeof(region_params));

        region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                                        psp_header_bytes - psp_footer_bytes;
        region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
        region_params.vbios_size = adev->dm.dc->ctx->dc_bios->bios_size;

        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);

        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB region info: %d\n", status);
                return -EINVAL;
        }

        /*
         * Allocate a framebuffer based on the total size of all the regions.
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
                return r;

        /* Rebase the regions on the framebuffer address. */
        memset(&fb_params, 0, sizeof(fb_params));
        fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
        fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
        fb_params.region_info = &region_info;

        status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, &fb_info);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
                return -EINVAL;
        }

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        psp_header_bytes;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        memcpy(fb_info.fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
               region_params.inst_const_size);
        memcpy(fb_info.fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
               region_params.bss_data_size);
        memcpy(fb_info.fb[DMUB_WINDOW_3_VBIOS].cpu_addr,
               adev->dm.dc->ctx->dc_bios->bios, region_params.vbios_size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info.num_fb; ++i)
                hw_params.fb[i] = &fb_info.fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

#endif
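
/*
 * Rough layout of the buffer dm_dmub_hw_init() above carves out of VRAM
 * (a sketch derived from the region parameters used there, not an
 * authoritative map; window numbering follows the DMUB_WINDOW_* copies):
 *
 *   adev->dm.dmub_bo  (region_info.fb_size bytes)
 *   +--------------------------------------------------+
 *   | DMUB_WINDOW_0_INST_CONST : firmware instructions |
 *   |   (inst_const_bytes minus PSP header/footer)     |
 *   | ...                                              |
 *   | DMUB_WINDOW_2_BSS_DATA   : firmware data         |
 *   | DMUB_WINDOW_3_VBIOS      : VBIOS image           |
 *   +--------------------------------------------------+
 */
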
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
#ifdef CONFIG_DRM_AMD_DC_DMUB
        int r;
#endif

        adev->dm.ddev = adev->ddev;
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->rev_id;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        /*
         * TODO debug why this doesn't work on Raven
         */
        if (adev->flags & AMD_IS_APU &&
            adev->asic_type >= CHIP_CARRIZO &&
            adev->asic_type <= CHIP_RAVEN)
                init_data.flags.gpu_vm_support = true;

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
        init_data.soc_bounding_box = adev->dm.soc_bounding_box;
#endif

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#ifdef CONFIG_DRM_AMD_DC_DMUB
        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

#endif
        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
                "amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* Update the actual used number of crtc */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

#if defined(CONFIG_DEBUG_FS)
        if (dtn_debugfs_init(adev))
                DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
#endif

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif
#ifdef CONFIG_DRM_AMD_DC_DMUB
        if (adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);
#endif

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);

        return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
        case CHIP_RENOIR:
                return 0;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

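/*
 * DMCU firmware layout as consumed by load_dmcu_fw() above (a sketch
 * derived from the header fields it reads; both pieces come from the
 * same blob and are page-aligned when accounted):
 *
 *   [ ERAM image : ucode_size_bytes - intv_size_bytes ]  -> DMCU_ERAM
 *   [ INTV image : intv_size_bytes                    ]  -> DMCU_INTV
 */
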
#ifdef CONFIG_DRM_AMD_DC_DMUB
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
        struct amdgpu_device *adev = ctx;

        return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
                                     uint32_t value)
{
        struct amdgpu_device *adev = ctx;

        return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
        struct dmub_srv_create_params create_params;
        const struct dmcub_firmware_header_v1_0 *hdr;
        const char *fw_name_dmub;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
        int r;

        switch (adev->asic_type) {
        case CHIP_RENOIR:
                dmub_asic = DMUB_ASIC_DCN21;
                fw_name_dmub = FIRMWARE_RENOIR_DMUB;
                break;

        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }

        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        if (!adev->dm.dmub_srv) {
                DRM_ERROR("Failed to allocate DMUB service!\n");
                return -ENOMEM;
        }

        memset(&create_params, 0, sizeof(create_params));
        create_params.user_ctx = adev;
        create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
        create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
        create_params.asic = dmub_asic;

        status = dmub_srv_create(adev->dm.dmub_srv, &create_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error creating DMUB service: %d\n", status);
                return -EINVAL;
        }

        r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
        if (r) {
                DRM_ERROR("DMUB firmware loading failed: %d\n", r);
                return 0;
        }

        r = amdgpu_ucode_validate(adev->dm.dmub_fw);
        if (r) {
                DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
                return 0;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                AMDGPU_UCODE_ID_DMCUB;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

#endif
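
/*
 * Hedged summary of the DMUB bring-up implemented in this file (assumes
 * CONFIG_DRM_AMD_DC_DMUB is set and the ASIC carries a DMCUB):
 *
 *   dm_sw_init() -> dm_dmub_sw_init()   create dmub_srv, fetch firmware
 *   dm_hw_init() -> amdgpu_dm_init()
 *                -> dm_dmub_hw_init()   place regions, program hardware,
 *                                       then register the service with DC
 *                                       via dc_dmub_srv_create()
 */
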
static int dm_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
#ifdef CONFIG_DRM_AMD_DC_DMUB
        int r;

        r = dm_dmub_sw_init(adev);
        if (r)
                return r;

#endif

        return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

#ifdef CONFIG_DRM_AMD_DC_DMUB
        if (adev->dm.dmub_srv) {
                dmub_srv_destroy(adev->dm.dmub_srv);
                adev->dm.dmub_srv = NULL;
        }

        if (adev->dm.dmub_fw) {
                release_firmware(adev->dm.dmub_fw);
                adev->dm.dmub_fw = NULL;
        }

#endif
        if (adev->dm.fw_dmcu) {
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
        }

        return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        int ret = 0;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type == dc_connection_mst_branch &&
                    aconnector->mst_mgr.aux) {
                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                                         aconnector,
                                         aconnector->base.base.id);

                        ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
                        if (ret < 0) {
                                DRM_ERROR("DM_MST: Failed to start MST\n");
                                aconnector->dc_link->type =
                                        dc_connection_single;
                                break;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        return ret;
}

static int dm_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        bool ret = false;

        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;

        params.set = 0;
        params.backlight_ramping_start = 0xCCCC;
        params.backlight_ramping_reduction = 0xCCCCCCCC;
        params.backlight_lut_array_size = 16;
        params.backlight_lut_array = linear_lut;

        /* Min backlight level after ABM reduction, Don't allow below 1%
         * 0xFFFF x 0.01 = 0x28F
         */
        params.min_abm_backlight = 0x28F;

        /* todo will enable for navi10 */
        if (adev->asic_type <= CHIP_RAVEN) {
                ret = dmcu_load_iram(dmcu, params);

                if (!ret)
                        return -EINVAL;
        }

        return detect_mst_link_for_all_connectors(adev->ddev);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct drm_dp_mst_topology_mgr *mgr;
        int ret;
        bool need_hotplug = false;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type != dc_connection_mst_branch ||
                    aconnector->mst_port)
                        continue;

                mgr = &aconnector->mst_mgr;

                if (suspend) {
                        drm_dp_mst_topology_mgr_suspend(mgr);
                } else {
                        ret = drm_dp_mst_topology_mgr_resume(mgr, true);
                        if (ret < 0) {
                                drm_dp_mst_topology_mgr_set_mst(mgr, false);
                                need_hotplug = true;
                        }
                }
        }
        drm_connector_list_iter_end(&iter);

        if (need_hotplug)
                drm_kms_helper_hotplug_event(dev);
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        /* Create DAL display manager */
        amdgpu_dm_init(adev);
        amdgpu_dm_hpd_init(adev);

        return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_dm_hpd_fini(adev);

        amdgpu_dm_irq_fini(adev);
        amdgpu_dm_fini(adev);
        return 0;
}

static int dm_suspend(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct amdgpu_display_manager *dm = &adev->dm;
        int ret = 0;

        WARN_ON(adev->dm.cached_state);
        adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

        s3_handle_mst(adev->ddev, true);

        amdgpu_dm_irq_suspend(adev);

        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

        return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
                                             struct drm_crtc *crtc)
{
        uint32_t i;
        struct drm_connector_state *new_con_state;
        struct drm_connector *connector;
        struct drm_crtc *crtc_from_state;

        for_each_new_connector_in_state(state, connector, new_con_state, i) {
                crtc_from_state = new_con_state->crtc;

                if (crtc_from_state == crtc)
                        return to_amdgpu_dm_connector(connector);
        }

        return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
        struct dc_sink_init_data sink_init_data = { 0 };
        struct display_sink_capability sink_caps = { 0 };
        enum dc_edid_status edid_status;
        struct dc_context *dc_ctx = link->ctx;
        struct dc_sink *sink = NULL;
        struct dc_sink *prev_sink = NULL;

        link->type = dc_connection_none;
        prev_sink = link->local_sink;

        if (prev_sink != NULL)
                dc_sink_retain(prev_sink);

        switch (link->connector_signal) {
        case SIGNAL_TYPE_HDMI_TYPE_A: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
                break;
        }

        case SIGNAL_TYPE_DVI_SINGLE_LINK: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
                break;
        }

        case SIGNAL_TYPE_DVI_DUAL_LINK: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
                break;
        }

        case SIGNAL_TYPE_LVDS: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_LVDS;
                break;
        }

        case SIGNAL_TYPE_EDP: {
                sink_caps.transaction_type =
                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                sink_caps.signal = SIGNAL_TYPE_EDP;
                break;
        }

        case SIGNAL_TYPE_DISPLAY_PORT: {
                sink_caps.transaction_type =
                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
                break;
        }

        default:
                DC_ERROR("Invalid connector type! signal:%d\n",
                         link->connector_signal);
                return;
        }

        sink_init_data.link = link;
        sink_init_data.sink_signal = sink_caps.signal;

        sink = dc_sink_create(&sink_init_data);
        if (!sink) {
                DC_ERROR("Failed to create sink!\n");
                return;
        }

        /* dc_sink_create returns a new reference */
        link->local_sink = sink;

        edid_status = dm_helpers_read_local_edid(
                        link->ctx,
                        link,
                        sink);

        if (edid_status != EDID_OK)
                DC_ERROR("Failed to read EDID");

}

static int dm_resume(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct drm_device *ddev = adev->ddev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct drm_crtc *crtc;
        struct drm_crtc_state *new_crtc_state;
        struct dm_crtc_state *dm_new_crtc_state;
        struct drm_plane *plane;
        struct drm_plane_state *new_plane_state;
        struct dm_plane_state *dm_new_plane_state;
        struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
        enum dc_connection_type new_connection_type = dc_connection_none;
        int i;

        /* Recreate dc_state - DC invalidates it when setting power state to S3. */
        dc_release_state(dm_state->context);
        dm_state->context = dc_create_state(dm->dc);
        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
        dc_resource_state_construct(dm->dc, dm_state->context);

        /* power on hardware */
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

        /* program HPD filter */
        dc_resume(dm->dc);

        /*
         * early enable HPD Rx IRQ, should be done before set mode as short
         * pulse interrupts are used for MST
         */
        amdgpu_dm_irq_resume_early(adev);

        /* On resume we need to rewrite the MSTM control bits to enable MST */
        s3_handle_mst(ddev, false);

        /* Do detection */
        drm_connector_list_iter_begin(ddev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);

                /*
                 * this is the case when traversing through already created
                 * MST connectors, should be skipped
                 */
                if (aconnector->mst_port)
                        continue;

                mutex_lock(&aconnector->hpd_lock);
                if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");

                if (aconnector->base.force && new_connection_type == dc_connection_none)
                        emulated_link_detect(aconnector->dc_link);
                else
                        dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

                if (aconnector->fake_enable && aconnector->dc_link->local_sink)
                        aconnector->fake_enable = false;

                if (aconnector->dc_sink)
                        dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                amdgpu_dm_update_connector_after_detect(aconnector);
                mutex_unlock(&aconnector->hpd_lock);
        }
        drm_connector_list_iter_end(&iter);

        /* Force mode set in atomic commit */
        for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
                new_crtc_state->active_changed = true;

        /*
         * atomic_check is expected to create the dc states. We need to release
         * them here, since they were duplicated as part of the suspend
         * procedure.
         */
        for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                if (dm_new_crtc_state->stream) {
                        WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
                        dc_stream_release(dm_new_crtc_state->stream);
                        dm_new_crtc_state->stream = NULL;
                }
        }

        for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
                dm_new_plane_state = to_dm_plane_state(new_plane_state);
                if (dm_new_plane_state->dc_state) {
                        WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
                        dc_plane_state_release(dm_new_plane_state->dc_state);
                        dm_new_plane_state->dc_state = NULL;
                }
        }

        drm_atomic_helper_resume(ddev, dm->cached_state);

        dm->cached_state = NULL;

        amdgpu_dm_irq_resume_late(adev);

        return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

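/*
 * Hedged example: the base driver is expected to register this IP block
 * roughly as below (simplified; the real call sites live in the per-ASIC
 * setup code, e.g. soc15.c, not in this file):
 *
 *   if (amdgpu_device_has_dc_support(adev))
 *           amdgpu_device_ip_block_add(adev, &dm_ip_block);
 */
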
static const struct amd_ip_funcs amdgpu_dm_funcs = {
        .name = "dm",
        .early_init = dm_early_init,
        .late_init = dm_late_init,
        .sw_init = dm_sw_init,
        .sw_fini = dm_sw_fini,
        .hw_init = dm_hw_init,
        .hw_fini = dm_hw_fini,
        .suspend = dm_suspend,
        .resume = dm_resume,
        .is_idle = dm_is_idle,
        .wait_for_idle = dm_wait_for_idle,
        .check_soft_reset = dm_check_soft_reset,
        .soft_reset = dm_soft_reset,
        .set_clockgating_state = dm_set_clockgating_state,
        .set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &amdgpu_dm_funcs,
};

/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
        .fb_create = amdgpu_display_user_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
        .atomic_check = amdgpu_dm_atomic_check,
        .atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
        .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        struct dc_sink *sink;

        /* MST handled by drm_mst framework */
        if (aconnector->mst_mgr.mst_state == true)
                return;

        sink = aconnector->dc_link->local_sink;
        if (sink)
                dc_sink_retain(sink);

        /*
         * Edid mgmt connector gets first update only in mode_valid hook and then
         * the connector sink is set to either fake or physical sink depending on
         * link status.
         * Skip if already done during boot.
         */
        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
                        && aconnector->dc_em_sink) {

                /*
                 * For S3 resume with headless use em_sink to fake stream
                 * because on resume connector->sink is set to NULL
                 */
                mutex_lock(&dev->mode_config.mutex);

                if (sink) {
                        if (aconnector->dc_sink) {
                                amdgpu_dm_update_freesync_caps(connector, NULL);
                                /*
                                 * retain and release below are used to
                                 * bump up refcount for sink because the link doesn't point
                                 * to it anymore after disconnect, so on next crtc to connector
                                 * reshuffle by UMD we will get into unwanted dc_sink release
                                 */
                                dc_sink_release(aconnector->dc_sink);
                        }
                        aconnector->dc_sink = sink;
                        dc_sink_retain(aconnector->dc_sink);
                        amdgpu_dm_update_freesync_caps(connector,
                                        aconnector->edid);
                } else {
                        amdgpu_dm_update_freesync_caps(connector, NULL);
                        if (!aconnector->dc_sink) {
                                aconnector->dc_sink = aconnector->dc_em_sink;
                                dc_sink_retain(aconnector->dc_sink);
                        }
                }

                mutex_unlock(&dev->mode_config.mutex);

                if (sink)
                        dc_sink_release(sink);
                return;
        }

        /*
         * TODO: temporary guard to look for proper fix
         * if this sink is MST sink, we should not do anything
         */
        if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
                dc_sink_release(sink);
                return;
        }

        if (aconnector->dc_sink == sink) {
                /*
                 * We got a DP short pulse (Link Loss, DP CTS, etc...).
                 * Do nothing!!
                 */
                DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
                                 aconnector->connector_id);
                if (sink)
                        dc_sink_release(sink);
                return;
        }

        DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
                         aconnector->connector_id, aconnector->dc_sink, sink);

        mutex_lock(&dev->mode_config.mutex);

        /*
         * 1. Update status of the drm connector
         * 2. Send an event and let userspace tell us what to do
         */
        if (sink) {
                /*
                 * TODO: check if we still need the S3 mode update workaround.
                 * If yes, put it here.
                 */
                if (aconnector->dc_sink)
                        amdgpu_dm_update_freesync_caps(connector, NULL);

                aconnector->dc_sink = sink;
                dc_sink_retain(aconnector->dc_sink);
                if (sink->dc_edid.length == 0) {
                        aconnector->edid = NULL;
                        drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
                } else {
                        aconnector->edid =
                                (struct edid *) sink->dc_edid.raw_edid;

                        drm_connector_update_edid_property(connector,
                                                           aconnector->edid);
                        drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
                                            aconnector->edid);
                }
                amdgpu_dm_update_freesync_caps(connector, aconnector->edid);

        } else {
                drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
                amdgpu_dm_update_freesync_caps(connector, NULL);
                drm_connector_update_edid_property(connector, NULL);
                aconnector->num_modes = 0;
                dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
                /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
                if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
                        connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
        }

        mutex_unlock(&dev->mode_config.mutex);

        if (sink)
                dc_sink_release(sink);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = dev->dev_private;
#endif

	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (for the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

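	/*
	 * Keep servicing and ACKing ESI events until the sink reports no new
	 * IRQ, bounded by max_process_count so a misbehaving sink cannot keep
	 * us spinning here forever.
	 */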
	while (dret == dpcd_bytes_to_read &&
			process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = dev->dev_private;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO: Temporarily hold a mutex so that the HPD interrupt does not
	 * conflict over the GPIO; once an i2c helper is implemented, this
	 * mutex should be retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
		hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
#endif
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

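		/*
		 * The DC_IRQ_SOURCE_VBLANKn values are contiguous, so
		 * subtracting the first source yields a zero-based index
		 * into the vblank_params array.
		 */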
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
			i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

struct dm_atomic_state *
dm_atomic_get_old_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *old_obj_state;
	int i;

	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(old_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev->ddev,
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	r = amdgpu_dm_audio_init(adev);
	if (r)
		return r;

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
		dm->backlight_caps.caps_valid = true;
	} else {
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	uint32_t brightness = bd->props.brightness;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;
	/*
	 * The brightness input is in the range 0-255.
	 * It needs to be rescaled to be between the
	 * requested min and max input signal.
	 *
	 * It also needs to be scaled up by 0x101 to
	 * match the DC interface which has a range of
	 * 0 to 0xffff.
	 */
	brightness =
		brightness
		* 0x101
		* (caps.max_input_signal - caps.min_input_signal)
		/ AMDGPU_MAX_BL_LEVEL
		+ caps.min_input_signal * 0x101;
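	/*
	 * Worked example with the default caps (min 12, max 255): input 0
	 * maps to 12 * 0x101 = 0x0c0c, and input 255 maps to
	 * 255 * 0x101 * 243 / 255 + 12 * 0x101 = 0xffff, so the 8-bit input
	 * range lands exactly on the 16-bit DC range.
	 */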

	if (dc_link_set_backlight_level(dm->backlight_link,
			brightness, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int ret = dc_link_get_backlight_level(dm->backlight_link);

	if (ret == DC_ERROR_UNEXPECTED)
		return bd->props.brightness;
	return ret;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm);

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;
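	/*
	 * Plane IDs at or beyond max_streams are extra overlay/underlay
	 * planes that never serve as a primary, so 0xff exposes them to
	 * every possible CRTC without violating the IGT constraint above.
	 */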

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}

	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
	case CHIP_RENOIR:
#endif
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	case CHIP_NAVI10:
	case CHIP_NAVI12:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
	case CHIP_RENOIR:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	if (!crtc_state->enable)
		return false;

	return crtc_state->active;
}

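/* A modereset is required when the CRTC is being disabled or deactivated. */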
static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	return !crtc_state->enable || !crtc_state->active;
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
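	/*
	 * e.g. a src_x of 0x18000 is 1.5 in 16.16 fixed point; the >> 16
	 * below keeps only the integer part (1) and drops the fraction.
	 */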
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* TODO: Validate scaling per-format with DC plane caps */
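	/*
	 * The scale factors below are in units of 1/1000, so the accepted
	 * range of 250..16000 corresponds to 0.25x downscale through 16x
	 * upscale on each axis.
	 */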
	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < 250 || scale_w > 16000)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < 250 || scale_h > 16000)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}

static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
		       uint64_t *tiling_flags)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	int r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	amdgpu_bo_unreserve(rbo);

	return r;
}

static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
{
	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
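	/*
	 * DCC_OFFSET_256B encodes the metadata offset in 256-byte units:
	 * e.g. an encoded value of 0x40 places the DCC metadata 0x4000
	 * bytes past the surface base address. Zero means no DCC metadata.
	 */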

	return offset ? (address + offset * 256) : 0;
}

static int
fill_plane_dcc_attributes(struct amdgpu_device *adev,
			  const struct amdgpu_framebuffer *afb,
			  const enum surface_pixel_format format,
			  const enum dc_rotation_angle rotation,
			  const struct plane_size *plane_size,
			  const union dc_tiling_info *tiling_info,
			  const uint64_t info,
			  struct dc_plane_dcc_param *dcc,
			  struct dc_plane_address *address)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;
	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
	uint64_t dcc_address;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!offset)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	if (!dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

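	/*
	 * Reject the surface if DC requires independent 64B blocks for DCC
	 * but the buffer was not allocated with DCC_INDEPENDENT_64B set.
	 */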
	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	dcc->enable = 1;
	dcc->meta_pitch =
		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
	dcc->independent_64b_blks = i64b;

	dcc_address = get_dcc_address(afb->address, info);
	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);

	return 0;
}

static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(afb->address);
		address->grph.addr.high_part = upper_32_bits(afb->address);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(afb->address);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(afb->address);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_VEGA20 ||
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	    adev->asic_type == CHIP_NAVI10 ||
	    adev->asic_type == CHIP_NAVI14 ||
	    adev->asic_type == CHIP_NAVI12 ||
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
	    adev->asic_type == CHIP_RENOIR ||
#endif
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		tiling_info->gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		tiling_info->gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		tiling_info->gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		tiling_info->gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		tiling_info->gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		tiling_info->gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		tiling_info->gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		tiling_info->gfx9.shaderEnable = 1;

		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
						plane_size, tiling_info,
						tiling_flags, dcc, address);
		if (ret)
			return ret;
	}

	return 0;
}

static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

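	/*
	 * DRM plane alpha is 16-bit (0x0000-0xffff) while DC takes an 8-bit
	 * global alpha, so shift out the low byte; 0xffff means fully opaque
	 * and leaves global alpha disabled.
	 */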
	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}
3422
3423static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3424 struct dc_plane_state *dc_plane_state,
3425 struct drm_plane_state *plane_state,
3426 struct drm_crtc_state *crtc_state)
e7b07cee 3427{
cf020d49 3428 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
3429 const struct amdgpu_framebuffer *amdgpu_fb =
3430 to_amdgpu_framebuffer(plane_state->fb);
695af5f9
NK
3431 struct dc_scaling_info scaling_info;
3432 struct dc_plane_info plane_info;
3433 uint64_t tiling_flags;
3434 int ret;
e7b07cee 3435
695af5f9
NK
3436 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3437 if (ret)
3438 return ret;
e7b07cee 3439
695af5f9
NK
3440 dc_plane_state->src_rect = scaling_info.src_rect;
3441 dc_plane_state->dst_rect = scaling_info.dst_rect;
3442 dc_plane_state->clip_rect = scaling_info.clip_rect;
3443 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 3444
695af5f9 3445 ret = get_fb_info(amdgpu_fb, &tiling_flags);
e7b07cee
HW
3446 if (ret)
3447 return ret;
3448
695af5f9
NK
3449 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3450 &plane_info,
3451 &dc_plane_state->address);
004fefa3
NK
3452 if (ret)
3453 return ret;
3454
695af5f9
NK
3455 dc_plane_state->format = plane_info.format;
3456 dc_plane_state->color_space = plane_info.color_space;
3457 dc_plane_state->format = plane_info.format;
3458 dc_plane_state->plane_size = plane_info.plane_size;
3459 dc_plane_state->rotation = plane_info.rotation;
3460 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3461 dc_plane_state->stereo_format = plane_info.stereo_format;
3462 dc_plane_state->tiling_info = plane_info.tiling_info;
3463 dc_plane_state->visible = plane_info.visible;
3464 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3465 dc_plane_state->global_alpha = plane_info.global_alpha;
3466 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3467 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 3468 dc_plane_state->layer_index = plane_info.layer_index;
695af5f9 3469
e277adc5
LSL
3470 /*
3471 * Always set input transfer function, since plane state is refreshed
3472 * every time.
3473 */
cf020d49
NK
3474 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3475 if (ret)
3476 return ret;
e7b07cee 3477
cf020d49 3478 return 0;
e7b07cee
HW
3479}
3480
3ee6b26b
AD
3481static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3482 const struct dm_connector_state *dm_state,
3483 struct dc_stream_state *stream)
e7b07cee
HW
3484{
3485 enum amdgpu_rmx_type rmx_type;
3486
3487 struct rect src = { 0 }; /* viewport in composition space */
3488 struct rect dst = { 0 }; /* stream addressable area */
3489
3490 /* no mode. nothing to be done */
3491 if (!mode)
3492 return;
3493
3494 /* Full screen scaling by default */
3495 src.width = mode->hdisplay;
3496 src.height = mode->vdisplay;
3497 dst.width = stream->timing.h_addressable;
3498 dst.height = stream->timing.v_addressable;
3499
f4791779
HW
3500 if (dm_state) {
3501 rmx_type = dm_state->scaling;
3502 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3503 if (src.width * dst.height <
3504 src.height * dst.width) {
3505 /* height needs less upscaling/more downscaling */
3506 dst.width = src.width *
3507 dst.height / src.height;
3508 } else {
3509 /* width needs less upscaling/more downscaling */
3510 dst.height = src.height *
3511 dst.width / src.width;
3512 }
3513 } else if (rmx_type == RMX_CENTER) {
3514 dst = src;
e7b07cee 3515 }
e7b07cee 3516
f4791779
HW
3517 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3518 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 3519
f4791779
HW
3520 if (dm_state->underscan_enable) {
3521 dst.x += dm_state->underscan_hborder / 2;
3522 dst.y += dm_state->underscan_vborder / 2;
3523 dst.width -= dm_state->underscan_hborder;
3524 dst.height -= dm_state->underscan_vborder;
3525 }
e7b07cee
HW
3526 }
3527
3528 stream->src = src;
3529 stream->dst = dst;
3530
f1ad2f5e 3531 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
e7b07cee
HW
3532 dst.x, dst.y, dst.width, dst.height);
3533
3534}
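/*
 * Worked example (illustrative): scaling a 1280x1024 mode onto a 1920x1080
 * stream with RMX_ASPECT. Since 1280 * 1080 < 1024 * 1920, the height needs
 * less upscaling, so:
 *
 *   dst.width  = 1280 * 1080 / 1024 = 1350
 *   dst.height = 1080
 *   dst.x      = (1920 - 1350) / 2  = 285
 *   dst.y      = 0
 *
 * i.e. the source is aspect-scaled to 1350x1080 and pillarboxed with
 * 285-pixel borders on either side.
 */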
3535
3ee6b26b 3536static enum dc_color_depth
42ba01fc
NK
3537convert_color_depth_from_display_info(const struct drm_connector *connector,
3538 const struct drm_connector_state *state)
e7b07cee 3539{
01c22997
NK
3540 uint8_t bpc = (uint8_t)connector->display_info.bpc;
3541
3542 /* Assume 8 bpc by default if no bpc is specified. */
3543 bpc = bpc ? bpc : 8;
e7b07cee 3544
01933ba4
NK
3545 if (!state)
3546 state = connector->state;
3547
42ba01fc 3548 if (state) {
01c22997
NK
3549 /*
3550 * Cap display bpc based on the user requested value.
3551 *
3552 * The value for state->max_bpc may not be correctly updated
3553 * depending on when the connector gets added to the state
3554 * or if this was called outside of atomic check, so it
3555 * can't be used directly.
3556 */
3557 bpc = min(bpc, state->max_requested_bpc);
3558
1825fd34
NK
3559 /* Round down to the nearest even number. */
3560 bpc = bpc - (bpc & 1);
3561 }
07e3a1cf 3562
e7b07cee
HW
3563 switch (bpc) {
3564 case 0:
1f6010a9
DF
3565 /*
3566 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
3567 * EDID revisions before 1.4
3568 * TODO: Fix EDID parsing
3569 */
3570 return COLOR_DEPTH_888;
3571 case 6:
3572 return COLOR_DEPTH_666;
3573 case 8:
3574 return COLOR_DEPTH_888;
3575 case 10:
3576 return COLOR_DEPTH_101010;
3577 case 12:
3578 return COLOR_DEPTH_121212;
3579 case 14:
3580 return COLOR_DEPTH_141414;
3581 case 16:
3582 return COLOR_DEPTH_161616;
3583 default:
3584 return COLOR_DEPTH_UNDEFINED;
3585 }
3586}
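/*
 * Example (illustrative): a sink whose EDID reports 12 bpc combined with a
 * connector max_requested_bpc of 10 yields min(12, 10) = 10, i.e.
 * COLOR_DEPTH_101010; an EDID reporting no bpc at all falls back to the
 * 8 bpc default and returns COLOR_DEPTH_888.
 */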
3587
3ee6b26b
AD
3588static enum dc_aspect_ratio
3589get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 3590{
e11d4147
LSL
3591 /* 1-1 mapping, since both enums follow the HDMI spec. */
3592 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
3593}
3594
3ee6b26b
AD
3595static enum dc_color_space
3596get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
3597{
3598 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3599
3600 switch (dc_crtc_timing->pixel_encoding) {
3601 case PIXEL_ENCODING_YCBCR422:
3602 case PIXEL_ENCODING_YCBCR444:
3603 case PIXEL_ENCODING_YCBCR420:
3604 {
3605 /*
3606 * 27.03 MHz is the separation point between HDTV and SDTV
3607 * according to the HDMI spec; above it we use YCbCr709,
3608 * below it YCbCr601
3609 */
380604e2 3610 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
3611 if (dc_crtc_timing->flags.Y_ONLY)
3612 color_space =
3613 COLOR_SPACE_YCBCR709_LIMITED;
3614 else
3615 color_space = COLOR_SPACE_YCBCR709;
3616 } else {
3617 if (dc_crtc_timing->flags.Y_ONLY)
3618 color_space =
3619 COLOR_SPACE_YCBCR601_LIMITED;
3620 else
3621 color_space = COLOR_SPACE_YCBCR601;
3622 }
3623
3624 }
3625 break;
3626 case PIXEL_ENCODING_RGB:
3627 color_space = COLOR_SPACE_SRGB;
3628 break;
3629
3630 default:
3631 WARN_ON(1);
3632 break;
3633 }
3634
3635 return color_space;
3636}
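/*
 * Example (illustrative): a 480p YCbCr stream with a 27.00 MHz pixel clock
 * (pix_clk_100hz == 270000) falls below the 270300 threshold and gets
 * COLOR_SPACE_YCBCR601, while a 720p stream at 74.25 MHz
 * (pix_clk_100hz == 742500) is above it and gets COLOR_SPACE_YCBCR709.
 */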
3637
400443e8
ML
3638static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
3639{
3640 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
3641 return;
3642
3643 timing_out->display_color_depth--;
3644}
3645
3646static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
3647 const struct drm_display_info *info)
3648{
3649 int normalized_clk;
3650 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
3651 return;
3652 do {
380604e2 3653 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
3654 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3655 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3656 normalized_clk /= 2;
3657 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
3658 switch (timing_out->display_color_depth) {
3659 case COLOR_DEPTH_101010:
3660 normalized_clk = (normalized_clk * 30) / 24;
3661 break;
3662 case COLOR_DEPTH_121212:
3663 normalized_clk = (normalized_clk * 36) / 24;
3664 break;
3665 case COLOR_DEPTH_161616:
3666 normalized_clk = (normalized_clk * 48) / 24;
3667 break;
3668 default:
3669 return;
3670 }
3671 if (normalized_clk <= info->max_tmds_clock)
3672 return;
3673 reduce_mode_colour_depth(timing_out);
3674
3675 } while (timing_out->display_color_depth > COLOR_DEPTH_888);
3676
3677}
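/*
 * Worked example (illustrative): a 297 MHz HDMI mode
 * (pix_clk_100hz == 2970000) requested at 10 bpc against a sink with
 * max_tmds_clock == 340000 kHz:
 *
 *   normalized_clk = 2970000 / 10     = 297000 kHz
 *   at 10 bpc:       297000 * 30 / 24 = 371250 kHz  (> 340000)
 *
 * so the depth is reduced to COLOR_DEPTH_888 and the loop exits with the
 * mode running at 8 bpc.
 */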
e7b07cee 3678
42ba01fc
NK
3679static void fill_stream_properties_from_drm_display_mode(
3680 struct dc_stream_state *stream,
3681 const struct drm_display_mode *mode_in,
3682 const struct drm_connector *connector,
3683 const struct drm_connector_state *connector_state,
3684 const struct dc_stream_state *old_stream)
e7b07cee
HW
3685{
3686 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 3687 const struct drm_display_info *info = &connector->display_info;
d4252eee 3688 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
3689 struct hdmi_vendor_infoframe hv_frame;
3690 struct hdmi_avi_infoframe avi_frame;
e7b07cee 3691
acf83f86
WL
3692 memset(&hv_frame, 0, sizeof(hv_frame));
3693 memset(&avi_frame, 0, sizeof(avi_frame));
3694
e7b07cee
HW
3695 timing_out->h_border_left = 0;
3696 timing_out->h_border_right = 0;
3697 timing_out->v_border_top = 0;
3698 timing_out->v_border_bottom = 0;
3699 /* TODO: un-hardcode */
fe61a2f1 3700 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 3701 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 3702 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
3703 else if (drm_mode_is_420_also(info, mode_in)
3704 && aconnector->force_yuv420_output)
3705 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 3706 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 3707 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
3708 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3709 else
3710 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3711
3712 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3713 timing_out->display_color_depth = convert_color_depth_from_display_info(
42ba01fc 3714 connector, connector_state);
e7b07cee
HW
3715 timing_out->scan_type = SCANNING_TYPE_NODATA;
3716 timing_out->hdmi_vic = 0;
b333730d
BL
3717
3718 if (old_stream) {
3719 timing_out->vic = old_stream->timing.vic;
3720 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3721 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3722 } else {
3723 timing_out->vic = drm_match_cea_mode(mode_in);
3724 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
3725 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
3726 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
3727 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
3728 }
e7b07cee 3729
1cb1d477
WL
3730 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
3731 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
3732 timing_out->vic = avi_frame.video_code;
3733 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
3734 timing_out->hdmi_vic = hv_frame.vic;
3735 }
3736
e7b07cee
HW
3737 timing_out->h_addressable = mode_in->crtc_hdisplay;
3738 timing_out->h_total = mode_in->crtc_htotal;
3739 timing_out->h_sync_width =
3740 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
3741 timing_out->h_front_porch =
3742 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
3743 timing_out->v_total = mode_in->crtc_vtotal;
3744 timing_out->v_addressable = mode_in->crtc_vdisplay;
3745 timing_out->v_front_porch =
3746 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
3747 timing_out->v_sync_width =
3748 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 3749 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 3750 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
3751
3752 stream->output_color_space = get_output_color_space(timing_out);
3753
e43a432c
AK
3754 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
3755 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ceb3dbb4 3756 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
400443e8 3757 adjust_colour_depth_from_display_info(timing_out, info);
e7b07cee
HW
3758}
3759
3ee6b26b
AD
3760static void fill_audio_info(struct audio_info *audio_info,
3761 const struct drm_connector *drm_connector,
3762 const struct dc_sink *dc_sink)
e7b07cee
HW
3763{
3764 int i = 0;
3765 int cea_revision = 0;
3766 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
3767
3768 audio_info->manufacture_id = edid_caps->manufacturer_id;
3769 audio_info->product_id = edid_caps->product_id;
3770
3771 cea_revision = drm_connector->display_info.cea_rev;
3772
090afc1e 3773 strscpy(audio_info->display_name,
d2b2562c 3774 edid_caps->display_name,
090afc1e 3775 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 3776
b830ebc9 3777 if (cea_revision >= 3) {
e7b07cee
HW
3778 audio_info->mode_count = edid_caps->audio_mode_count;
3779
3780 for (i = 0; i < audio_info->mode_count; ++i) {
3781 audio_info->modes[i].format_code =
3782 (enum audio_format_code)
3783 (edid_caps->audio_modes[i].format_code);
3784 audio_info->modes[i].channel_count =
3785 edid_caps->audio_modes[i].channel_count;
3786 audio_info->modes[i].sample_rates.all =
3787 edid_caps->audio_modes[i].sample_rate;
3788 audio_info->modes[i].sample_size =
3789 edid_caps->audio_modes[i].sample_size;
3790 }
3791 }
3792
3793 audio_info->flags.all = edid_caps->speaker_flags;
3794
3795 /* TODO: We only check for progressive mode; check for interlaced mode too */
b830ebc9 3796 if (drm_connector->latency_present[0]) {
e7b07cee
HW
3797 audio_info->video_latency = drm_connector->video_latency[0];
3798 audio_info->audio_latency = drm_connector->audio_latency[0];
3799 }
3800
3801 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
3802
3803}
3804
3ee6b26b
AD
3805static void
3806copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
3807 struct drm_display_mode *dst_mode)
e7b07cee
HW
3808{
3809 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
3810 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
3811 dst_mode->crtc_clock = src_mode->crtc_clock;
3812 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
3813 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 3814 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
3815 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
3816 dst_mode->crtc_htotal = src_mode->crtc_htotal;
3817 dst_mode->crtc_hskew = src_mode->crtc_hskew;
3818 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
3819 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
3820 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
3821 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
3822 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
3823}
3824
3ee6b26b
AD
3825static void
3826decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
3827 const struct drm_display_mode *native_mode,
3828 bool scale_enabled)
e7b07cee
HW
3829{
3830 if (scale_enabled) {
3831 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3832 } else if (native_mode->clock == drm_mode->clock &&
3833 native_mode->htotal == drm_mode->htotal &&
3834 native_mode->vtotal == drm_mode->vtotal) {
3835 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3836 } else {
3837 /* neither scaling nor an amdgpu-inserted mode, no need to patch */
3838 }
3839}
3840
aed15309
ML
3841static struct dc_sink *
3842create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 3843{
2e0ac3d6 3844 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 3845 struct dc_sink *sink = NULL;
2e0ac3d6
HW
3846 sink_init_data.link = aconnector->dc_link;
3847 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
3848
3849 sink = dc_sink_create(&sink_init_data);
423788c7 3850 if (!sink) {
2e0ac3d6 3851 DRM_ERROR("Failed to create sink!\n");
aed15309 3852 return NULL;
423788c7 3853 }
2e0ac3d6 3854 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 3855
aed15309 3856 return sink;
2e0ac3d6
HW
3857}
3858
fa2123db
ML
3859static void set_multisync_trigger_params(
3860 struct dc_stream_state *stream)
3861{
3862 if (stream->triggered_crtc_reset.enabled) {
3863 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
3864 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
3865 }
3866}
3867
3868static void set_master_stream(struct dc_stream_state *stream_set[],
3869 int stream_count)
3870{
3871 int j, highest_rfr = 0, master_stream = 0;
3872
3873 for (j = 0; j < stream_count; j++) {
3874 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
3875 int refresh_rate = 0;
3876
380604e2 3877 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
3878 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
3879 if (refresh_rate > highest_rfr) {
3880 highest_rfr = refresh_rate;
3881 master_stream = j;
3882 }
3883 }
3884 }
3885 for (j = 0; j < stream_count; j++) {
03736f4c 3886 if (stream_set[j])
fa2123db
ML
3887 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
3888 }
3889}
3890
3891static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
3892{
3893 int i = 0;
3894
3895 if (context->stream_count < 2)
3896 return;
3897 for (i = 0; i < context->stream_count ; i++) {
3898 if (!context->streams[i])
3899 continue;
1f6010a9
DF
3900 /*
3901 * TODO: add a function to read AMD VSDB bits and set
fa2123db 3902 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 3903 * For now it's set to false
fa2123db
ML
3904 */
3905 set_multisync_trigger_params(context->streams[i]);
3906 }
3907 set_master_stream(context->streams, context->stream_count);
3908}
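/*
 * Example (illustrative): for a 1080p60 stream with a 148.5 MHz pixel clock
 * and a 2200x1125 total timing, set_master_stream() computes
 *
 *   refresh_rate = (1485000 * 100) / (2200 * 1125) = 60 Hz
 *
 * and the stream with the highest such rate becomes the triggered-reset
 * event source for every synchronized stream in the context.
 */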
3909
3ee6b26b
AD
3910static struct dc_stream_state *
3911create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
3912 const struct drm_display_mode *drm_mode,
b333730d
BL
3913 const struct dm_connector_state *dm_state,
3914 const struct dc_stream_state *old_stream)
e7b07cee
HW
3915{
3916 struct drm_display_mode *preferred_mode = NULL;
391ef035 3917 struct drm_connector *drm_connector;
42ba01fc
NK
3918 const struct drm_connector_state *con_state =
3919 dm_state ? &dm_state->base : NULL;
0971c40e 3920 struct dc_stream_state *stream = NULL;
e7b07cee
HW
3921 struct drm_display_mode mode = *drm_mode;
3922 bool native_mode_found = false;
b333730d
BL
3923 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
3924 int mode_refresh;
58124bf8 3925 int preferred_refresh = 0;
df2f1015
DF
3926#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3927 struct dsc_dec_dpcd_caps dsc_caps;
3928 uint32_t link_bandwidth_kbps;
3929#endif
b333730d 3930
aed15309 3931 struct dc_sink *sink = NULL;
b830ebc9 3932 if (aconnector == NULL) {
e7b07cee 3933 DRM_ERROR("aconnector is NULL!\n");
64245fa7 3934 return stream;
e7b07cee
HW
3935 }
3936
e7b07cee 3937 drm_connector = &aconnector->base;
2e0ac3d6 3938
f4ac176e 3939 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
3940 sink = create_fake_sink(aconnector);
3941 if (!sink)
3942 return stream;
aed15309
ML
3943 } else {
3944 sink = aconnector->dc_sink;
dcd5fb82 3945 dc_sink_retain(sink);
f4ac176e 3946 }
2e0ac3d6 3947
aed15309 3948 stream = dc_create_stream_for_sink(sink);
4562236b 3949
b830ebc9 3950 if (stream == NULL) {
e7b07cee 3951 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 3952 goto finish;
e7b07cee
HW
3953 }
3954
ceb3dbb4
JL
3955 stream->dm_stream_context = aconnector;
3956
4a36fcba
WL
3957 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
3958 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
3959
e7b07cee
HW
3960 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
3961 /* Search for preferred mode */
3962 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
3963 native_mode_found = true;
3964 break;
3965 }
3966 }
3967 if (!native_mode_found)
3968 preferred_mode = list_first_entry_or_null(
3969 &aconnector->base.modes,
3970 struct drm_display_mode,
3971 head);
3972
b333730d
BL
3973 mode_refresh = drm_mode_vrefresh(&mode);
3974
b830ebc9 3975 if (preferred_mode == NULL) {
1f6010a9
DF
3976 /*
3977 * This may not be an error, the use case is when we have no
e7b07cee
HW
3978 * usermode calls to reset and set mode upon hotplug. In this
3979 * case, we call set mode ourselves to restore the previous mode
3980 * and the mode list may not yet be filled in.
3981 */
f1ad2f5e 3982 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee
HW
3983 } else {
3984 decide_crtc_timing_for_drm_display_mode(
3985 &mode, preferred_mode,
f4791779 3986 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 3987 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
3988 }
3989
f783577c
JFZ
3990 if (!dm_state)
3991 drm_mode_set_crtcinfo(&mode, 0);
3992
b333730d
BL
3993 /*
3994 * If scaling is enabled and refresh rate didn't change
3995 * we copy the vic and polarities of the old timings
3996 */
3997 if (!scale || mode_refresh != preferred_refresh)
3998 fill_stream_properties_from_drm_display_mode(stream,
42ba01fc 3999 &mode, &aconnector->base, con_state, NULL);
b333730d
BL
4000 else
4001 fill_stream_properties_from_drm_display_mode(stream,
42ba01fc 4002 &mode, &aconnector->base, con_state, old_stream);
b333730d 4003
39a4eb85 4004#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
df2f1015
DF
4005 stream->timing.flags.DSC = 0;
4006
4007 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4008 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4009 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4010 &dsc_caps);
4011 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4012 dc_link_get_link_cap(aconnector->dc_link));
4013
4014 if (dsc_caps.is_dsc_supported)
0417df16 4015 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 4016 &dsc_caps,
0417df16 4017 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
df2f1015
DF
4018 link_bandwidth_kbps,
4019 &stream->timing,
4020 &stream->timing.dsc_cfg))
4021 stream->timing.flags.DSC = 1;
4022 }
39a4eb85
WL
4023#endif
4024
e7b07cee
HW
4025 update_stream_scaling_settings(&mode, dm_state, stream);
4026
4027 fill_audio_info(
4028 &stream->audio_info,
4029 drm_connector,
aed15309 4030 sink);
e7b07cee 4031
ceb3dbb4 4032 update_stream_signal(stream, sink);
9182b4cb 4033
d832fc3b
WL
4034 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4035 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
8c322309
RL
4036 if (stream->link->psr_feature_enabled) {
4037 struct dc *core_dc = stream->link->ctx->dc;
d832fc3b 4038
8c322309
RL
4039 if (dc_is_dmcu_initialized(core_dc)) {
4040 struct dmcu *dmcu = core_dc->res_pool->dmcu;
4041
4042 stream->psr_version = dmcu->dmcu_version.psr_version;
4043 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4044 }
4045 }
aed15309 4046finish:
dcd5fb82 4047 dc_sink_release(sink);
9e3efe3e 4048
e7b07cee
HW
4049 return stream;
4050}
4051
7578ecda 4052static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
4053{
4054 drm_crtc_cleanup(crtc);
4055 kfree(crtc);
4056}
4057
4058static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 4059 struct drm_crtc_state *state)
e7b07cee
HW
4060{
4061 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4062
4063 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4064 if (cur->stream)
4065 dc_stream_release(cur->stream);
4066
4067
4068 __drm_atomic_helper_crtc_destroy_state(state);
4069
4070
4071 kfree(state);
4072}
4073
4074static void dm_crtc_reset_state(struct drm_crtc *crtc)
4075{
4076 struct dm_crtc_state *state;
4077
4078 if (crtc->state)
4079 dm_crtc_destroy_state(crtc, crtc->state);
4080
4081 state = kzalloc(sizeof(*state), GFP_KERNEL);
4082 if (WARN_ON(!state))
4083 return;
4084
4085 crtc->state = &state->base;
4086 crtc->state->crtc = crtc;
4087
4088}
4089
4090static struct drm_crtc_state *
4091dm_crtc_duplicate_state(struct drm_crtc *crtc)
4092{
4093 struct dm_crtc_state *state, *cur;
4094
4095 cur = to_dm_crtc_state(crtc->state);
4096
4097 if (WARN_ON(!crtc->state))
4098 return NULL;
4099
2004f45e 4100 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
4101 if (!state)
4102 return NULL;
e7b07cee
HW
4103
4104 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4105
4106 if (cur->stream) {
4107 state->stream = cur->stream;
4108 dc_stream_retain(state->stream);
4109 }
4110
d6ef9b41
NK
4111 state->active_planes = cur->active_planes;
4112 state->interrupts_enabled = cur->interrupts_enabled;
180db303 4113 state->vrr_params = cur->vrr_params;
98e6436d 4114 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 4115 state->abm_level = cur->abm_level;
bb47de73
NK
4116 state->vrr_supported = cur->vrr_supported;
4117 state->freesync_config = cur->freesync_config;
14b25846 4118 state->crc_src = cur->crc_src;
cf020d49
NK
4119 state->cm_has_degamma = cur->cm_has_degamma;
4120 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
98e6436d 4121
e7b07cee
HW
4122 /* TODO: Duplicate dc_stream once the stream object is flattened */
4123
4124 return &state->base;
4125}
4126
d2574c33
MK
4127static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4128{
4129 enum dc_irq_source irq_source;
4130 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4131 struct amdgpu_device *adev = crtc->dev->dev_private;
4132 int rc;
4133
4134 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4135
4136 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4137
4138 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4139 acrtc->crtc_id, enable ? "en" : "dis", rc);
4140 return rc;
4141}
589d2739
HW
4142
4143static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4144{
4145 enum dc_irq_source irq_source;
4146 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4147 struct amdgpu_device *adev = crtc->dev->dev_private;
d2574c33
MK
4148 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4149 int rc = 0;
4150
4151 if (enable) {
4152 /* vblank irq on -> Only need vupdate irq in vrr mode */
4153 if (amdgpu_dm_vrr_active(acrtc_state))
4154 rc = dm_set_vupdate_irq(crtc, true);
4155 } else {
4156 /* vblank irq off -> vupdate irq off */
4157 rc = dm_set_vupdate_irq(crtc, false);
4158 }
4159
4160 if (rc)
4161 return rc;
589d2739
HW
4162
4163 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
a0e30392 4164 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
589d2739
HW
4165}
4166
4167static int dm_enable_vblank(struct drm_crtc *crtc)
4168{
4169 return dm_set_vblank(crtc, true);
4170}
4171
4172static void dm_disable_vblank(struct drm_crtc *crtc)
4173{
4174 dm_set_vblank(crtc, false);
4175}
4176
e7b07cee
HW
4177/* Only the options currently available to the driver are implemented */
4178static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4179 .reset = dm_crtc_reset_state,
4180 .destroy = amdgpu_dm_crtc_destroy,
4181 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4182 .set_config = drm_atomic_helper_set_config,
4183 .page_flip = drm_atomic_helper_page_flip,
4184 .atomic_duplicate_state = dm_crtc_duplicate_state,
4185 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 4186 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 4187 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 4188 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
589d2739
HW
4189 .enable_vblank = dm_enable_vblank,
4190 .disable_vblank = dm_disable_vblank,
e7b07cee
HW
4191};
4192
4193static enum drm_connector_status
4194amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4195{
4196 bool connected;
c84dec2f 4197 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 4198
1f6010a9
DF
4199 /*
4200 * Notes:
e7b07cee
HW
4201 * 1. This interface is NOT called in context of HPD irq.
4202 * 2. This interface *is called* in context of user-mode ioctl. Which
1f6010a9
DF
4203 * makes it a bad place for *any* MST-related activity.
4204 */
e7b07cee 4205
8580d60b
HW
4206 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4207 !aconnector->fake_enable)
e7b07cee
HW
4208 connected = (aconnector->dc_sink != NULL);
4209 else
4210 connected = (aconnector->base.force == DRM_FORCE_ON);
4211
4212 return (connected ? connector_status_connected :
4213 connector_status_disconnected);
4214}
4215
3ee6b26b
AD
4216int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4217 struct drm_connector_state *connector_state,
4218 struct drm_property *property,
4219 uint64_t val)
e7b07cee
HW
4220{
4221 struct drm_device *dev = connector->dev;
4222 struct amdgpu_device *adev = dev->dev_private;
4223 struct dm_connector_state *dm_old_state =
4224 to_dm_connector_state(connector->state);
4225 struct dm_connector_state *dm_new_state =
4226 to_dm_connector_state(connector_state);
4227
4228 int ret = -EINVAL;
4229
4230 if (property == dev->mode_config.scaling_mode_property) {
4231 enum amdgpu_rmx_type rmx_type;
4232
4233 switch (val) {
4234 case DRM_MODE_SCALE_CENTER:
4235 rmx_type = RMX_CENTER;
4236 break;
4237 case DRM_MODE_SCALE_ASPECT:
4238 rmx_type = RMX_ASPECT;
4239 break;
4240 case DRM_MODE_SCALE_FULLSCREEN:
4241 rmx_type = RMX_FULL;
4242 break;
4243 case DRM_MODE_SCALE_NONE:
4244 default:
4245 rmx_type = RMX_OFF;
4246 break;
4247 }
4248
4249 if (dm_old_state->scaling == rmx_type)
4250 return 0;
4251
4252 dm_new_state->scaling = rmx_type;
4253 ret = 0;
4254 } else if (property == adev->mode_info.underscan_hborder_property) {
4255 dm_new_state->underscan_hborder = val;
4256 ret = 0;
4257 } else if (property == adev->mode_info.underscan_vborder_property) {
4258 dm_new_state->underscan_vborder = val;
4259 ret = 0;
4260 } else if (property == adev->mode_info.underscan_property) {
4261 dm_new_state->underscan_enable = val;
4262 ret = 0;
c1ee92f9
DF
4263 } else if (property == adev->mode_info.abm_level_property) {
4264 dm_new_state->abm_level = val;
4265 ret = 0;
e7b07cee
HW
4266 }
4267
4268 return ret;
4269}
4270
3ee6b26b
AD
4271int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4272 const struct drm_connector_state *state,
4273 struct drm_property *property,
4274 uint64_t *val)
e7b07cee
HW
4275{
4276 struct drm_device *dev = connector->dev;
4277 struct amdgpu_device *adev = dev->dev_private;
4278 struct dm_connector_state *dm_state =
4279 to_dm_connector_state(state);
4280 int ret = -EINVAL;
4281
4282 if (property == dev->mode_config.scaling_mode_property) {
4283 switch (dm_state->scaling) {
4284 case RMX_CENTER:
4285 *val = DRM_MODE_SCALE_CENTER;
4286 break;
4287 case RMX_ASPECT:
4288 *val = DRM_MODE_SCALE_ASPECT;
4289 break;
4290 case RMX_FULL:
4291 *val = DRM_MODE_SCALE_FULLSCREEN;
4292 break;
4293 case RMX_OFF:
4294 default:
4295 *val = DRM_MODE_SCALE_NONE;
4296 break;
4297 }
4298 ret = 0;
4299 } else if (property == adev->mode_info.underscan_hborder_property) {
4300 *val = dm_state->underscan_hborder;
4301 ret = 0;
4302 } else if (property == adev->mode_info.underscan_vborder_property) {
4303 *val = dm_state->underscan_vborder;
4304 ret = 0;
4305 } else if (property == adev->mode_info.underscan_property) {
4306 *val = dm_state->underscan_enable;
4307 ret = 0;
c1ee92f9
DF
4308 } else if (property == adev->mode_info.abm_level_property) {
4309 *val = dm_state->abm_level;
4310 ret = 0;
e7b07cee 4311 }
c1ee92f9 4312
e7b07cee
HW
4313 return ret;
4314}
4315
526c654a
ED
4316static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4317{
4318 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4319
4320 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4321}
4322
7578ecda 4323static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 4324{
c84dec2f 4325 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
4326 const struct dc_link *link = aconnector->dc_link;
4327 struct amdgpu_device *adev = connector->dev->dev_private;
4328 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 4329
e7b07cee
HW
4330#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4331 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4332
89fc8d4e 4333 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
4334 link->type != dc_connection_none &&
4335 dm->backlight_dev) {
4336 backlight_device_unregister(dm->backlight_dev);
4337 dm->backlight_dev = NULL;
e7b07cee
HW
4338 }
4339#endif
dcd5fb82
MF
4340
4341 if (aconnector->dc_em_sink)
4342 dc_sink_release(aconnector->dc_em_sink);
4343 aconnector->dc_em_sink = NULL;
4344 if (aconnector->dc_sink)
4345 dc_sink_release(aconnector->dc_sink);
4346 aconnector->dc_sink = NULL;
4347
e86e8947 4348 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
4349 drm_connector_unregister(connector);
4350 drm_connector_cleanup(connector);
526c654a
ED
4351 if (aconnector->i2c) {
4352 i2c_del_adapter(&aconnector->i2c->base);
4353 kfree(aconnector->i2c);
4354 }
4355
e7b07cee
HW
4356 kfree(connector);
4357}
4358
4359void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4360{
4361 struct dm_connector_state *state =
4362 to_dm_connector_state(connector->state);
4363
df099b9b
LSL
4364 if (connector->state)
4365 __drm_atomic_helper_connector_destroy_state(connector->state);
4366
e7b07cee
HW
4367 kfree(state);
4368
4369 state = kzalloc(sizeof(*state), GFP_KERNEL);
4370
4371 if (state) {
4372 state->scaling = RMX_OFF;
4373 state->underscan_enable = false;
4374 state->underscan_hborder = 0;
4375 state->underscan_vborder = 0;
01933ba4 4376 state->base.max_requested_bpc = 8;
3261e013
ML
4377 state->vcpi_slots = 0;
4378 state->pbn = 0;
c3e50f89
NK
4379 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4380 state->abm_level = amdgpu_dm_abm_level;
4381
df099b9b 4382 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
4383 }
4384}
4385
3ee6b26b
AD
4386struct drm_connector_state *
4387amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
4388{
4389 struct dm_connector_state *state =
4390 to_dm_connector_state(connector->state);
4391
4392 struct dm_connector_state *new_state =
4393 kmemdup(state, sizeof(*state), GFP_KERNEL);
4394
98e6436d
AK
4395 if (!new_state)
4396 return NULL;
e7b07cee 4397
98e6436d
AK
4398 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4399
4400 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 4401 new_state->abm_level = state->abm_level;
922454c2
NK
4402 new_state->scaling = state->scaling;
4403 new_state->underscan_enable = state->underscan_enable;
4404 new_state->underscan_hborder = state->underscan_hborder;
4405 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
4406 new_state->vcpi_slots = state->vcpi_slots;
4407 new_state->pbn = state->pbn;
98e6436d 4408 return &new_state->base;
e7b07cee
HW
4409}
4410
4411static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4412 .reset = amdgpu_dm_connector_funcs_reset,
4413 .detect = amdgpu_dm_connector_detect,
4414 .fill_modes = drm_helper_probe_single_connector_modes,
4415 .destroy = amdgpu_dm_connector_destroy,
4416 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4417 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4418 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a
ED
4419 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4420 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
4421};
4422
e7b07cee
HW
4423static int get_modes(struct drm_connector *connector)
4424{
4425 return amdgpu_dm_connector_get_modes(connector);
4426}
4427
c84dec2f 4428static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
4429{
4430 struct dc_sink_init_data init_params = {
4431 .link = aconnector->dc_link,
4432 .sink_signal = SIGNAL_TYPE_VIRTUAL
4433 };
70e8ffc5 4434 struct edid *edid;
e7b07cee 4435
a89ff457 4436 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
4437 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4438 aconnector->base.name);
4439
4440 aconnector->base.force = DRM_FORCE_OFF;
4441 aconnector->base.override_edid = false;
4442 return;
4443 }
4444
70e8ffc5
HW
4445 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4446
e7b07cee
HW
4447 aconnector->edid = edid;
4448
4449 aconnector->dc_em_sink = dc_link_add_remote_sink(
4450 aconnector->dc_link,
4451 (uint8_t *)edid,
4452 (edid->extensions + 1) * EDID_LENGTH,
4453 &init_params);
4454
dcd5fb82 4455 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
4456 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4457 aconnector->dc_link->local_sink :
4458 aconnector->dc_em_sink;
dcd5fb82
MF
4459 dc_sink_retain(aconnector->dc_sink);
4460 }
e7b07cee
HW
4461}
4462
c84dec2f 4463static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
4464{
4465 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4466
1f6010a9
DF
4467 /*
4468 * In case of a headless boot with force-on for a DP managed connector,
e7b07cee
HW
4469 * these settings have to be != 0 to get an initial modeset
4470 */
4471 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4472 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4473 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4474 }
4475
4476
4477 aconnector->base.override_edid = true;
4478 create_eml_sink(aconnector);
4479}
4480
ba9ca088 4481enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 4482 struct drm_display_mode *mode)
e7b07cee
HW
4483{
4484 int result = MODE_ERROR;
4485 struct dc_sink *dc_sink;
4486 struct amdgpu_device *adev = connector->dev->dev_private;
4487 /* TODO: Unhardcode stream count */
0971c40e 4488 struct dc_stream_state *stream;
c84dec2f 4489 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
a39438f0 4490 enum dc_status dc_result = DC_OK;
e7b07cee
HW
4491
4492 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4493 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4494 return result;
4495
1f6010a9
DF
4496 /*
4497 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
4498 * EDID mgmt
4499 */
4500 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4501 !aconnector->dc_em_sink)
4502 handle_edid_mgmt(aconnector);
4503
c84dec2f 4504 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 4505
b830ebc9 4506 if (dc_sink == NULL) {
e7b07cee
HW
4507 DRM_ERROR("dc_sink is NULL!\n");
4508 goto fail;
4509 }
4510
b333730d 4511 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
b830ebc9 4512 if (stream == NULL) {
e7b07cee
HW
4513 DRM_ERROR("Failed to create stream for sink!\n");
4514 goto fail;
4515 }
4516
a39438f0
HW
4517 dc_result = dc_validate_stream(adev->dm.dc, stream);
4518
4519 if (dc_result == DC_OK)
e7b07cee 4520 result = MODE_OK;
a39438f0 4521 else
9f921b14 4522 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
a39438f0 4523 mode->hdisplay,
26e99ba6 4524 mode->vdisplay,
9f921b14
HW
4525 mode->clock,
4526 dc_result);
e7b07cee
HW
4527
4528 dc_stream_release(stream);
4529
4530fail:
4531 /* TODO: error handling */
4532 return result;
4533}
4534
88694af9
NK
4535static int fill_hdr_info_packet(const struct drm_connector_state *state,
4536 struct dc_info_packet *out)
4537{
4538 struct hdmi_drm_infoframe frame;
4539 unsigned char buf[30]; /* 26 + 4 */
4540 ssize_t len;
4541 int ret, i;
4542
4543 memset(out, 0, sizeof(*out));
4544
4545 if (!state->hdr_output_metadata)
4546 return 0;
4547
4548 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4549 if (ret)
4550 return ret;
4551
4552 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4553 if (len < 0)
4554 return (int)len;
4555
4556 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4557 if (len != 30)
4558 return -EINVAL;
4559
4560 /* Prepare the infopacket for DC. */
4561 switch (state->connector->connector_type) {
4562 case DRM_MODE_CONNECTOR_HDMIA:
4563 out->hb0 = 0x87; /* type */
4564 out->hb1 = 0x01; /* version */
4565 out->hb2 = 0x1A; /* length */
4566 out->sb[0] = buf[3]; /* checksum */
4567 i = 1;
4568 break;
4569
4570 case DRM_MODE_CONNECTOR_DisplayPort:
4571 case DRM_MODE_CONNECTOR_eDP:
4572 out->hb0 = 0x00; /* sdp id, zero */
4573 out->hb1 = 0x87; /* type */
4574 out->hb2 = 0x1D; /* payload len - 1 */
4575 out->hb3 = (0x13 << 2); /* sdp version */
4576 out->sb[0] = 0x01; /* version */
4577 out->sb[1] = 0x1A; /* length */
4578 i = 2;
4579 break;
4580
4581 default:
4582 return -EINVAL;
4583 }
4584
4585 memcpy(&out->sb[i], &buf[4], 26);
4586 out->valid = true;
4587
4588 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4589 sizeof(out->sb), false);
4590
4591 return 0;
4592}
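/*
 * Illustrative layout (HDMI case, derived from the code above): the packed
 * infoframe from hdmi_drm_infoframe_pack_only() is 30 bytes -- a 3-byte
 * header, a 1-byte checksum and 26 bytes of static metadata -- and the DC
 * infopacket then carries:
 *
 *   hb0 = 0x87 (Dynamic Range and Mastering infoframe type)
 *   hb1 = 0x01 (version), hb2 = 0x1A (26-byte payload)
 *   sb[0] = checksum from buf[3], sb[1..26] = metadata from buf[4..29]
 */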
4593
4594static bool
4595is_hdr_metadata_different(const struct drm_connector_state *old_state,
4596 const struct drm_connector_state *new_state)
4597{
4598 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4599 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4600
4601 if (old_blob != new_blob) {
4602 if (old_blob && new_blob &&
4603 old_blob->length == new_blob->length)
4604 return memcmp(old_blob->data, new_blob->data,
4605 old_blob->length);
4606
4607 return true;
4608 }
4609
4610 return false;
4611}
4612
4613static int
4614amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 4615 struct drm_atomic_state *state)
88694af9 4616{
51e857af
SP
4617 struct drm_connector_state *new_con_state =
4618 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
4619 struct drm_connector_state *old_con_state =
4620 drm_atomic_get_old_connector_state(state, conn);
4621 struct drm_crtc *crtc = new_con_state->crtc;
4622 struct drm_crtc_state *new_crtc_state;
4623 int ret;
4624
4625 if (!crtc)
4626 return 0;
4627
4628 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4629 struct dc_info_packet hdr_infopacket;
4630
4631 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4632 if (ret)
4633 return ret;
4634
4635 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4636 if (IS_ERR(new_crtc_state))
4637 return PTR_ERR(new_crtc_state);
4638
4639 /*
4640 * DC considers the stream backends changed if the
4641 * static metadata changes. Forcing the modeset also
4642 * gives a simple way for userspace to switch from
b232d4ed
NK
4643 * 8bpc to 10bpc when setting the metadata to enter
4644 * or exit HDR.
4645 *
4646 * Changing the static metadata after it's been
4647 * set is permissible, however. So only force a
4648 * modeset if we're entering or exiting HDR.
88694af9 4649 */
b232d4ed
NK
4650 new_crtc_state->mode_changed =
4651 !old_con_state->hdr_output_metadata ||
4652 !new_con_state->hdr_output_metadata;
88694af9
NK
4653 }
4654
4655 return 0;
4656}
4657
e7b07cee
HW
4658static const struct drm_connector_helper_funcs
4659amdgpu_dm_connector_helper_funcs = {
4660 /*
1f6010a9 4661 * If hotplugging a second, bigger display in FB console mode, bigger
b830ebc9 4662 * resolution modes will be filtered by drm_mode_validate_size(), and those
1f6010a9 4663 * modes are missing after the user starts lightdm. So we need to renew the
b830ebc9
HW
4664 * mode list in the get_modes callback, not just return the mode count
4665 */
e7b07cee
HW
4666 .get_modes = get_modes,
4667 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 4668 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
4669};
4670
4671static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4672{
4673}
4674
bc92c065
NK
4675static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
4676{
4677 struct drm_device *dev = new_crtc_state->crtc->dev;
4678 struct drm_plane *plane;
4679
4680 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
4681 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4682 return true;
4683 }
4684
4685 return false;
4686}
4687
d6ef9b41 4688static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
4689{
4690 struct drm_atomic_state *state = new_crtc_state->state;
4691 struct drm_plane *plane;
4692 int num_active = 0;
4693
4694 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
4695 struct drm_plane_state *new_plane_state;
4696
4697 /* Cursor planes are "fake". */
4698 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4699 continue;
4700
4701 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
4702
4703 if (!new_plane_state) {
4704 /*
4705 * The plane is enabled on the CRTC and hasn't changed
4706 * state. This means that it previously passed
4707 * validation and is therefore enabled.
4708 */
4709 num_active += 1;
4710 continue;
4711 }
4712
4713 /* We need a framebuffer to be considered enabled. */
4714 num_active += (new_plane_state->fb != NULL);
4715 }
4716
d6ef9b41
NK
4717 return num_active;
4718}
4719
4720/*
4721 * Sets whether interrupts should be enabled on a specific CRTC.
4722 * We require that the stream be enabled and that there exist active
4723 * DC planes on the stream.
4724 */
4725static void
4726dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
4727 struct drm_crtc_state *new_crtc_state)
4728{
4729 struct dm_crtc_state *dm_new_crtc_state =
4730 to_dm_crtc_state(new_crtc_state);
4731
4732 dm_new_crtc_state->active_planes = 0;
4733 dm_new_crtc_state->interrupts_enabled = false;
4734
4735 if (!dm_new_crtc_state->stream)
4736 return;
4737
4738 dm_new_crtc_state->active_planes =
4739 count_crtc_active_planes(new_crtc_state);
4740
4741 dm_new_crtc_state->interrupts_enabled =
4742 dm_new_crtc_state->active_planes > 0;
c14a005c
NK
4743}
4744
3ee6b26b
AD
4745static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
4746 struct drm_crtc_state *state)
e7b07cee
HW
4747{
4748 struct amdgpu_device *adev = crtc->dev->dev_private;
4749 struct dc *dc = adev->dm.dc;
4750 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
4751 int ret = -EINVAL;
4752
d6ef9b41
NK
4753 /*
4754 * Update interrupt state for the CRTC. This needs to happen whenever
4755 * the CRTC has changed or whenever any of its planes have changed.
4756 * Atomic check satisfies both of these requirements since the CRTC
4757 * is added to the state by DRM during drm_atomic_helper_check_planes.
4758 */
4759 dm_update_crtc_interrupt_state(crtc, state);
4760
9b690ef3
BL
4761 if (unlikely(!dm_crtc_state->stream &&
4762 modeset_required(state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
4763 WARN_ON(1);
4764 return ret;
4765 }
4766
1f6010a9 4767 /* In some use cases, like reset, no stream is attached */
e7b07cee
HW
4768 if (!dm_crtc_state->stream)
4769 return 0;
4770
bc92c065
NK
4771 /*
4772 * We want at least one hardware plane enabled to use
4773 * the stream with a cursor enabled.
4774 */
c14a005c 4775 if (state->enable && state->active &&
bc92c065 4776 does_crtc_have_active_cursor(state) &&
d6ef9b41 4777 dm_crtc_state->active_planes == 0)
c14a005c
NK
4778 return -EINVAL;
4779
62c933f9 4780 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
4781 return 0;
4782
4783 return ret;
4784}
4785
3ee6b26b
AD
4786static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
4787 const struct drm_display_mode *mode,
4788 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
4789{
4790 return true;
4791}
4792
4793static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
4794 .disable = dm_crtc_helper_disable,
4795 .atomic_check = dm_crtc_helper_atomic_check,
4796 .mode_fixup = dm_crtc_helper_mode_fixup
4797};
4798
4799static void dm_encoder_helper_disable(struct drm_encoder *encoder)
4800{
4801
4802}
4803
3261e013
ML
4804static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
4805{
4806 switch (display_color_depth) {
4807 case COLOR_DEPTH_666:
4808 return 6;
4809 case COLOR_DEPTH_888:
4810 return 8;
4811 case COLOR_DEPTH_101010:
4812 return 10;
4813 case COLOR_DEPTH_121212:
4814 return 12;
4815 case COLOR_DEPTH_141414:
4816 return 14;
4817 case COLOR_DEPTH_161616:
4818 return 16;
4819 default:
4820 break;
4821 }
4822 return 0;
4823}
4824
3ee6b26b
AD
4825static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
4826 struct drm_crtc_state *crtc_state,
4827 struct drm_connector_state *conn_state)
e7b07cee 4828{
3261e013
ML
4829 struct drm_atomic_state *state = crtc_state->state;
4830 struct drm_connector *connector = conn_state->connector;
4831 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4832 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
4833 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
4834 struct drm_dp_mst_topology_mgr *mst_mgr;
4835 struct drm_dp_mst_port *mst_port;
4836 enum dc_color_depth color_depth;
4837 int clock, bpp = 0;
4838
4839 if (!aconnector->port || !aconnector->dc_sink)
4840 return 0;
4841
4842 mst_port = aconnector->port;
4843 mst_mgr = &aconnector->mst_port->mst_mgr;
4844
4845 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
4846 return 0;
4847
4848 if (!state->duplicated) {
4849 color_depth = convert_color_depth_from_display_info(connector, conn_state);
4850 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
4851 clock = adjusted_mode->clock;
4852 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp);
4853 }
4854 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
4855 mst_mgr,
4856 mst_port,
4857 dm_new_connector_state->pbn);
4858 if (dm_new_connector_state->vcpi_slots < 0) {
4859 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
4860 return dm_new_connector_state->vcpi_slots;
4861 }
e7b07cee
HW
4862 return 0;
4863}
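/*
 * Rough worked example (illustrative; exact rounding is up to
 * drm_dp_calc_pbn_mode()): a 1080p60 mode (clock == 148500 kHz) at 8 bpc
 * gives bpp = 8 * 3 = 24, a peak rate of 148500 * 24 = 3564000 kbps, which
 * with the MST 64/54 unit and margin works out to roughly 530 PBN. That PBN
 * value is then turned into VCPI slots against the link's per-slot
 * bandwidth by drm_dp_atomic_find_vcpi_slots().
 */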
4864
4865const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
4866 .disable = dm_encoder_helper_disable,
4867 .atomic_check = dm_encoder_helper_atomic_check
4868};
4869
4870static void dm_drm_plane_reset(struct drm_plane *plane)
4871{
4872 struct dm_plane_state *amdgpu_state = NULL;
4873
4874 if (plane->state)
4875 plane->funcs->atomic_destroy_state(plane, plane->state);
4876
4877 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 4878 WARN_ON(amdgpu_state == NULL);
1f6010a9 4879
7ddaef96
NK
4880 if (amdgpu_state)
4881 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
4882}
4883
4884static struct drm_plane_state *
4885dm_drm_plane_duplicate_state(struct drm_plane *plane)
4886{
4887 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
4888
4889 old_dm_plane_state = to_dm_plane_state(plane->state);
4890 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
4891 if (!dm_plane_state)
4892 return NULL;
4893
4894 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
4895
3be5262e
HW
4896 if (old_dm_plane_state->dc_state) {
4897 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
4898 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
4899 }
4900
4901 return &dm_plane_state->base;
4902}
4903
4904void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 4905 struct drm_plane_state *state)
e7b07cee
HW
4906{
4907 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
4908
3be5262e
HW
4909 if (dm_plane_state->dc_state)
4910 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 4911
0627bbd3 4912 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
4913}
4914
4915static const struct drm_plane_funcs dm_plane_funcs = {
4916 .update_plane = drm_atomic_helper_update_plane,
4917 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 4918 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
4919 .reset = dm_drm_plane_reset,
4920 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
4921 .atomic_destroy_state = dm_drm_plane_destroy_state,
4922};
4923
3ee6b26b
AD
4924static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
4925 struct drm_plane_state *new_state)
e7b07cee
HW
4926{
4927 struct amdgpu_framebuffer *afb;
4928 struct drm_gem_object *obj;
5d43be0c 4929 struct amdgpu_device *adev;
e7b07cee 4930 struct amdgpu_bo *rbo;
e7b07cee 4931 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
4932 struct list_head list;
4933 struct ttm_validate_buffer tv;
4934 struct ww_acquire_ctx ticket;
e0634e8d 4935 uint64_t tiling_flags;
5d43be0c
CK
4936 uint32_t domain;
4937 int r;
e7b07cee
HW
4938
4939 dm_plane_state_old = to_dm_plane_state(plane->state);
4940 dm_plane_state_new = to_dm_plane_state(new_state);
4941
4942 if (!new_state->fb) {
f1ad2f5e 4943 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
4944 return 0;
4945 }
4946
4947 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 4948 obj = new_state->fb->obj[0];
e7b07cee 4949 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 4950 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
4951 INIT_LIST_HEAD(&list);
4952
4953 tv.bo = &rbo->tbo;
4954 tv.num_shared = 1;
4955 list_add(&tv.head, &list);
4956
9165fb87 4957 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
4958 if (r) {
4959 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 4960 return r;
0f257b09 4961 }
e7b07cee 4962
5d43be0c 4963 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 4964 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
4965 else
4966 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 4967
7b7c6c81 4968 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 4969 if (unlikely(r != 0)) {
30b7c614
HW
4970 if (r != -ERESTARTSYS)
4971 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 4972 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
4973 return r;
4974 }
4975
bb812f1e
JZ
4976 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
4977 if (unlikely(r != 0)) {
4978 amdgpu_bo_unpin(rbo);
0f257b09 4979 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 4980 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
4981 return r;
4982 }
7df7e505
NK
4983
4984 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
4985
0f257b09 4986 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 4987
7b7c6c81 4988 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
4989
4990 amdgpu_bo_ref(rbo);
4991
3be5262e
HW
4992 if (dm_plane_state_new->dc_state &&
4993 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
4994 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
e7b07cee 4995
320932bf 4996 fill_plane_buffer_attributes(
695af5f9
NK
4997 adev, afb, plane_state->format, plane_state->rotation,
4998 tiling_flags, &plane_state->tiling_info,
320932bf 4999 &plane_state->plane_size, &plane_state->dcc,
695af5f9 5000 &plane_state->address);
e7b07cee
HW
5001 }
5002
e7b07cee
HW
5003 return 0;
5004}
5005
3ee6b26b
AD
5006static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5007 struct drm_plane_state *old_state)
e7b07cee
HW
5008{
5009 struct amdgpu_bo *rbo;
e7b07cee
HW
5010 int r;
5011
5012 if (!old_state->fb)
5013 return;
5014
e68d14dd 5015 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
5016 r = amdgpu_bo_reserve(rbo, false);
5017 if (unlikely(r)) {
5018 DRM_ERROR("failed to reserve rbo before unpin\n");
5019 return;
b830ebc9
HW
5020 }
5021
5022 amdgpu_bo_unpin(rbo);
5023 amdgpu_bo_unreserve(rbo);
5024 amdgpu_bo_unref(&rbo);
e7b07cee
HW
5025}
5026
7578ecda
AD
5027static int dm_plane_atomic_check(struct drm_plane *plane,
5028 struct drm_plane_state *state)
cbd19488
AG
5029{
5030 struct amdgpu_device *adev = plane->dev->dev_private;
5031 struct dc *dc = adev->dm.dc;
78171832 5032 struct dm_plane_state *dm_plane_state;
695af5f9
NK
5033 struct dc_scaling_info scaling_info;
5034 int ret;
78171832
NK
5035
5036 dm_plane_state = to_dm_plane_state(state);
cbd19488 5037
3be5262e 5038 if (!dm_plane_state->dc_state)
9a3329b1 5039 return 0;
cbd19488 5040
695af5f9
NK
5041 ret = fill_dc_scaling_info(state, &scaling_info);
5042 if (ret)
5043 return ret;
a05bcff1 5044
62c933f9 5045 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
5046 return 0;
5047
5048 return -EINVAL;
5049}
5050
static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_plane_state *new_plane_state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};

/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}

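/*
 * Register one DRM plane backed by a DC plane cap: collect the supported
 * formats, initialize the universal plane, then expose blending and YUV
 * color-space properties only where the caps report hardware support.
 */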
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       NULL, plane->type, NULL);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap && plane_cap->pixel_format_support.nv12) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the crtc state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}


static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;

}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. Modes that appear
		 * later in the probed mode list could be of higher
		 * and preferred resolution. For example, a 3840x2160
		 * resolution in the base EDID preferred timing and a
		 * 4096x2160 preferred resolution in a DID extension
		 * block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!edid || !drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc. */
	aconnector->base.state->max_bpc = 8;
	aconnector->base.state->max_requested_bpc = 8;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    dc_is_dmcu_initialized(adev->dm.dc)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		drm_connector_attach_vrr_capable_property(
			&aconnector->base);
#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->asic_type >= CHIP_RAVEN)
			drm_connector_attach_content_protection_property(&aconnector->base, false);
#endif
	}
}

static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

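/*
 * Wrap a DC ddc_service in a standard Linux i2c_adapter so that the i2c
 * subsystem (and helpers such as DDC/EDID fetching) can drive the hardware
 * i2c engine owned by DC through amdgpu_dm_i2c_xfer() above.
 */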
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}


/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	drm_connector_register(&aconnector->base);
#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(aconnector);
	aconnector->debugfs_dpcd_address = 0;
	aconnector->debugfs_dpcd_size = 0;
#endif

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}

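/*
 * Build the possible_crtcs bitmask for an encoder: one bit per CRTC, i.e.
 * effectively GENMASK(num_crtc - 1, 0), with anything above six CRTCs
 * treated as six. Used for drm_encoder.possible_crtcs in encoder init below.
 */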
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = dev->dev_private;

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * This is not a correct translation, but it works as long as the
	 * VBLANK constant is the same as PFLIP's.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {

		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/* CP is being re-enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check that something is actually connected and enabled; otherwise
	 * we would start HDCP with nothing connected/enabled (hot-plug,
	 * headless S3, dpms).
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
	    aconnector->dc_sink != NULL)
		return true;

	if (old_state->content_protection == state->content_protection)
		return false;

	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}

static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector,
				      struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
		hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector);
	else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index);

}
#endif
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}

static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (crtc->primary->state) {
		/* avivo cursors are offset into the total surface */
		x += crtc->primary->state->src_x >> 16;
		y += crtc->primary->state->src_y >> 16;
	}

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}

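/*
 * Program the hardware cursor for the new plane state, or disable it when
 * get_cursor_position() reports it fully off-screen. The DC cursor calls are
 * made under dm.dc_lock since this can also run from the async update path
 * outside a full atomic commit.
 */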
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
			 acrtc->crtc_id);
}

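/*
 * Recompute the VRR timing adjustment and rebuild the VRR infopacket for a
 * flip on this stream. vrr_params is copied out and written back under
 * event_lock, since it can also be touched from interrupt context.
 */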
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = new_crtc_state->vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static void pre_update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

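/*
 * Handle a CRTC switching between fixed and variable refresh rate: grab or
 * drop the vblank reference and the vupdate interrupt accordingly. The
 * in-line comments below explain why vblank irq disable must be prevented
 * while VRR is active.
 */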
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}

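/*
 * Flush cursor updates for every cursor plane in the atomic state. Called
 * from amdgpu_dm_commit_planes() both before disabling all planes and after
 * programming them, so a stale cursor never outlives its pipe.
 */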
static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}

static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint64_t tiling_flags;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	bool swizzle = true;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
			swizzle = false;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		/*
		 * TODO: This might fail, hence better not used; wait
		 * explicitly on fences instead. In general this should be
		 * called for blocking commits as per framework helpers.
		 */
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r != 0))
			DRM_ERROR("failed to reserve buffer before flip\n");

		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);

		amdgpu_bo_unreserve(abo);

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state, tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;

	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
			usleep_range(1000, 1100);
		}

		if (acrtc_attach->base.state->event) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->psr_version &&
		    !acrtc_state->stream->link->psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_allow_active &&
			 swizzle) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}

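/*
 * Notify the audio side of the commit in two passes: first tear down the
 * audio instance for connectors that changed or lost their CRTC, then report
 * the new audio instance for every stream that went through a modeset.
 */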
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}

/*
 * Enable interrupts on CRTCs that are newly active, undergone
 * a modeset, or have active planes again.
 *
 * Done in two passes, based on the for_modeset flag:
 * Pass 1: For CRTCs going through modeset
 * Pass 2: For CRTCs going from 0 to n active planes
 *
 * Interrupts can only be enabled after the planes are programmed,
 * so this requires a two-pass approach since we don't want to
 * just defer the interrupts until after commit planes every time.
 */
static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
					     struct drm_atomic_state *state,
					     bool for_modeset)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;
#ifdef CONFIG_DEBUG_FS
	enum amdgpu_dm_pipe_crc_source source;
#endif

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);
		struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(old_crtc_state);
		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
		bool run_pass;

		run_pass = (for_modeset && modeset) ||
			   (!for_modeset && !modeset &&
			    !dm_old_crtc_state->interrupts_enabled);

		if (!run_pass)
			continue;

		if (!dm_new_crtc_state->interrupts_enabled)
			continue;

		manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
		/* The stream has changed so CRC capture needs to be re-enabled. */
		source = dm_new_crtc_state->crc_src;
		if (amdgpu_dm_is_valid_crc_source(source)) {
			amdgpu_dm_crtc_configure_crc_source(
				crtc, dm_new_crtc_state,
				dm_new_crtc_state->crc_src);
		}
#endif
	}
}

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	/*
	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
	 * a modeset, being disabled, or have no active planes.
	 *
	 * It's done in atomic commit rather than commit tail for now since
	 * some of these interrupt handlers access the current CRTC state and
	 * potentially the stream pointer itself.
	 *
	 * Since the atomic state is swapped within atomic commit and not within
	 * commit tail, this would lead to the new state (that hasn't been
	 * committed yet) being accessed from within the handlers.
	 *
	 * TODO: Fix this so we can do this in commit tail and not have to block
	 * in atomic check.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (dm_old_crtc_state->interrupts_enabled &&
		    (!dm_new_crtc_state->interrupts_enabled ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
			manage_dm_interrupts(adev, acrtc, false);
	}
	/*
	 * Add check here for SoC's that support hardware cursor plane, to
	 * unset legacy_cursor_update
	 */

	return drm_atomic_helper_commit(dev, state, nonblock);

	/*TODO Handle EINTR, reenable IRQ*/
}

b8592b48
LL
6653/**
6654 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
6655 * @state: The atomic state to commit
6656 *
6657 * This will tell DC to commit the constructed DC state from atomic_check,
6658 * programming the hardware. Any failures here implies a hardware failure, since
6659 * atomic check should have filtered anything non-kosher.
6660 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/*
		 * Handles the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery: userspace
				 * tries to set a mode on a display that is in
				 * fact disconnected, so dc_sink is NULL on the
				 * aconnector. We expect a mode reset to come
				 * soon.
				 *
				 * This can also happen when an unplug occurs
				 * while the resume sequence is still finishing.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream) {
				if (dm_old_crtc_state->stream->link->psr_allow_active)
					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);

				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			}
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}
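
	/*
	 * dc_commit_state() above programs the stream-level topology in one
	 * shot; per-plane surface programming is deferred to the
	 * amdgpu_dm_commit_planes() calls further below.
	 */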

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);

			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue);
	}
#endif
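
	/*
	 * Note the HDCP handling above: when a connector loses its stream
	 * while content protection is ENABLED, the property is downgraded to
	 * DESIRED so that protection gets re-established once the display is
	 * driven again.
	 */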

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* Update freesync active state. */
		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/* Enable interrupts for CRTCs going through a modeset. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, true);

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* Update planes when needed, per CRTC. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Enable interrupts for CRTCs going from 0 to n active planes. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, false);

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * Send a vblank event for every event not handled in flip and
	 * mark the event consumed for drm_atomic_helper_commit_hw_done.
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore.
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}


static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore the previous display setting */

	/* Attach connectors to drm_atomic_state */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto err;

	/* Attach crtc to drm_atomic_state */
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto err;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto err;


	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}

/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we cannot rely on usermode calls
	 * to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * then waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases them, the
	 * extra locks we are taking here will get released too.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming has completed and
		 * page flips are done.
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}
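
/*
 * The 10*HZ timeouts above give each outstanding commit ten seconds to
 * signal hw_done and flip_done; a timed-out wait (ret == 0) is only
 * logged, while an interrupted wait propagates its error to the caller.
 */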

static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
		vrefresh >= aconnector->min_vfreq &&
		vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz =
			aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
			aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_params, 0,
	       sizeof(new_crtc_state->vrr_params));
	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}
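
/*
 * Worked example for the unit conversion above: a panel advertising a
 * 40-60 Hz FreeSync range yields min_refresh_in_uhz = 40,000,000 and
 * max_refresh_in_uhz = 60,000,000, since the config is expressed in
 * micro-Hz (Hz * 1000000).
 */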

static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_stream_for_sink(aconnector,
						    &new_crtc_state->mode,
						    dm_new_conn_state,
						    dm_old_crtc_state->stream);

		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when MST
		 * connectors added in chained mode are not found in the
		 * existing crtc_state.
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->enable &&
	      new_crtc_state->active))
		return 0;
	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
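
/*
 * Stream refcounting in dm_update_crtc_state() above:
 * create_stream_for_sink() returns a referenced stream, attaching it to
 * the CRTC state takes an extra dc_stream_retain(), and the creation
 * reference is always dropped at skip_modeset (or in the fail path), so
 * the attached state ends up holding exactly one reference.
 */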

static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
	 * the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* TODO: Remove this once we can handle fast format changes. */
		if (old_other_state->fb && new_other_state->fb &&
		    old_other_state->fb->format != new_other_state->fb->format)
			return true;
	}

	return false;
}
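
/*
 * Example of the format check above: flipping any non-cursor plane on the
 * same CRTC from, say, XRGB8888 to NV12 changes the framebuffer's format
 * info and therefore forces a full plane reset rather than a fast update.
 */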

static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{

	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	bool needs_reset;
	int ret = 0;


	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* TODO: Implement atomic check for cursor plane */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			ret = -EINVAL;
			return ret;
		}


		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			new_plane_crtc->dev->dev_private,
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}


	return ret;
}

static int
dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
				    struct drm_atomic_state *state,
				    enum surface_update_type *out_type)
{
	struct dc *dc = dm->dc;
	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
	int i, j, num_plane, ret = 0;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_plane *plane;

	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
	struct dc_stream_status *status = NULL;

	struct dc_surface_update *updates;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);

	if (!updates) {
		DRM_ERROR("Failed to allocate plane updates\n");
		/* Set type to FULL to avoid crashing in DC */
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dc_scaling_info scaling_info;
		struct dc_stream_update stream_update;

		memset(&stream_update, 0, sizeof(stream_update));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info plane_info;
			struct dc_flip_addrs flip_addr;
			uint64_t tiling_flags;

			new_plane_crtc = new_plane_state->crtc;
			old_plane_crtc = old_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			updates[num_plane].surface = new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				stream_update.dst = new_dm_crtc_state->stream->dst;
				stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				updates[num_plane].gamma =
					new_dm_plane_state->dc_state->gamma_correction;
				updates[num_plane].in_transfer_func =
					new_dm_plane_state->dc_state->in_transfer_func;
				stream_update.gamut_remap =
					&new_dm_crtc_state->stream->gamut_remap_matrix;
				stream_update.output_csc_transform =
					&new_dm_crtc_state->stream->csc_color_matrix;
				stream_update.out_transfer_func =
					new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   &scaling_info);
			if (ret)
				goto cleanup;

			updates[num_plane].scaling_info = &scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags);
				if (ret)
					goto cleanup;

				memset(&flip_addr, 0, sizeof(flip_addr));

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					&plane_info,
					&flip_addr.address);
				if (ret)
					goto cleanup;

				updates[num_plane].plane_info = &plane_info;
				updates[num_plane].flip_addr = &flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
								  &stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(updates);

	*out_type = update_type;
	return ret;
}
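
/*
 * The surface_update_type computed above escalates from UPDATE_TYPE_FAST
 * through UPDATE_TYPE_MED to UPDATE_TYPE_FULL. As the atomic check below
 * shows, anything above FAST forces the global modeset lock and a full
 * dc_validate_global_state() pass.
 */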

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one
 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: -Error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;

	int ret, i;

	/*
	 * This bool will be set to true for any modeset/reset
	 * or plane update which implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	/* Perform validation of the MST topology in the state */
	ret = drm_dp_mst_atomic_check(state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling-change validation due to the inability to
	 * commit a new stream into the context w/o causing a full reset.
	 * Need to decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need to set
	 * the global lock. Leaving it in to check if we broke any corner cases:
	 * lock_and_validation_needed true  = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 */
		struct dm_atomic_state *new_dm_state, *old_dm_state;

		new_dm_state = dm_atomic_get_new_state(state);
		old_dm_state = dm_atomic_get_old_state(state);

		if (new_dm_state && old_dm_state) {
			if (new_dm_state->context)
				dc_release_state(new_dm_state->context);

			new_dm_state->context = old_dm_state->context;

			if (old_dm_state->context)
				dc_retain_state(old_dm_state->context);
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}

static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
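
/*
 * DP_MSA_TIMING_PAR_IGNORED above advertises that the sink can ignore the
 * MSA (Main Stream Attribute) timing parameters and follow the actual
 * video timing instead, which is a prerequisite for driving variable
 * refresh in the freesync path below.
 */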
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-zero, restrict freesync to DP and eDP only.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required == true && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if the monitor has continuous frequency mode.
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
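
/*
 * Example of the capability test above: an EDID monitor-range descriptor
 * advertising 48-75 Hz gives a 27 Hz span, comfortably above the 10 Hz
 * minimum, so the connector is marked freesync_capable; a fixed 59-60 Hz
 * panel would not qualify.
 */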

static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->psr_feature_enabled = dpcd_data[0] ? true : false;
		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
	}
}
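
/*
 * dpcd_data[0] above holds the sink's reported PSR version from the DPCD
 * receiver capability field; any non-zero version is treated as PSR
 * support on this eDP link.
 */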

/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	struct dc *dc = NULL;
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;
	dc = link->ctx->dc;

	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);

	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	struct dc_static_screen_events triggers = {0};

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	triggers.cursor_update = true;
	triggers.overlay_update = true;
	triggers.surface_update = true;

	dc_stream_set_static_screen_events(link->ctx->dc,
					   &stream, 1,
					   &triggers);

	return dc_link_set_psr_allow_active(link, true, false);
}
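
/*
 * The triggers configured above tell DC which events its static-screen
 * detection should watch: cursor, overlay, and surface updates all count
 * as screen activity, so PSR only engages once none of them occur for
 * the detection window.
 */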

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{

	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}