drm/amd/display: Prevent dpcd reads with passive dongles
[linux-2.6-block.git] drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

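/*
 * Rough layout of the DMUB firmware image implied by the offset math in
 * dm_dmub_hw_init()/dm_dmub_sw_init() below (an illustrative sketch, not
 * taken from a firmware specification):
 *
 *   ucode_array_offset_bytes
 *   |
 *   v
 *   +------------+------------------+------------+----------+
 *   | PSP header | inst_const (cw0) | PSP footer | bss/data |
 *   |   0x100    |                  |   0x100    |          |
 *   +------------+------------------+------------+----------+
 *   |<---------- inst_const_bytes -------------->|
 */
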
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

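/*
 * A rough sketch of that layering (illustrative only; the concrete entry
 * points are the amdgpu_dm_atomic_check()/amdgpu_dm_atomic_commit() hooks
 * declared below):
 *
 *   userspace -> DRM core (atomic ioctl)
 *       -> amdgpu_dm_atomic_check() / amdgpu_dm_atomic_commit()
 *           -> DC validation and programming
 *               -> display hardware
 */
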
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);


/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);


		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

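		/*
		 * Example (illustrative): h_position = 0x64 and
		 * v_position = 0x32 pack to *position = 0x00640032,
		 * i.e. the horizontal position occupies the upper 16
		 * bits of the reg-format value built below.
		 */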
		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

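/*
 * Rough scanout timeline assumed by the VRR handling in the handlers above
 * and below (an illustrative sketch, not hardware documentation):
 *
 *   ... active scanout | front porch (variable in VRR) | back porch | ...
 *                      ^                               ^
 *                      vblank start                    vline 0 / vupdate
 *
 * With VRR the front porch is stretched to an unknown length, so vblank
 * timestamps are only trustworthy once scanout has left the front porch.
 * That is why dm_pflip_high_irq() defers event delivery to the vupdate
 * handler below when a flip completes inside the front porch.
 */
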
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * only after the front-porch has passed. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc_state->stream,
					&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc_state->stream,
					&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: ignored
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
 * * We are close enough to VUPDATE - the point of no return for hw
 * * We are in the fixed portion of variable front porch when vrr is enabled
 * * We are before VUPDATE, where double-buffered vrr registers are swapped
 *
 * It is therefore the correct place to signal vblank, send user flip events,
 * and update VRR.
 */
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
	drm_crtc_handle_vblank(&acrtc->base);

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(
			adev->dm.freesync_module,
			acrtc_state->stream,
			&acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(
			adev->dm.dc,
			acrtc_state->stream,
			&acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
#endif

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

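	/*
	 * Example of the sizing math (illustrative): a 1920x1080 mode with
	 * htotal = 2200 and vtotal = 1125 gives max_size = 2,475,000, so
	 * the allocation below (max_size * 4 bytes) is roughly 9.4 MiB.
	 */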
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

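/*
 * A rough sketch of the binding flow (illustrative): component_add() in
 * amdgpu_dm_audio_init() below registers this device with the component
 * framework; once the HDA driver's matching aggregate device appears,
 * amdgpu_dm_audio_component_bind() runs, and the audio side can then
 * query per-port ELDs through acomp->ops->get_eld().
 */
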
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
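
	/*
	 * At this point the DMUB framebuffer windows are populated as used
	 * above: window 0 received inst_const only for the backdoor
	 * (non-PSP) load path, window 2 holds the bss/data segment,
	 * window 3 a copy of the VBIOS, and windows 4-6 (mailbox, trace
	 * buffer, firmware state) have been zeroed.
	 */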

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction, Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. Linux dc-pplib interface depends
	 * on window driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculate dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then call pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented different flow for window driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}

4562236b
HW
1689static int dm_resume(void *handle)
1690{
1691 struct amdgpu_device *adev = handle;
4562236b
HW
1692 struct drm_device *ddev = adev->ddev;
1693 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 1694 struct amdgpu_dm_connector *aconnector;
4562236b 1695 struct drm_connector *connector;
f8d2d39e 1696 struct drm_connector_list_iter iter;
4562236b 1697 struct drm_crtc *crtc;
c2cea706 1698 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
1699 struct dm_crtc_state *dm_new_crtc_state;
1700 struct drm_plane *plane;
1701 struct drm_plane_state *new_plane_state;
1702 struct dm_plane_state *dm_new_plane_state;
113b7a01 1703 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 1704 enum dc_connection_type new_connection_type = dc_connection_none;
8c7aea40 1705 int i, r;
4562236b 1706
113b7a01
LL
1707 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1708 dc_release_state(dm_state->context);
1709 dm_state->context = dc_create_state(dm->dc);
1710 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1711 dc_resource_state_construct(dm->dc, dm_state->context);
1712
8c7aea40
NK
1713 /* Before powering on DC we need to re-initialize DMUB. */
1714 r = dm_dmub_hw_init(adev);
1715 if (r)
1716 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1717
a80aa93d
ML
1718 /* power on hardware */
1719 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1720
4562236b
HW
1721 /* program HPD filter */
1722 dc_resume(dm->dc);
1723
4562236b
HW
1724 /*
1725 * early enable HPD Rx IRQ, should be done before set mode as short
1726 * pulse interrupts are used for MST
1727 */
1728 amdgpu_dm_irq_resume_early(adev);
1729
d20ebea8 1730 /* On resume we need to rewrite the MSTM control bits to enable MST*/
684cd480
LP
1731 s3_handle_mst(ddev, false);
1732
4562236b 1733 /* Do detection*/
f8d2d39e
LP
1734 drm_connector_list_iter_begin(ddev, &iter);
1735 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 1736 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
1737
1738 /*
1739 * this is the case when traversing through already created
1740 * MST connectors, should be skipped
1741 */
1742 if (aconnector->mst_port)
1743 continue;
1744
03ea364c 1745 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
1746 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1747 DRM_ERROR("KMS: Failed to detect connector\n");
1748
1749 if (aconnector->base.force && new_connection_type == dc_connection_none)
1750 emulated_link_detect(aconnector->dc_link);
1751 else
1752 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
1753
1754 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1755 aconnector->fake_enable = false;
1756
dcd5fb82
MF
1757 if (aconnector->dc_sink)
1758 dc_sink_release(aconnector->dc_sink);
4562236b
HW
1759 aconnector->dc_sink = NULL;
1760 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 1761 mutex_unlock(&aconnector->hpd_lock);
4562236b 1762 }
f8d2d39e 1763 drm_connector_list_iter_end(&iter);
4562236b 1764
1f6010a9 1765 /* Force mode set in atomic commit */
a80aa93d 1766 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 1767 new_crtc_state->active_changed = true;
4f346e65 1768
fcb4019e
LSL
1769 /*
1770 * atomic_check is expected to create the dc states. We need to release
1771 * them here, since they were duplicated as part of the suspend
1772 * procedure.
1773 */
a80aa93d 1774 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
1775 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1776 if (dm_new_crtc_state->stream) {
1777 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1778 dc_stream_release(dm_new_crtc_state->stream);
1779 dm_new_crtc_state->stream = NULL;
1780 }
1781 }
1782
a80aa93d 1783 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
1784 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1785 if (dm_new_plane_state->dc_state) {
1786 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1787 dc_plane_state_release(dm_new_plane_state->dc_state);
1788 dm_new_plane_state->dc_state = NULL;
1789 }
1790 }
1791
2d1af6a1 1792 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 1793
a80aa93d 1794 dm->cached_state = NULL;
0a214e2f 1795
9faa4237 1796 amdgpu_dm_irq_resume_late(adev);
4562236b 1797
9340dfd3
HW
1798 amdgpu_dm_smu_write_watermarks_table(adev);
1799
2d1af6a1 1800 return 0;
4562236b
HW
1801}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};


/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	conn_base = &aconnector->base;
	adev = conn_base->dev->dev_private;
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would require floating point
	 * precision; to avoid that complexity, we exploit the fact that CV
	 * is divided by a constant. By Euclid's division algorithm, CV can
	 * be written as CV = 32*q + r. Substituting CV in the luminance
	 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
	 * pre-compute the values of 50*2**(r/32). They were generated with
	 * the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and the results can be verified against pre_computed_values.
	 */
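	/*
	 * Illustrative worked example (not from the original source):
	 * max_cll = 65 gives q = 65 >> 5 = 2 and r = 65 % 32 = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204, which is
	 * close to the exact 50*2**(65/32) ~= 204.4 nits.
	 */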
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * The EDID-managed connector gets its first update only in the
	 * mode_valid hook; after that the connector sink is set to either a
	 * fake or a physical sink, depending on the link status.
	 * Skip if this was already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For headless S3 resume, use dc_em_sink to fake the stream,
		 * because on resume connector->sink is set to NULL.
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump up the
				 * refcount for the sink, because the link no
				 * longer points to it after disconnect;
				 * otherwise, the next crtc-to-connector
				 * reshuffle by UMD would trigger an unwanted
				 * dc_sink release.
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = dev->dev_private;
#endif

	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}

static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
			process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = dev->dev_private;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO: This is a temporary mutex that keeps the HPD interrupt from
	 * hitting a GPIO conflict; once an i2c helper is implemented, this
	 * mutex should be retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);


#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dcn_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}
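
/*
 * Typical call pattern during atomic check (an illustrative sketch, not
 * code from this driver):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * Repeated calls with the same cached pointer are cheap: once *dm_state is
 * non-NULL, the function returns immediately without taking the lock again.
 */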

struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

struct dm_atomic_state *
dm_atomic_get_old_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *old_obj_state;
	int i;

	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(old_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev->ddev,
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	r = amdgpu_dm_audio_init(adev);
	if (r)
		return r;

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
{
	bool rc;

	if (!link)
		return 1;

	rc = dc_link_set_backlight_level_nits(link, true, brightness,
					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);

	return rc ? 0 : 1;
}

static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
			      const uint32_t user_brightness)
{
	u32 min, max, conversion_pace;
	u32 brightness = user_brightness;

	if (!caps)
		goto out;

	if (!caps->aux_support) {
		max = caps->max_input_signal;
		min = caps->min_input_signal;
		/*
		 * The brightness input is in the range 0-255.
		 * It needs to be rescaled to be between the
		 * requested min and max input signal.
		 * It also needs to be scaled up by 0x101 to
		 * match the DC interface which has a range of
		 * 0 to 0xffff.
		 */
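		/*
		 * Illustrative worked example (not from the original source)
		 * with the default caps min = 12 and max = 255:
		 * user_brightness = 255 maps to
		 * 255 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 0xffff,
		 * while user_brightness = 0 maps to 12 * 0x101 = 3084.
		 */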
		conversion_pace = 0x101;
		brightness =
			user_brightness
			* conversion_pace
			* (max - min)
			/ AMDGPU_MAX_BL_LEVEL
			+ min * conversion_pace;
	} else {
		/*
		 * TODO: We are doing a linear interpolation here, which is OK
		 * but does not provide the optimal result. We probably want
		 * something close to the Perceptual Quantizer (PQ) curve.
		 */
		max = caps->aux_max_input_signal;
		min = caps->aux_min_input_signal;

		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
			       + user_brightness * max;
		// Multiply the value by 1000 since we use millinits
		brightness *= 1000;
		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
	}

out:
	return brightness;
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = NULL;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	link = (struct dc_link *)dm->backlight_link;

	brightness = convert_brightness(&caps, bd->props.brightness);
	// Change brightness based on AUX property
	if (caps.aux_support)
		return set_backlight_via_aux(link, brightness);

	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);

	return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int ret = dc_link_get_backlight_level(dm->backlight_link);

	if (ret == DC_ERROR_UNEXPECTED)
		return bd->props.brightness;
	return ret;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm);

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}
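
/*
 * Illustrative example of the possible_crtcs mask above (not from the
 * original source): with dc->caps.max_streams == 4, the primary plane with
 * plane_id 2 is bound to CRTC 2 only (mask 1 << 2), while an overlay plane
 * with plane_id >= 4 gets mask 0xff and may be assigned to any CRTC.
 */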


static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization, because not having backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}


/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success.
 */
2878 * Returns 0 on success
2879 */
7578ecda 2880static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
2881{
2882 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 2883 int32_t i;
c84dec2f 2884 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 2885 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 2886 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 2887 uint32_t link_cnt;
cc1fec57 2888 int32_t primary_planes;
fbbdadf2 2889 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 2890 const struct dc_plane_cap *plane;
4562236b
HW
2891
2892 link_cnt = dm->dc->caps.max_links;
4562236b
HW
2893 if (amdgpu_dm_mode_config_init(dm->adev)) {
2894 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 2895 return -EINVAL;
4562236b
HW
2896 }
2897
b2fddb13
NK
2898 /* There is one primary plane per CRTC */
2899 primary_planes = dm->dc->caps.max_streams;
54087768 2900 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 2901
b2fddb13
NK
2902 /*
2903 * Initialize primary planes, implicit planes for legacy IOCTLS.
2904 * Order is reversed to match iteration order in atomic check.
2905 */
2906 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
2907 plane = &dm->dc->caps.planes[i];
2908
b2fddb13 2909 if (initialize_plane(dm, mode_info, i,
cc1fec57 2910 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 2911 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 2912 goto fail;
d4e13b0d 2913 }
df534fff 2914 }
92f3ac40 2915
0d579c7e
NK
2916 /*
2917 * Initialize overlay planes, index starting after primary planes.
2918 * These planes have a higher DRM index than the primary planes since
2919 * they should be considered as having a higher z-order.
2920 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
2921 *
2922 * Only support DCN for now, and only expose one so we don't encourage
2923 * userspace to use up all the pipes.
0d579c7e 2924 */
cc1fec57
NK
2925 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2926 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2927
2928 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2929 continue;
2930
2931 if (!plane->blends_with_above || !plane->blends_with_below)
2932 continue;
2933
ea36ad34 2934 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
2935 continue;
2936
54087768 2937 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 2938 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 2939 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 2940 goto fail;
d4e13b0d 2941 }
cc1fec57
NK
2942
2943 /* Only create one overlay plane. */
2944 break;
d4e13b0d 2945 }
4562236b 2946
d4e13b0d 2947 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 2948 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 2949 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 2950 goto fail;
4562236b 2951 }
4562236b 2952
ab2541b6 2953 dm->display_indexes_num = dm->dc->caps.max_streams;
4562236b
HW
2954
2955 /* loops over all connectors on the board */
2956 for (i = 0; i < link_cnt; i++) {
89fc8d4e 2957 struct dc_link *link = NULL;
4562236b
HW
2958
2959 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2960 DRM_ERROR(
2961 "KMS: Cannot support more than %d display indexes\n",
2962 AMDGPU_DM_MAX_DISPLAY_INDEX);
2963 continue;
2964 }
2965
2966 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2967 if (!aconnector)
cd8a2ae8 2968 goto fail;
4562236b
HW
2969
2970 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 2971 if (!aencoder)
cd8a2ae8 2972 goto fail;
4562236b
HW
2973
2974 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2975 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 2976 goto fail;
4562236b
HW
2977 }
2978
2979 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2980 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 2981 goto fail;
4562236b
HW
2982 }
2983
89fc8d4e
HW
2984 link = dc_get_link_at_index(dm->dc, i);
2985
fbbdadf2
BL
2986 if (!dc_link_detect_sink(link, &new_connection_type))
2987 DRM_ERROR("KMS: Failed to detect connector\n");
2988
2989 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2990 emulated_link_detect(link);
2991 amdgpu_dm_update_connector_after_detect(aconnector);
2992
2993 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 2994 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 2995 register_backlight_device(dm, link);
397a9bc5
RL
2996 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2997 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
2998 }
2999
3000
4562236b
HW
3001 }
3002
3003 /* Software is initialized. Now we can register interrupt handlers. */
3004 switch (adev->asic_type) {
3005 case CHIP_BONAIRE:
3006 case CHIP_HAWAII:
cd4b356f
AD
3007 case CHIP_KAVERI:
3008 case CHIP_KABINI:
3009 case CHIP_MULLINS:
4562236b
HW
3010 case CHIP_TONGA:
3011 case CHIP_FIJI:
3012 case CHIP_CARRIZO:
3013 case CHIP_STONEY:
3014 case CHIP_POLARIS11:
3015 case CHIP_POLARIS10:
b264d345 3016 case CHIP_POLARIS12:
7737de91 3017 case CHIP_VEGAM:
2c8ad2d5 3018 case CHIP_VEGA10:
2325ff30 3019 case CHIP_VEGA12:
1fe6bf2f 3020 case CHIP_VEGA20:
4562236b
HW
3021 if (dce110_register_irq_handlers(dm->adev)) {
3022 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3023 goto fail;
4562236b
HW
3024 }
3025 break;
b86a1aa3 3026#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3027 case CHIP_RAVEN:
fbd2afe5 3028 case CHIP_NAVI12:
476e955d 3029 case CHIP_NAVI10:
fce651e3 3030 case CHIP_NAVI14:
30221ad8 3031 case CHIP_RENOIR:
ff5ef992
AD
3032 if (dcn10_register_irq_handlers(dm->adev)) {
3033 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3034 goto fail;
ff5ef992
AD
3035 }
3036 break;
3037#endif
4562236b 3038 default:
e63f8673 3039 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 3040 goto fail;
4562236b
HW
3041 }
3042
1bc460a4
HW
3043 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3044 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3045
2d673560
NK
3046 /* No userspace support. */
3047 dm->dc->debug.disable_tri_buf = true;
3048
4562236b 3049 return 0;
cd8a2ae8 3050fail:
4562236b 3051 kfree(aencoder);
4562236b 3052 kfree(aconnector);
54087768 3053
59d0f396 3054 return -EINVAL;
4562236b
HW
3055}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
	case CHIP_NAVI10:
	case CHIP_NAVI12:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_RENOIR:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	if (!crtc_state->enable)
		return false;

	return crtc_state->active;
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	return !crtc_state->enable || !crtc_state->active;
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};


static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* TODO: Validate scaling per-format with DC plane caps */

	/*
	 * The scale factors below are in thousandths, so the 250..16000
	 * window allows anything from 0.25x downscaling to 16x upscaling.
	 */
	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < 250 || scale_w > 16000)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < 250 || scale_h > 16000)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
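
/*
 * Illustrative example for the 16.16 fixed-point handling in
 * fill_dc_scaling_info() above (not from the original source): a DRM plane
 * state with src_w == (1920 << 16) yields src_rect.width == 1920; any
 * fractional part in the low 16 bits is ignored for now.
 */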

static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
		       uint64_t *tiling_flags, bool *tmz_surface)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	int r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	if (tmz_surface)
		*tmz_surface = amdgpu_bo_encrypted(rbo);

	amdgpu_bo_unreserve(rbo);

	return r;
}

/* The DCC offset in the tiling flags is stored in units of 256 bytes. */
static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
{
	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);

	return offset ? (address + offset * 256) : 0;
}

695af5f9
NK
3346static int
3347fill_plane_dcc_attributes(struct amdgpu_device *adev,
3348 const struct amdgpu_framebuffer *afb,
3349 const enum surface_pixel_format format,
3350 const enum dc_rotation_angle rotation,
12e2b2d4 3351 const struct plane_size *plane_size,
695af5f9
NK
3352 const union dc_tiling_info *tiling_info,
3353 const uint64_t info,
3354 struct dc_plane_dcc_param *dcc,
af031f07
RS
3355 struct dc_plane_address *address,
3356 bool force_disable_dcc)
7df7e505
NK
3357{
3358 struct dc *dc = adev->dm.dc;
8daa1218
NC
3359 struct dc_dcc_surface_param input;
3360 struct dc_surface_dcc_cap output;
7df7e505
NK
3361 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3362 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3363 uint64_t dcc_address;
3364
8daa1218
NC
3365 memset(&input, 0, sizeof(input));
3366 memset(&output, 0, sizeof(output));
3367
af031f07
RS
3368 if (force_disable_dcc)
3369 return 0;
3370
7df7e505 3371 if (!offset)
09e5665a
NK
3372 return 0;
3373
695af5f9 3374 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
09e5665a 3375 return 0;
7df7e505
NK
3376
3377 if (!dc->cap_funcs.get_dcc_compression_cap)
09e5665a 3378 return -EINVAL;
7df7e505 3379
695af5f9 3380 input.format = format;
12e2b2d4
DL
3381 input.surface_size.width = plane_size->surface_size.width;
3382 input.surface_size.height = plane_size->surface_size.height;
695af5f9 3383 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 3384
695af5f9 3385 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 3386 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 3387 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
3388 input.scan = SCAN_DIRECTION_VERTICAL;
3389
3390 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 3391 return -EINVAL;
7df7e505
NK
3392
3393 if (!output.capable)
09e5665a 3394 return -EINVAL;
7df7e505
NK
3395
3396 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
09e5665a 3397 return -EINVAL;
7df7e505 3398
09e5665a 3399 dcc->enable = 1;
12e2b2d4 3400 dcc->meta_pitch =
7df7e505 3401 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
12e2b2d4 3402 dcc->independent_64b_blks = i64b;
7df7e505
NK
3403
3404 dcc_address = get_dcc_address(afb->address, info);
09e5665a
NK
3405 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3406 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
7df7e505 3407
09e5665a
NK
3408 return 0;
3409}
3410
3411static int
320932bf 3412fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 3413 const struct amdgpu_framebuffer *afb,
695af5f9
NK
3414 const enum surface_pixel_format format,
3415 const enum dc_rotation_angle rotation,
3416 const uint64_t tiling_flags,
09e5665a 3417 union dc_tiling_info *tiling_info,
12e2b2d4 3418 struct plane_size *plane_size,
09e5665a 3419 struct dc_plane_dcc_param *dcc,
af031f07 3420 struct dc_plane_address *address,
5888f07a 3421 bool tmz_surface,
af031f07 3422 bool force_disable_dcc)
09e5665a 3423{
320932bf 3424 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
3425 int ret;
3426
3427 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 3428 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 3429 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
3430 memset(address, 0, sizeof(*address));
3431
5888f07a
HW
3432 address->tmz_surface = tmz_surface;
3433
695af5f9 3434 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
12e2b2d4
DL
3435 plane_size->surface_size.x = 0;
3436 plane_size->surface_size.y = 0;
3437 plane_size->surface_size.width = fb->width;
3438 plane_size->surface_size.height = fb->height;
3439 plane_size->surface_pitch =
320932bf
NK
3440 fb->pitches[0] / fb->format->cpp[0];
3441
e0634e8d
NK
3442 address->type = PLN_ADDR_TYPE_GRAPHICS;
3443 address->grph.addr.low_part = lower_32_bits(afb->address);
3444 address->grph.addr.high_part = upper_32_bits(afb->address);
1894478a 3445 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
1791e54f 3446 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 3447
12e2b2d4
DL
3448 plane_size->surface_size.x = 0;
3449 plane_size->surface_size.y = 0;
3450 plane_size->surface_size.width = fb->width;
3451 plane_size->surface_size.height = fb->height;
3452 plane_size->surface_pitch =
320932bf
NK
3453 fb->pitches[0] / fb->format->cpp[0];
3454
12e2b2d4
DL
3455 plane_size->chroma_size.x = 0;
3456 plane_size->chroma_size.y = 0;
320932bf 3457 /* TODO: set these based on surface format */
12e2b2d4
DL
3458 plane_size->chroma_size.width = fb->width / 2;
3459 plane_size->chroma_size.height = fb->height / 2;
320932bf 3460
12e2b2d4 3461 plane_size->chroma_pitch =
320932bf
NK
3462 fb->pitches[1] / fb->format->cpp[1];
3463
e0634e8d
NK
3464 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3465 address->video_progressive.luma_addr.low_part =
3466 lower_32_bits(afb->address);
3467 address->video_progressive.luma_addr.high_part =
3468 upper_32_bits(afb->address);
3469 address->video_progressive.chroma_addr.low_part =
3470 lower_32_bits(chroma_addr);
3471 address->video_progressive.chroma_addr.high_part =
3472 upper_32_bits(chroma_addr);
3473 }
09e5665a
NK
3474
3475 /* Fill GFX8 params */
3476 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3477 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3478
3479 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3480 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3481 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3482 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3483 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3484
3485 /* XXX fix me for VI */
3486 tiling_info->gfx8.num_banks = num_banks;
3487 tiling_info->gfx8.array_mode =
3488 DC_ARRAY_2D_TILED_THIN1;
3489 tiling_info->gfx8.tile_split = tile_split;
3490 tiling_info->gfx8.bank_width = bankw;
3491 tiling_info->gfx8.bank_height = bankh;
3492 tiling_info->gfx8.tile_aspect = mtaspect;
3493 tiling_info->gfx8.tile_mode =
3494 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3495 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3496 == DC_ARRAY_1D_TILED_THIN1) {
3497 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3498 }
3499
3500 tiling_info->gfx8.pipe_config =
3501 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3502
3503 if (adev->asic_type == CHIP_VEGA10 ||
3504 adev->asic_type == CHIP_VEGA12 ||
3505 adev->asic_type == CHIP_VEGA20 ||
476e955d 3506 adev->asic_type == CHIP_NAVI10 ||
fce651e3 3507 adev->asic_type == CHIP_NAVI14 ||
fbd2afe5 3508 adev->asic_type == CHIP_NAVI12 ||
30221ad8 3509 adev->asic_type == CHIP_RENOIR ||
09e5665a
NK
3510 adev->asic_type == CHIP_RAVEN) {
3511 /* Fill GFX9 params */
3512 tiling_info->gfx9.num_pipes =
3513 adev->gfx.config.gb_addr_config_fields.num_pipes;
3514 tiling_info->gfx9.num_banks =
3515 adev->gfx.config.gb_addr_config_fields.num_banks;
3516 tiling_info->gfx9.pipe_interleave =
3517 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3518 tiling_info->gfx9.num_shader_engines =
3519 adev->gfx.config.gb_addr_config_fields.num_se;
3520 tiling_info->gfx9.max_compressed_frags =
3521 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3522 tiling_info->gfx9.num_rb_per_se =
3523 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3524 tiling_info->gfx9.swizzle =
3525 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3526 tiling_info->gfx9.shaderEnable = 1;
3527
695af5f9
NK
3528 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3529 plane_size, tiling_info,
af031f07
RS
3530 tiling_flags, dcc, address,
3531 force_disable_dcc);
09e5665a
NK
3532 if (ret)
3533 return ret;
3534 }
3535
3536 return 0;
7df7e505
NK
3537}
3538
d74004b6 3539static void
695af5f9 3540fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
3541 bool *per_pixel_alpha, bool *global_alpha,
3542 int *global_alpha_value)
3543{
3544 *per_pixel_alpha = false;
3545 *global_alpha = false;
3546 *global_alpha_value = 0xff;
3547
3548 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3549 return;
3550
3551 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3552 static const uint32_t alpha_formats[] = {
3553 DRM_FORMAT_ARGB8888,
3554 DRM_FORMAT_RGBA8888,
3555 DRM_FORMAT_ABGR8888,
3556 };
3557 uint32_t format = plane_state->fb->format->format;
3558 unsigned int i;
3559
3560 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3561 if (format == alpha_formats[i]) {
3562 *per_pixel_alpha = true;
3563 break;
3564 }
3565 }
3566 }
3567
3568 if (plane_state->alpha < 0xffff) {
3569 *global_alpha = true;
3570 *global_alpha_value = plane_state->alpha >> 8;
3571 }
3572}
3573
004fefa3
NK
3574static int
3575fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 3576 const enum surface_pixel_format format,
004fefa3
NK
3577 enum dc_color_space *color_space)
3578{
3579 bool full_range;
3580
3581 *color_space = COLOR_SPACE_SRGB;
3582
3583 /* DRM color properties only affect non-RGB formats. */
695af5f9 3584 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
3585 return 0;
3586
3587 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3588
3589 switch (plane_state->color_encoding) {
3590 case DRM_COLOR_YCBCR_BT601:
3591 if (full_range)
3592 *color_space = COLOR_SPACE_YCBCR601;
3593 else
3594 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3595 break;
3596
3597 case DRM_COLOR_YCBCR_BT709:
3598 if (full_range)
3599 *color_space = COLOR_SPACE_YCBCR709;
3600 else
3601 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3602 break;
3603
3604 case DRM_COLOR_YCBCR_BT2020:
3605 if (full_range)
3606 *color_space = COLOR_SPACE_2020_YCBCR;
3607 else
3608 return -EINVAL;
3609 break;
3610
3611 default:
3612 return -EINVAL;
3613 }
3614
3615 return 0;
3616}
3617
695af5f9
NK
3618static int
3619fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3620 const struct drm_plane_state *plane_state,
3621 const uint64_t tiling_flags,
3622 struct dc_plane_info *plane_info,
af031f07 3623 struct dc_plane_address *address,
5888f07a 3624 bool tmz_surface,
af031f07 3625 bool force_disable_dcc)
695af5f9
NK
3626{
3627 const struct drm_framebuffer *fb = plane_state->fb;
3628 const struct amdgpu_framebuffer *afb =
3629 to_amdgpu_framebuffer(plane_state->fb);
3630 struct drm_format_name_buf format_name;
3631 int ret;
3632
3633 memset(plane_info, 0, sizeof(*plane_info));
3634
3635 switch (fb->format->format) {
3636 case DRM_FORMAT_C8:
3637 plane_info->format =
3638 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3639 break;
3640 case DRM_FORMAT_RGB565:
3641 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3642 break;
3643 case DRM_FORMAT_XRGB8888:
3644 case DRM_FORMAT_ARGB8888:
3645 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3646 break;
3647 case DRM_FORMAT_XRGB2101010:
3648 case DRM_FORMAT_ARGB2101010:
3649 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3650 break;
3651 case DRM_FORMAT_XBGR2101010:
3652 case DRM_FORMAT_ABGR2101010:
3653 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3654 break;
3655 case DRM_FORMAT_XBGR8888:
3656 case DRM_FORMAT_ABGR8888:
3657 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3658 break;
3659 case DRM_FORMAT_NV21:
3660 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3661 break;
3662 case DRM_FORMAT_NV12:
3663 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3664 break;
cbec6477
SW
3665 case DRM_FORMAT_P010:
3666 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3667 break;
492548dc
SW
3668 case DRM_FORMAT_XRGB16161616F:
3669 case DRM_FORMAT_ARGB16161616F:
3670 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3671 break;
695af5f9
NK
3672 default:
3673 DRM_ERROR(
3674 "Unsupported screen format %s\n",
3675 drm_get_format_name(fb->format->format, &format_name));
3676 return -EINVAL;
3677 }
3678
3679 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3680 case DRM_MODE_ROTATE_0:
3681 plane_info->rotation = ROTATION_ANGLE_0;
3682 break;
3683 case DRM_MODE_ROTATE_90:
3684 plane_info->rotation = ROTATION_ANGLE_90;
3685 break;
3686 case DRM_MODE_ROTATE_180:
3687 plane_info->rotation = ROTATION_ANGLE_180;
3688 break;
3689 case DRM_MODE_ROTATE_270:
3690 plane_info->rotation = ROTATION_ANGLE_270;
3691 break;
3692 default:
3693 plane_info->rotation = ROTATION_ANGLE_0;
3694 break;
3695 }
3696
3697 plane_info->visible = true;
3698 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3699
6d83a32d
MS
3700 plane_info->layer_index = 0;
3701
695af5f9
NK
3702 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3703 &plane_info->color_space);
3704 if (ret)
3705 return ret;
3706
3707 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3708 plane_info->rotation, tiling_flags,
3709 &plane_info->tiling_info,
3710 &plane_info->plane_size,
5888f07a 3711 &plane_info->dcc, address, tmz_surface,
af031f07 3712 force_disable_dcc);
695af5f9
NK
3713 if (ret)
3714 return ret;
3715
3716 fill_blending_from_plane_state(
3717 plane_state, &plane_info->per_pixel_alpha,
3718 &plane_info->global_alpha, &plane_info->global_alpha_value);
3719
3720 return 0;
3721}
3722
3723static int fill_dc_plane_attributes(struct amdgpu_device *adev,
e4923387 3724 struct dc_plane_state *dc_plane_state,
695af5f9
NK
3725 struct drm_plane_state *plane_state,
3726 struct drm_crtc_state *crtc_state)
e7b07cee 3727{
cf020d49 3728 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
3729 const struct amdgpu_framebuffer *amdgpu_fb =
3730 to_amdgpu_framebuffer(plane_state->fb);
695af5f9
NK
3731 struct dc_scaling_info scaling_info;
3732 struct dc_plane_info plane_info;
3733 uint64_t tiling_flags;
3734 int ret;
5888f07a 3735 bool tmz_surface = false;
af031f07 3736 bool force_disable_dcc = false;
e7b07cee 3737
695af5f9
NK
3738 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3739 if (ret)
3740 return ret;
e7b07cee 3741
695af5f9
NK
3742 dc_plane_state->src_rect = scaling_info.src_rect;
3743 dc_plane_state->dst_rect = scaling_info.dst_rect;
3744 dc_plane_state->clip_rect = scaling_info.clip_rect;
3745 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 3746
5888f07a 3747 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
e7b07cee
HW
3748 if (ret)
3749 return ret;
3750
af031f07 3751 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
695af5f9
NK
3752 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3753 &plane_info,
af031f07 3754 &dc_plane_state->address,
5888f07a 3755 tmz_surface,
af031f07 3756 force_disable_dcc);
004fefa3
NK
3757 if (ret)
3758 return ret;
3759
695af5f9
NK
3760	dc_plane_state->format = plane_info.format;
3761	dc_plane_state->color_space = plane_info.color_space;
3763 dc_plane_state->plane_size = plane_info.plane_size;
3764 dc_plane_state->rotation = plane_info.rotation;
3765 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3766 dc_plane_state->stereo_format = plane_info.stereo_format;
3767 dc_plane_state->tiling_info = plane_info.tiling_info;
3768 dc_plane_state->visible = plane_info.visible;
3769 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3770 dc_plane_state->global_alpha = plane_info.global_alpha;
3771 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3772 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 3773	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
695af5f9 3774
e277adc5
LSL
3775 /*
3776 * Always set input transfer function, since plane state is refreshed
3777 * every time.
3778 */
e4923387 3779 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
cf020d49
NK
3780 if (ret)
3781 return ret;
e7b07cee 3782
cf020d49 3783 return 0;
e7b07cee
HW
3784}
3785
3ee6b26b
AD
3786static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3787 const struct dm_connector_state *dm_state,
3788 struct dc_stream_state *stream)
e7b07cee
HW
3789{
3790 enum amdgpu_rmx_type rmx_type;
3791
3792 struct rect src = { 0 }; /* viewport in composition space*/
3793 struct rect dst = { 0 }; /* stream addressable area */
3794
3795 /* no mode. nothing to be done */
3796 if (!mode)
3797 return;
3798
3799 /* Full screen scaling by default */
3800 src.width = mode->hdisplay;
3801 src.height = mode->vdisplay;
3802 dst.width = stream->timing.h_addressable;
3803 dst.height = stream->timing.v_addressable;
3804
f4791779
HW
3805 if (dm_state) {
3806 rmx_type = dm_state->scaling;
3807 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3808 if (src.width * dst.height <
3809 src.height * dst.width) {
3810 /* height needs less upscaling/more downscaling */
3811 dst.width = src.width *
3812 dst.height / src.height;
3813 } else {
3814 /* width needs less upscaling/more downscaling */
3815 dst.height = src.height *
3816 dst.width / src.width;
3817 }
3818 } else if (rmx_type == RMX_CENTER) {
3819 dst = src;
e7b07cee 3820 }
e7b07cee 3821
f4791779
HW
3822 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3823 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 3824
f4791779
HW
3825 if (dm_state->underscan_enable) {
3826 dst.x += dm_state->underscan_hborder / 2;
3827 dst.y += dm_state->underscan_vborder / 2;
3828 dst.width -= dm_state->underscan_hborder;
3829 dst.height -= dm_state->underscan_vborder;
3830 }
e7b07cee
HW
3831 }
3832
3833 stream->src = src;
3834 stream->dst = dst;
3835
f1ad2f5e 3836 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
e7b07cee
HW
3837 dst.x, dst.y, dst.width, dst.height);
3838
3839}
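/*
 * Worked example for RMX_ASPECT (illustrative): a 1280x1024 mode scaled
 * onto a 1920x1080 stream takes the first branch (1280 * 1080 <
 * 1024 * 1920), so dst.width = 1280 * 1080 / 1024 = 1350 while
 * dst.height stays 1080, and the image is centered at
 * dst.x = (1920 - 1350) / 2 = 285, dst.y = 0.
 */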
3840
3ee6b26b 3841static enum dc_color_depth
42ba01fc 3842convert_color_depth_from_display_info(const struct drm_connector *connector,
1bc22f20
SW
3843 const struct drm_connector_state *state,
3844 bool is_y420)
e7b07cee 3845{
1bc22f20 3846 uint8_t bpc;
01c22997 3847
1bc22f20
SW
3848 if (is_y420) {
3849 bpc = 8;
3850
3851 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3852 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3853 bpc = 16;
3854 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3855 bpc = 12;
3856 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3857 bpc = 10;
3858 } else {
3859 bpc = (uint8_t)connector->display_info.bpc;
3860 /* Assume 8 bpc by default if no bpc is specified. */
3861 bpc = bpc ? bpc : 8;
3862 }
e7b07cee 3863
01933ba4
NK
3864 if (!state)
3865 state = connector->state;
3866
42ba01fc 3867 if (state) {
01c22997
NK
3868 /*
3869 * Cap display bpc based on the user requested value.
3870 *
3871	 * The value for state->max_bpc may not be correctly updated
3872 * depending on when the connector gets added to the state
3873 * or if this was called outside of atomic check, so it
3874 * can't be used directly.
3875 */
3876 bpc = min(bpc, state->max_requested_bpc);
3877
1825fd34
NK
3878 /* Round down to the nearest even number. */
3879 bpc = bpc - (bpc & 1);
3880 }
07e3a1cf 3881
e7b07cee
HW
3882 switch (bpc) {
3883 case 0:
1f6010a9
DF
3884 /*
3885	 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
3886	 * EDID revisions before 1.4.
3887 * TODO: Fix edid parsing
3888 */
3889 return COLOR_DEPTH_888;
3890 case 6:
3891 return COLOR_DEPTH_666;
3892 case 8:
3893 return COLOR_DEPTH_888;
3894 case 10:
3895 return COLOR_DEPTH_101010;
3896 case 12:
3897 return COLOR_DEPTH_121212;
3898 case 14:
3899 return COLOR_DEPTH_141414;
3900 case 16:
3901 return COLOR_DEPTH_161616;
3902 default:
3903 return COLOR_DEPTH_UNDEFINED;
3904 }
3905}
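/*
 * Example (hypothetical): a sink reporting 12 bpc with the user's
 * max_requested_bpc property at 10 yields min(12, 10) = 10, already
 * even, so COLOR_DEPTH_101010 is returned; a request of 9 rounds down
 * to 8 and gives COLOR_DEPTH_888 instead.
 */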
3906
3ee6b26b
AD
3907static enum dc_aspect_ratio
3908get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 3909{
e11d4147
LSL
3910 /* 1-1 mapping, since both enums follow the HDMI spec. */
3911 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
3912}
3913
3ee6b26b
AD
3914static enum dc_color_space
3915get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
3916{
3917 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3918
3919 switch (dc_crtc_timing->pixel_encoding) {
3920 case PIXEL_ENCODING_YCBCR422:
3921 case PIXEL_ENCODING_YCBCR444:
3922 case PIXEL_ENCODING_YCBCR420:
3923 {
3924 /*
3925	 * 27.03 MHz is the separation point between HDTV and SDTV;
3926	 * per the HDMI spec we use YCbCr709 and YCbCr601,
3927	 * respectively.
3928 */
380604e2 3929 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
3930 if (dc_crtc_timing->flags.Y_ONLY)
3931 color_space =
3932 COLOR_SPACE_YCBCR709_LIMITED;
3933 else
3934 color_space = COLOR_SPACE_YCBCR709;
3935 } else {
3936 if (dc_crtc_timing->flags.Y_ONLY)
3937 color_space =
3938 COLOR_SPACE_YCBCR601_LIMITED;
3939 else
3940 color_space = COLOR_SPACE_YCBCR601;
3941 }
3942
3943 }
3944 break;
3945 case PIXEL_ENCODING_RGB:
3946 color_space = COLOR_SPACE_SRGB;
3947 break;
3948
3949 default:
3950 WARN_ON(1);
3951 break;
3952 }
3953
3954 return color_space;
3955}
3956
ea117312
TA
3957static bool adjust_colour_depth_from_display_info(
3958 struct dc_crtc_timing *timing_out,
3959 const struct drm_display_info *info)
400443e8 3960{
ea117312 3961 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 3962 int normalized_clk;
400443e8 3963 do {
380604e2 3964 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
3965 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3966 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3967 normalized_clk /= 2;
3968	 /* Adjust the pixel clock per the HDMI spec for the given colour depth */
ea117312
TA
3969 switch (depth) {
3970 case COLOR_DEPTH_888:
3971 break;
400443e8
ML
3972 case COLOR_DEPTH_101010:
3973 normalized_clk = (normalized_clk * 30) / 24;
3974 break;
3975 case COLOR_DEPTH_121212:
3976 normalized_clk = (normalized_clk * 36) / 24;
3977 break;
3978 case COLOR_DEPTH_161616:
3979 normalized_clk = (normalized_clk * 48) / 24;
3980 break;
3981 default:
ea117312
TA
3982 /* The above depths are the only ones valid for HDMI. */
3983 return false;
400443e8 3984 }
ea117312
TA
3985 if (normalized_clk <= info->max_tmds_clock) {
3986 timing_out->display_color_depth = depth;
3987 return true;
3988 }
3989 } while (--depth > COLOR_DEPTH_666);
3990 return false;
400443e8 3991}
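/*
 * Worked example (illustrative): 4k60 RGB has pix_clk_100hz = 5940000,
 * so normalized_clk starts at 594000 kHz. At 12 bpc this becomes
 * 594000 * 36 / 24 = 891000 kHz; with a sink max_tmds_clock of
 * 600000 kHz the loop steps down through 10 bpc (742500 kHz) until
 * 8 bpc (594000 kHz) finally fits.
 */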
e7b07cee 3992
42ba01fc
NK
3993static void fill_stream_properties_from_drm_display_mode(
3994 struct dc_stream_state *stream,
3995 const struct drm_display_mode *mode_in,
3996 const struct drm_connector *connector,
3997 const struct drm_connector_state *connector_state,
3998 const struct dc_stream_state *old_stream)
e7b07cee
HW
3999{
4000 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 4001 const struct drm_display_info *info = &connector->display_info;
d4252eee 4002 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
4003 struct hdmi_vendor_infoframe hv_frame;
4004 struct hdmi_avi_infoframe avi_frame;
e7b07cee 4005
acf83f86
WL
4006 memset(&hv_frame, 0, sizeof(hv_frame));
4007 memset(&avi_frame, 0, sizeof(avi_frame));
4008
e7b07cee
HW
4009 timing_out->h_border_left = 0;
4010 timing_out->h_border_right = 0;
4011 timing_out->v_border_top = 0;
4012 timing_out->v_border_bottom = 0;
4013 /* TODO: un-hardcode */
fe61a2f1 4014 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 4015 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 4016 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
4017 else if (drm_mode_is_420_also(info, mode_in)
4018 && aconnector->force_yuv420_output)
4019 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 4020 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 4021 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
4022 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4023 else
4024 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4025
4026 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4027 timing_out->display_color_depth = convert_color_depth_from_display_info(
1bc22f20
SW
4028 connector, connector_state,
4029 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
e7b07cee
HW
4030 timing_out->scan_type = SCANNING_TYPE_NODATA;
4031 timing_out->hdmi_vic = 0;
b333730d
BL
4032
4033	 if (old_stream) {
4034 timing_out->vic = old_stream->timing.vic;
4035 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4036 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4037 } else {
4038 timing_out->vic = drm_match_cea_mode(mode_in);
4039 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4040 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4041 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4042 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4043 }
e7b07cee 4044
1cb1d477
WL
4045 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4046 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4047 timing_out->vic = avi_frame.video_code;
4048 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4049 timing_out->hdmi_vic = hv_frame.vic;
4050 }
4051
e7b07cee
HW
4052 timing_out->h_addressable = mode_in->crtc_hdisplay;
4053 timing_out->h_total = mode_in->crtc_htotal;
4054 timing_out->h_sync_width =
4055 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4056 timing_out->h_front_porch =
4057 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4058 timing_out->v_total = mode_in->crtc_vtotal;
4059 timing_out->v_addressable = mode_in->crtc_vdisplay;
4060 timing_out->v_front_porch =
4061 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4062 timing_out->v_sync_width =
4063 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 4064 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 4065 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
4066
4067 stream->output_color_space = get_output_color_space(timing_out);
4068
e43a432c
AK
4069 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4070 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
4071 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4072 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4073 drm_mode_is_420_also(info, mode_in) &&
4074 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4075 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4076 adjust_colour_depth_from_display_info(timing_out, info);
4077 }
4078 }
e7b07cee
HW
4079}
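/*
 * Timing example (standard CEA 1920x1080@60, for illustration): with
 * hdisplay 1920, hsync_start 2008, hsync_end 2052 and htotal 2200 the
 * code above yields h_front_porch = 88 and h_sync_width = 44, and
 * crtc_clock = 148500 kHz becomes pix_clk_100hz = 1485000.
 */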
4080
3ee6b26b
AD
4081static void fill_audio_info(struct audio_info *audio_info,
4082 const struct drm_connector *drm_connector,
4083 const struct dc_sink *dc_sink)
e7b07cee
HW
4084{
4085 int i = 0;
4086 int cea_revision = 0;
4087 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4088
4089 audio_info->manufacture_id = edid_caps->manufacturer_id;
4090 audio_info->product_id = edid_caps->product_id;
4091
4092 cea_revision = drm_connector->display_info.cea_rev;
4093
090afc1e 4094 strscpy(audio_info->display_name,
d2b2562c 4095 edid_caps->display_name,
090afc1e 4096 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 4097
b830ebc9 4098 if (cea_revision >= 3) {
e7b07cee
HW
4099 audio_info->mode_count = edid_caps->audio_mode_count;
4100
4101 for (i = 0; i < audio_info->mode_count; ++i) {
4102 audio_info->modes[i].format_code =
4103 (enum audio_format_code)
4104 (edid_caps->audio_modes[i].format_code);
4105 audio_info->modes[i].channel_count =
4106 edid_caps->audio_modes[i].channel_count;
4107 audio_info->modes[i].sample_rates.all =
4108 edid_caps->audio_modes[i].sample_rate;
4109 audio_info->modes[i].sample_size =
4110 edid_caps->audio_modes[i].sample_size;
4111 }
4112 }
4113
4114 audio_info->flags.all = edid_caps->speaker_flags;
4115
4116	 /* TODO: We only check progressive mode; check interlaced mode too */
b830ebc9 4117 if (drm_connector->latency_present[0]) {
e7b07cee
HW
4118 audio_info->video_latency = drm_connector->video_latency[0];
4119 audio_info->audio_latency = drm_connector->audio_latency[0];
4120 }
4121
4122 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4123
4124}
4125
3ee6b26b
AD
4126static void
4127copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4128 struct drm_display_mode *dst_mode)
e7b07cee
HW
4129{
4130 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4131 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4132 dst_mode->crtc_clock = src_mode->crtc_clock;
4133 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4134 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 4135 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
4136 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4137 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4138 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4139 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4140 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4141 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4142 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4143 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4144}
4145
3ee6b26b
AD
4146static void
4147decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4148 const struct drm_display_mode *native_mode,
4149 bool scale_enabled)
e7b07cee
HW
4150{
4151 if (scale_enabled) {
4152 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4153 } else if (native_mode->clock == drm_mode->clock &&
4154 native_mode->htotal == drm_mode->htotal &&
4155 native_mode->vtotal == drm_mode->vtotal) {
4156 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4157 } else {
4158	 /* no scaling and no amdgpu-inserted mode: nothing to patch */
4159 }
4160}
4161
aed15309
ML
4162static struct dc_sink *
4163create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 4164{
2e0ac3d6 4165 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 4166 struct dc_sink *sink = NULL;
2e0ac3d6
HW
4167 sink_init_data.link = aconnector->dc_link;
4168 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4169
4170 sink = dc_sink_create(&sink_init_data);
423788c7 4171 if (!sink) {
2e0ac3d6 4172 DRM_ERROR("Failed to create sink!\n");
aed15309 4173 return NULL;
423788c7 4174 }
2e0ac3d6 4175 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 4176
aed15309 4177 return sink;
2e0ac3d6
HW
4178}
4179
fa2123db
ML
4180static void set_multisync_trigger_params(
4181 struct dc_stream_state *stream)
4182{
4183 if (stream->triggered_crtc_reset.enabled) {
4184 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4185 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4186 }
4187}
4188
4189static void set_master_stream(struct dc_stream_state *stream_set[],
4190 int stream_count)
4191{
4192 int j, highest_rfr = 0, master_stream = 0;
4193
4194 for (j = 0; j < stream_count; j++) {
4195 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4196 int refresh_rate = 0;
4197
380604e2 4198 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
4199 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4200 if (refresh_rate > highest_rfr) {
4201 highest_rfr = refresh_rate;
4202 master_stream = j;
4203 }
4204 }
4205 }
4206 for (j = 0; j < stream_count; j++) {
03736f4c 4207 if (stream_set[j])
fa2123db
ML
4208 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4209 }
4210}
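/*
 * Refresh-rate example (illustrative): a 1920x1080@60 stream with a
 * 148.5 MHz pixel clock has pix_clk_100hz = 1485000, h_total = 2200 and
 * v_total = 1125, so (1485000 * 100) / (2200 * 1125) = 60; the stream
 * with the highest such rate becomes the multisync master.
 */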
4211
4212static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4213{
4214 int i = 0;
4215
4216 if (context->stream_count < 2)
4217 return;
4218 for (i = 0; i < context->stream_count ; i++) {
4219 if (!context->streams[i])
4220 continue;
1f6010a9
DF
4221 /*
4222 * TODO: add a function to read AMD VSDB bits and set
fa2123db 4223	 * crtc_sync_master.multi_sync_enabled flag.
1f6010a9 4224	 * For now it is always set to false.
fa2123db
ML
4225 */
4226 set_multisync_trigger_params(context->streams[i]);
4227 }
4228 set_master_stream(context->streams, context->stream_count);
4229}
4230
3ee6b26b
AD
4231static struct dc_stream_state *
4232create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4233 const struct drm_display_mode *drm_mode,
b333730d
BL
4234 const struct dm_connector_state *dm_state,
4235 const struct dc_stream_state *old_stream)
e7b07cee
HW
4236{
4237 struct drm_display_mode *preferred_mode = NULL;
391ef035 4238 struct drm_connector *drm_connector;
42ba01fc
NK
4239 const struct drm_connector_state *con_state =
4240 dm_state ? &dm_state->base : NULL;
0971c40e 4241 struct dc_stream_state *stream = NULL;
e7b07cee
HW
4242 struct drm_display_mode mode = *drm_mode;
4243 bool native_mode_found = false;
b333730d
BL
4244 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4245 int mode_refresh;
58124bf8 4246 int preferred_refresh = 0;
defeb878 4247#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 4248 struct dsc_dec_dpcd_caps dsc_caps;
df2f1015 4249#endif
df2f1015 4250 uint32_t link_bandwidth_kbps;
b333730d 4251
aed15309 4252 struct dc_sink *sink = NULL;
b830ebc9 4253 if (aconnector == NULL) {
e7b07cee 4254 DRM_ERROR("aconnector is NULL!\n");
64245fa7 4255 return stream;
e7b07cee
HW
4256 }
4257
e7b07cee 4258 drm_connector = &aconnector->base;
2e0ac3d6 4259
f4ac176e 4260 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
4261 sink = create_fake_sink(aconnector);
4262 if (!sink)
4263 return stream;
aed15309
ML
4264 } else {
4265 sink = aconnector->dc_sink;
dcd5fb82 4266 dc_sink_retain(sink);
f4ac176e 4267 }
2e0ac3d6 4268
aed15309 4269 stream = dc_create_stream_for_sink(sink);
4562236b 4270
b830ebc9 4271 if (stream == NULL) {
e7b07cee 4272 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 4273 goto finish;
e7b07cee
HW
4274 }
4275
ceb3dbb4
JL
4276 stream->dm_stream_context = aconnector;
4277
4a36fcba
WL
4278 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4279 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4280
e7b07cee
HW
4281 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4282 /* Search for preferred mode */
4283 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4284 native_mode_found = true;
4285 break;
4286 }
4287 }
4288 if (!native_mode_found)
4289 preferred_mode = list_first_entry_or_null(
4290 &aconnector->base.modes,
4291 struct drm_display_mode,
4292 head);
4293
b333730d
BL
4294 mode_refresh = drm_mode_vrefresh(&mode);
4295
b830ebc9 4296 if (preferred_mode == NULL) {
1f6010a9
DF
4297 /*
4298	 * This may not be an error: the use case is when we have no
e7b07cee
HW
4299 * usermode calls to reset and set mode upon hotplug. In this
4300 * case, we call set mode ourselves to restore the previous mode
4301	 * and the mode list may not be populated in time.
4302 */
f1ad2f5e 4303 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee
HW
4304 } else {
4305 decide_crtc_timing_for_drm_display_mode(
4306 &mode, preferred_mode,
f4791779 4307 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 4308 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
4309 }
4310
f783577c
JFZ
4311 if (!dm_state)
4312 drm_mode_set_crtcinfo(&mode, 0);
4313
b333730d
BL
4314 /*
4315 * If scaling is enabled and refresh rate didn't change
4316 * we copy the vic and polarities of the old timings
4317 */
4318 if (!scale || mode_refresh != preferred_refresh)
4319 fill_stream_properties_from_drm_display_mode(stream,
42ba01fc 4320 &mode, &aconnector->base, con_state, NULL);
b333730d
BL
4321 else
4322 fill_stream_properties_from_drm_display_mode(stream,
42ba01fc 4323 &mode, &aconnector->base, con_state, old_stream);
b333730d 4324
df2f1015
DF
4325 stream->timing.flags.DSC = 0;
4326
4327 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 4328#if defined(CONFIG_DRM_AMD_DC_DCN)
2af0f378
NC
4329 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4330 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
df2f1015
DF
4331 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4332 &dsc_caps);
defeb878 4333#endif
df2f1015
DF
4334 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4335 dc_link_get_link_cap(aconnector->dc_link));
4336
defeb878 4337#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 4338 if (dsc_caps.is_dsc_supported)
0417df16 4339 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 4340 &dsc_caps,
0417df16 4341 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
df2f1015
DF
4342 link_bandwidth_kbps,
4343 &stream->timing,
4344 &stream->timing.dsc_cfg))
4345 stream->timing.flags.DSC = 1;
39a4eb85 4346#endif
df2f1015 4347 }
39a4eb85 4348
e7b07cee
HW
4349 update_stream_scaling_settings(&mode, dm_state, stream);
4350
4351 fill_audio_info(
4352 &stream->audio_info,
4353 drm_connector,
aed15309 4354 sink);
e7b07cee 4355
ceb3dbb4 4356 update_stream_signal(stream, sink);
9182b4cb 4357
d832fc3b
WL
4358 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4359 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
d1ebfdd8 4360 if (stream->link->psr_settings.psr_feature_enabled) {
8c322309 4361 struct dc *core_dc = stream->link->ctx->dc;
d832fc3b 4362
8c322309 4363 if (dc_is_dmcu_initialized(core_dc)) {
c38cc677
MT
4364	 /*
4365	 * Decide whether the stream supports VSC SDP colorimetry
4366	 * before building the VSC info packet.
4367	 */
4368 stream->use_vsc_sdp_for_colorimetry = false;
4369 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4370 stream->use_vsc_sdp_for_colorimetry =
4371 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4372 } else {
4373 if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4374 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4375 stream->use_vsc_sdp_for_colorimetry = true;
4376 }
4377 }
4378 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309
RL
4379 }
4380 }
aed15309 4381finish:
dcd5fb82 4382 dc_sink_release(sink);
9e3efe3e 4383
e7b07cee
HW
4384 return stream;
4385}
4386
7578ecda 4387static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
4388{
4389 drm_crtc_cleanup(crtc);
4390 kfree(crtc);
4391}
4392
4393static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 4394 struct drm_crtc_state *state)
e7b07cee
HW
4395{
4396 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4397
4398	 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4399 if (cur->stream)
4400 dc_stream_release(cur->stream);
4401
4402
4403 __drm_atomic_helper_crtc_destroy_state(state);
4404
4405
4406 kfree(state);
4407}
4408
4409static void dm_crtc_reset_state(struct drm_crtc *crtc)
4410{
4411 struct dm_crtc_state *state;
4412
4413 if (crtc->state)
4414 dm_crtc_destroy_state(crtc, crtc->state);
4415
4416 state = kzalloc(sizeof(*state), GFP_KERNEL);
4417 if (WARN_ON(!state))
4418 return;
4419
4420 crtc->state = &state->base;
4421 crtc->state->crtc = crtc;
4422
4423}
4424
4425static struct drm_crtc_state *
4426dm_crtc_duplicate_state(struct drm_crtc *crtc)
4427{
4428 struct dm_crtc_state *state, *cur;
4429
4430 cur = to_dm_crtc_state(crtc->state);
4431
4432 if (WARN_ON(!crtc->state))
4433 return NULL;
4434
2004f45e 4435 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
4436 if (!state)
4437 return NULL;
e7b07cee
HW
4438
4439 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4440
4441 if (cur->stream) {
4442 state->stream = cur->stream;
4443 dc_stream_retain(state->stream);
4444 }
4445
d6ef9b41
NK
4446 state->active_planes = cur->active_planes;
4447 state->interrupts_enabled = cur->interrupts_enabled;
180db303 4448 state->vrr_params = cur->vrr_params;
98e6436d 4449 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 4450 state->abm_level = cur->abm_level;
bb47de73
NK
4451 state->vrr_supported = cur->vrr_supported;
4452 state->freesync_config = cur->freesync_config;
14b25846 4453 state->crc_src = cur->crc_src;
cf020d49
NK
4454 state->cm_has_degamma = cur->cm_has_degamma;
4455 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
98e6436d 4456
e7b07cee
HW
4457	 /* TODO: Duplicate dc_stream once the stream object is flattened */
4458
4459 return &state->base;
4460}
4461
d2574c33
MK
4462static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4463{
4464 enum dc_irq_source irq_source;
4465 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4466 struct amdgpu_device *adev = crtc->dev->dev_private;
4467 int rc;
4468
3a2ce8d6
LL
4469 /* Do not set vupdate for DCN hardware */
4470 if (adev->family > AMDGPU_FAMILY_AI)
4471 return 0;
4472
d2574c33
MK
4473 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4474
4475 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4476
4477 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4478 acrtc->crtc_id, enable ? "en" : "dis", rc);
4479 return rc;
4480}
589d2739
HW
4481
4482static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4483{
4484 enum dc_irq_source irq_source;
4485 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4486 struct amdgpu_device *adev = crtc->dev->dev_private;
d2574c33
MK
4487 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4488 int rc = 0;
4489
4490 if (enable) {
4491 /* vblank irq on -> Only need vupdate irq in vrr mode */
4492 if (amdgpu_dm_vrr_active(acrtc_state))
4493 rc = dm_set_vupdate_irq(crtc, true);
4494 } else {
4495 /* vblank irq off -> vupdate irq off */
4496 rc = dm_set_vupdate_irq(crtc, false);
4497 }
4498
4499 if (rc)
4500 return rc;
589d2739
HW
4501
4502 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
a0e30392 4503 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
589d2739
HW
4504}
4505
4506static int dm_enable_vblank(struct drm_crtc *crtc)
4507{
4508 return dm_set_vblank(crtc, true);
4509}
4510
4511static void dm_disable_vblank(struct drm_crtc *crtc)
4512{
4513 dm_set_vblank(crtc, false);
4514}
4515
e7b07cee
HW
4516/* Only the options currently available to the driver are implemented */
4517static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4518 .reset = dm_crtc_reset_state,
4519 .destroy = amdgpu_dm_crtc_destroy,
4520 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4521 .set_config = drm_atomic_helper_set_config,
4522 .page_flip = drm_atomic_helper_page_flip,
4523 .atomic_duplicate_state = dm_crtc_duplicate_state,
4524 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 4525 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 4526 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 4527 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 4528 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
4529 .enable_vblank = dm_enable_vblank,
4530 .disable_vblank = dm_disable_vblank,
e3eff4b5 4531 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
e7b07cee
HW
4532};
4533
4534static enum drm_connector_status
4535amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4536{
4537 bool connected;
c84dec2f 4538 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 4539
1f6010a9
DF
4540 /*
4541 * Notes:
e7b07cee
HW
4542 * 1. This interface is NOT called in context of HPD irq.
4543	 * 2. This interface *is called* in context of user-mode ioctl, which
1f6010a9
DF
4544 * makes it a bad place for *any* MST-related activity.
4545 */
e7b07cee 4546
8580d60b
HW
4547 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4548 !aconnector->fake_enable)
e7b07cee
HW
4549 connected = (aconnector->dc_sink != NULL);
4550 else
4551 connected = (aconnector->base.force == DRM_FORCE_ON);
4552
4553 return (connected ? connector_status_connected :
4554 connector_status_disconnected);
4555}
4556
3ee6b26b
AD
4557int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4558 struct drm_connector_state *connector_state,
4559 struct drm_property *property,
4560 uint64_t val)
e7b07cee
HW
4561{
4562 struct drm_device *dev = connector->dev;
4563 struct amdgpu_device *adev = dev->dev_private;
4564 struct dm_connector_state *dm_old_state =
4565 to_dm_connector_state(connector->state);
4566 struct dm_connector_state *dm_new_state =
4567 to_dm_connector_state(connector_state);
4568
4569 int ret = -EINVAL;
4570
4571 if (property == dev->mode_config.scaling_mode_property) {
4572 enum amdgpu_rmx_type rmx_type;
4573
4574 switch (val) {
4575 case DRM_MODE_SCALE_CENTER:
4576 rmx_type = RMX_CENTER;
4577 break;
4578 case DRM_MODE_SCALE_ASPECT:
4579 rmx_type = RMX_ASPECT;
4580 break;
4581 case DRM_MODE_SCALE_FULLSCREEN:
4582 rmx_type = RMX_FULL;
4583 break;
4584 case DRM_MODE_SCALE_NONE:
4585 default:
4586 rmx_type = RMX_OFF;
4587 break;
4588 }
4589
4590 if (dm_old_state->scaling == rmx_type)
4591 return 0;
4592
4593 dm_new_state->scaling = rmx_type;
4594 ret = 0;
4595 } else if (property == adev->mode_info.underscan_hborder_property) {
4596 dm_new_state->underscan_hborder = val;
4597 ret = 0;
4598 } else if (property == adev->mode_info.underscan_vborder_property) {
4599 dm_new_state->underscan_vborder = val;
4600 ret = 0;
4601 } else if (property == adev->mode_info.underscan_property) {
4602 dm_new_state->underscan_enable = val;
4603 ret = 0;
c1ee92f9
DF
4604 } else if (property == adev->mode_info.abm_level_property) {
4605 dm_new_state->abm_level = val;
4606 ret = 0;
e7b07cee
HW
4607 }
4608
4609 return ret;
4610}
4611
3ee6b26b
AD
4612int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4613 const struct drm_connector_state *state,
4614 struct drm_property *property,
4615 uint64_t *val)
e7b07cee
HW
4616{
4617 struct drm_device *dev = connector->dev;
4618 struct amdgpu_device *adev = dev->dev_private;
4619 struct dm_connector_state *dm_state =
4620 to_dm_connector_state(state);
4621 int ret = -EINVAL;
4622
4623 if (property == dev->mode_config.scaling_mode_property) {
4624 switch (dm_state->scaling) {
4625 case RMX_CENTER:
4626 *val = DRM_MODE_SCALE_CENTER;
4627 break;
4628 case RMX_ASPECT:
4629 *val = DRM_MODE_SCALE_ASPECT;
4630 break;
4631 case RMX_FULL:
4632 *val = DRM_MODE_SCALE_FULLSCREEN;
4633 break;
4634 case RMX_OFF:
4635 default:
4636 *val = DRM_MODE_SCALE_NONE;
4637 break;
4638 }
4639 ret = 0;
4640 } else if (property == adev->mode_info.underscan_hborder_property) {
4641 *val = dm_state->underscan_hborder;
4642 ret = 0;
4643 } else if (property == adev->mode_info.underscan_vborder_property) {
4644 *val = dm_state->underscan_vborder;
4645 ret = 0;
4646 } else if (property == adev->mode_info.underscan_property) {
4647 *val = dm_state->underscan_enable;
4648 ret = 0;
c1ee92f9
DF
4649 } else if (property == adev->mode_info.abm_level_property) {
4650 *val = dm_state->abm_level;
4651 ret = 0;
e7b07cee 4652 }
c1ee92f9 4653
e7b07cee
HW
4654 return ret;
4655}
4656
526c654a
ED
4657static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4658{
4659 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4660
4661 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4662}
4663
7578ecda 4664static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 4665{
c84dec2f 4666 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
4667 const struct dc_link *link = aconnector->dc_link;
4668 struct amdgpu_device *adev = connector->dev->dev_private;
4669 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 4670
e7b07cee
HW
4671#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4672 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4673
89fc8d4e 4674 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
4675 link->type != dc_connection_none &&
4676 dm->backlight_dev) {
4677 backlight_device_unregister(dm->backlight_dev);
4678 dm->backlight_dev = NULL;
e7b07cee
HW
4679 }
4680#endif
dcd5fb82
MF
4681
4682 if (aconnector->dc_em_sink)
4683 dc_sink_release(aconnector->dc_em_sink);
4684 aconnector->dc_em_sink = NULL;
4685 if (aconnector->dc_sink)
4686 dc_sink_release(aconnector->dc_sink);
4687 aconnector->dc_sink = NULL;
4688
e86e8947 4689 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
4690 drm_connector_unregister(connector);
4691 drm_connector_cleanup(connector);
526c654a
ED
4692 if (aconnector->i2c) {
4693 i2c_del_adapter(&aconnector->i2c->base);
4694 kfree(aconnector->i2c);
4695 }
9f656935 4696 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 4697
e7b07cee
HW
4698 kfree(connector);
4699}
4700
4701void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4702{
4703 struct dm_connector_state *state =
4704 to_dm_connector_state(connector->state);
4705
df099b9b
LSL
4706 if (connector->state)
4707 __drm_atomic_helper_connector_destroy_state(connector->state);
4708
e7b07cee
HW
4709 kfree(state);
4710
4711 state = kzalloc(sizeof(*state), GFP_KERNEL);
4712
4713 if (state) {
4714 state->scaling = RMX_OFF;
4715 state->underscan_enable = false;
4716 state->underscan_hborder = 0;
4717 state->underscan_vborder = 0;
01933ba4 4718 state->base.max_requested_bpc = 8;
3261e013
ML
4719 state->vcpi_slots = 0;
4720 state->pbn = 0;
c3e50f89
NK
4721 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4722 state->abm_level = amdgpu_dm_abm_level;
4723
df099b9b 4724 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
4725 }
4726}
4727
3ee6b26b
AD
4728struct drm_connector_state *
4729amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
4730{
4731 struct dm_connector_state *state =
4732 to_dm_connector_state(connector->state);
4733
4734 struct dm_connector_state *new_state =
4735 kmemdup(state, sizeof(*state), GFP_KERNEL);
4736
98e6436d
AK
4737 if (!new_state)
4738 return NULL;
e7b07cee 4739
98e6436d
AK
4740 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4741
4742 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 4743 new_state->abm_level = state->abm_level;
922454c2
NK
4744 new_state->scaling = state->scaling;
4745 new_state->underscan_enable = state->underscan_enable;
4746 new_state->underscan_hborder = state->underscan_hborder;
4747 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
4748 new_state->vcpi_slots = state->vcpi_slots;
4749 new_state->pbn = state->pbn;
98e6436d 4750 return &new_state->base;
e7b07cee
HW
4751}
4752
14f04fa4
AD
4753static int
4754amdgpu_dm_connector_late_register(struct drm_connector *connector)
4755{
4756 struct amdgpu_dm_connector *amdgpu_dm_connector =
4757 to_amdgpu_dm_connector(connector);
bdb9fbc6 4758 int r;
14f04fa4 4759
bdb9fbc6
AD
4760 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4761 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4762 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4763 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4764 if (r)
4765 return r;
4766 }
4767
4768#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
4769 connector_debugfs_init(amdgpu_dm_connector);
4770#endif
4771
4772 return 0;
4773}
4774
e7b07cee
HW
4775static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4776 .reset = amdgpu_dm_connector_funcs_reset,
4777 .detect = amdgpu_dm_connector_detect,
4778 .fill_modes = drm_helper_probe_single_connector_modes,
4779 .destroy = amdgpu_dm_connector_destroy,
4780 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4781 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4782 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 4783 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 4784 .late_register = amdgpu_dm_connector_late_register,
526c654a 4785 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
4786};
4787
e7b07cee
HW
4788static int get_modes(struct drm_connector *connector)
4789{
4790 return amdgpu_dm_connector_get_modes(connector);
4791}
4792
c84dec2f 4793static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
4794{
4795 struct dc_sink_init_data init_params = {
4796 .link = aconnector->dc_link,
4797 .sink_signal = SIGNAL_TYPE_VIRTUAL
4798 };
70e8ffc5 4799 struct edid *edid;
e7b07cee 4800
a89ff457 4801 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
4802	 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4803 aconnector->base.name);
4804
4805 aconnector->base.force = DRM_FORCE_OFF;
4806 aconnector->base.override_edid = false;
4807 return;
4808 }
4809
70e8ffc5
HW
4810 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4811
e7b07cee
HW
4812 aconnector->edid = edid;
4813
4814 aconnector->dc_em_sink = dc_link_add_remote_sink(
4815 aconnector->dc_link,
4816 (uint8_t *)edid,
4817 (edid->extensions + 1) * EDID_LENGTH,
4818 &init_params);
4819
dcd5fb82 4820 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
4821 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4822 aconnector->dc_link->local_sink :
4823 aconnector->dc_em_sink;
dcd5fb82
MF
4824 dc_sink_retain(aconnector->dc_sink);
4825 }
e7b07cee
HW
4826}
4827
c84dec2f 4828static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
4829{
4830 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4831
1f6010a9
DF
4832 /*
4833	 * In case of a headless boot with force-on for a DP managed connector,
e7b07cee
HW
4834	 * those settings have to be != 0 to get an initial modeset.
4835 */
4836 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4837 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4838 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4839 }
4840
4841
4842 aconnector->base.override_edid = true;
4843 create_eml_sink(aconnector);
4844}
4845
ba9ca088 4846enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 4847 struct drm_display_mode *mode)
e7b07cee
HW
4848{
4849 int result = MODE_ERROR;
4850 struct dc_sink *dc_sink;
4851 struct amdgpu_device *adev = connector->dev->dev_private;
4852 /* TODO: Unhardcode stream count */
0971c40e 4853 struct dc_stream_state *stream;
c84dec2f 4854 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
a39438f0 4855 enum dc_status dc_result = DC_OK;
e7b07cee
HW
4856
4857 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4858 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4859 return result;
4860
1f6010a9
DF
4861 /*
4862	 * Only run this the first time mode_valid is called to initialize
e7b07cee
HW
4863 * EDID mgmt
4864 */
4865 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4866 !aconnector->dc_em_sink)
4867 handle_edid_mgmt(aconnector);
4868
c84dec2f 4869 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 4870
b830ebc9 4871 if (dc_sink == NULL) {
e7b07cee
HW
4872 DRM_ERROR("dc_sink is NULL!\n");
4873 goto fail;
4874 }
4875
b333730d 4876 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
b830ebc9 4877 if (stream == NULL) {
e7b07cee
HW
4878 DRM_ERROR("Failed to create stream for sink!\n");
4879 goto fail;
4880 }
4881
a39438f0
HW
4882 dc_result = dc_validate_stream(adev->dm.dc, stream);
4883
4884 if (dc_result == DC_OK)
e7b07cee 4885 result = MODE_OK;
a39438f0 4886 else
9f921b14 4887 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
a39438f0 4888 mode->hdisplay,
26e99ba6 4889 mode->vdisplay,
9f921b14
HW
4890 mode->clock,
4891 dc_result);
e7b07cee
HW
4892
4893 dc_stream_release(stream);
4894
4895fail:
4896	fail:
4897 return result;
4898}
4899
88694af9
NK
4900static int fill_hdr_info_packet(const struct drm_connector_state *state,
4901 struct dc_info_packet *out)
4902{
4903 struct hdmi_drm_infoframe frame;
4904 unsigned char buf[30]; /* 26 + 4 */
4905 ssize_t len;
4906 int ret, i;
4907
4908 memset(out, 0, sizeof(*out));
4909
4910 if (!state->hdr_output_metadata)
4911 return 0;
4912
4913 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4914 if (ret)
4915 return ret;
4916
4917 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4918 if (len < 0)
4919 return (int)len;
4920
4921 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4922 if (len != 30)
4923 return -EINVAL;
4924
4925 /* Prepare the infopacket for DC. */
4926 switch (state->connector->connector_type) {
4927 case DRM_MODE_CONNECTOR_HDMIA:
4928 out->hb0 = 0x87; /* type */
4929 out->hb1 = 0x01; /* version */
4930 out->hb2 = 0x1A; /* length */
4931 out->sb[0] = buf[3]; /* checksum */
4932 i = 1;
4933 break;
4934
4935 case DRM_MODE_CONNECTOR_DisplayPort:
4936 case DRM_MODE_CONNECTOR_eDP:
4937 out->hb0 = 0x00; /* sdp id, zero */
4938 out->hb1 = 0x87; /* type */
4939 out->hb2 = 0x1D; /* payload len - 1 */
4940 out->hb3 = (0x13 << 2); /* sdp version */
4941 out->sb[0] = 0x01; /* version */
4942 out->sb[1] = 0x1A; /* length */
4943 i = 2;
4944 break;
4945
4946 default:
4947 return -EINVAL;
4948 }
4949
4950 memcpy(&out->sb[i], &buf[4], 26);
4951 out->valid = true;
4952
4953 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4954 sizeof(out->sb), false);
4955
4956 return 0;
4957}
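/*
 * Layout note (summarizing the packing above):
 * hdmi_drm_infoframe_pack_only() emits 4 header bytes followed by 26
 * bytes of static metadata, hence the required len of 30. HDMI rebuilds
 * the header as type/version/length (0x87/0x01/0x1A) and keeps buf[3]
 * as the checksum; DP wraps the same 26-byte payload in an SDP header
 * instead.
 */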
4958
4959static bool
4960is_hdr_metadata_different(const struct drm_connector_state *old_state,
4961 const struct drm_connector_state *new_state)
4962{
4963 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4964 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4965
4966 if (old_blob != new_blob) {
4967 if (old_blob && new_blob &&
4968 old_blob->length == new_blob->length)
4969 return memcmp(old_blob->data, new_blob->data,
4970 old_blob->length);
4971
4972 return true;
4973 }
4974
4975 return false;
4976}
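/*
 * Note on the comparison above: when both blobs exist with equal length
 * the memcmp() result is converted to bool, so any byte difference
 * reads as "changed"; identical blob pointers (including two NULLs)
 * read as unchanged, and every other mismatch counts as a change.
 */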
4977
4978static int
4979amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 4980 struct drm_atomic_state *state)
88694af9 4981{
51e857af
SP
4982 struct drm_connector_state *new_con_state =
4983 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
4984 struct drm_connector_state *old_con_state =
4985 drm_atomic_get_old_connector_state(state, conn);
4986 struct drm_crtc *crtc = new_con_state->crtc;
4987 struct drm_crtc_state *new_crtc_state;
4988 int ret;
4989
4990 if (!crtc)
4991 return 0;
4992
4993 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4994 struct dc_info_packet hdr_infopacket;
4995
4996 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4997 if (ret)
4998 return ret;
4999
5000 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5001 if (IS_ERR(new_crtc_state))
5002 return PTR_ERR(new_crtc_state);
5003
5004 /*
5005 * DC considers the stream backends changed if the
5006 * static metadata changes. Forcing the modeset also
5007 * gives a simple way for userspace to switch from
b232d4ed
NK
5008 * 8bpc to 10bpc when setting the metadata to enter
5009 * or exit HDR.
5010 *
5011 * Changing the static metadata after it's been
5012 * set is permissible, however. So only force a
5013 * modeset if we're entering or exiting HDR.
88694af9 5014 */
b232d4ed
NK
5015 new_crtc_state->mode_changed =
5016 !old_con_state->hdr_output_metadata ||
5017 !new_con_state->hdr_output_metadata;
88694af9
NK
5018 }
5019
5020 return 0;
5021}
5022
e7b07cee
HW
5023static const struct drm_connector_helper_funcs
5024amdgpu_dm_connector_helper_funcs = {
5025	/*
1f6010a9 5026	 * If a second, larger display is hotplugged while in fbcon mode, its
b830ebc9 5027	 * higher-resolution modes are filtered out by drm_mode_validate_size()
1f6010a9 5028	 * and go missing once the user starts lightdm. So renew the mode list
b830ebc9
HW
5029	 * in the get_modes callback instead of just returning the mode count.
5030	 */
e7b07cee
HW
5031 .get_modes = get_modes,
5032 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 5033 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
5034};
5035
5036static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5037{
5038}
5039
bc92c065
NK
5040static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5041{
5042 struct drm_device *dev = new_crtc_state->crtc->dev;
5043 struct drm_plane *plane;
5044
5045 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5046 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5047 return true;
5048 }
5049
5050 return false;
5051}
5052
d6ef9b41 5053static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
5054{
5055 struct drm_atomic_state *state = new_crtc_state->state;
5056 struct drm_plane *plane;
5057 int num_active = 0;
5058
5059 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5060 struct drm_plane_state *new_plane_state;
5061
5062 /* Cursor planes are "fake". */
5063 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5064 continue;
5065
5066 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5067
5068 if (!new_plane_state) {
5069 /*
5070			 * The plane is enabled on the CRTC and hasn't changed
5071 * state. This means that it previously passed
5072 * validation and is therefore enabled.
5073 */
5074 num_active += 1;
5075 continue;
5076 }
5077
5078 /* We need a framebuffer to be considered enabled. */
5079 num_active += (new_plane_state->fb != NULL);
5080 }
5081
d6ef9b41
NK
5082 return num_active;
5083}
5084
5085/*
5086 * Sets whether interrupts should be enabled on a specific CRTC.
5087 * We require that the stream be enabled and that there exist active
5088 * DC planes on the stream.
5089 */
5090static void
5091dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5092 struct drm_crtc_state *new_crtc_state)
5093{
5094 struct dm_crtc_state *dm_new_crtc_state =
5095 to_dm_crtc_state(new_crtc_state);
5096
5097 dm_new_crtc_state->active_planes = 0;
5098 dm_new_crtc_state->interrupts_enabled = false;
5099
5100 if (!dm_new_crtc_state->stream)
5101 return;
5102
5103 dm_new_crtc_state->active_planes =
5104 count_crtc_active_planes(new_crtc_state);
5105
5106 dm_new_crtc_state->interrupts_enabled =
5107 dm_new_crtc_state->active_planes > 0;
c14a005c
NK
5108}
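/*
 * A concrete reading of the helper above (illustrative): a CRTC with a
 * stream and a primary plane that has a framebuffer counts one active
 * plane, so its interrupts stay enabled; a CRTC whose only enabled plane
 * is the "fake" cursor counts zero and has interrupts disabled. The same
 * count is what lets dm_crtc_helper_atomic_check() below reject a cursor
 * with no real hardware plane under it.
 */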
5109
3ee6b26b
AD
5110static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5111 struct drm_crtc_state *state)
e7b07cee
HW
5112{
5113 struct amdgpu_device *adev = crtc->dev->dev_private;
5114 struct dc *dc = adev->dm.dc;
5115 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5116 int ret = -EINVAL;
5117
d6ef9b41
NK
5118 /*
5119 * Update interrupt state for the CRTC. This needs to happen whenever
5120 * the CRTC has changed or whenever any of its planes have changed.
5121 * Atomic check satisfies both of these requirements since the CRTC
5122 * is added to the state by DRM during drm_atomic_helper_check_planes.
5123 */
5124 dm_update_crtc_interrupt_state(crtc, state);
5125
9b690ef3
BL
5126 if (unlikely(!dm_crtc_state->stream &&
5127 modeset_required(state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
5128 WARN_ON(1);
5129 return ret;
5130 }
5131
1f6010a9 5132 /* In some use cases, like reset, no stream is attached */
e7b07cee
HW
5133 if (!dm_crtc_state->stream)
5134 return 0;
5135
bc92c065
NK
5136 /*
5137 * We want at least one hardware plane enabled to use
5138 * the stream with a cursor enabled.
5139 */
c14a005c 5140 if (state->enable && state->active &&
bc92c065 5141 does_crtc_have_active_cursor(state) &&
d6ef9b41 5142 dm_crtc_state->active_planes == 0)
c14a005c
NK
5143 return -EINVAL;
5144
62c933f9 5145 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
5146 return 0;
5147
5148 return ret;
5149}
5150
3ee6b26b
AD
5151static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5152 const struct drm_display_mode *mode,
5153 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
5154{
5155 return true;
5156}
5157
5158static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5159 .disable = dm_crtc_helper_disable,
5160 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
5161 .mode_fixup = dm_crtc_helper_mode_fixup,
5162 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
5163};
5164
5165static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5166{
5167
5168}
5169
3261e013
ML
5170static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5171{
5172 switch (display_color_depth) {
5173 case COLOR_DEPTH_666:
5174 return 6;
5175 case COLOR_DEPTH_888:
5176 return 8;
5177 case COLOR_DEPTH_101010:
5178 return 10;
5179 case COLOR_DEPTH_121212:
5180 return 12;
5181 case COLOR_DEPTH_141414:
5182 return 14;
5183 case COLOR_DEPTH_161616:
5184 return 16;
5185 default:
5186 break;
5187 }
5188 return 0;
5189}
5190
3ee6b26b
AD
5191static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5192 struct drm_crtc_state *crtc_state,
5193 struct drm_connector_state *conn_state)
e7b07cee 5194{
3261e013
ML
5195 struct drm_atomic_state *state = crtc_state->state;
5196 struct drm_connector *connector = conn_state->connector;
5197 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5198 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5199 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5200 struct drm_dp_mst_topology_mgr *mst_mgr;
5201 struct drm_dp_mst_port *mst_port;
5202 enum dc_color_depth color_depth;
5203 int clock, bpp = 0;
1bc22f20 5204 bool is_y420 = false;
3261e013
ML
5205
5206 if (!aconnector->port || !aconnector->dc_sink)
5207 return 0;
5208
5209 mst_port = aconnector->port;
5210 mst_mgr = &aconnector->mst_port->mst_mgr;
5211
5212 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5213 return 0;
5214
5215 if (!state->duplicated) {
1bc22f20
SW
5216 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5217 aconnector->force_yuv420_output;
5218 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5219 is_y420);
3261e013
ML
5220 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5221 clock = adjusted_mode->clock;
dc48529f 5222 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
5223 }
5224 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5225 mst_mgr,
5226 mst_port,
1c6c1cb5
ML
5227 dm_new_connector_state->pbn,
5228 0);
3261e013
ML
5229 if (dm_new_connector_state->vcpi_slots < 0) {
5230 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5231 return dm_new_connector_state->vcpi_slots;
5232 }
e7b07cee
HW
5233 return 0;
5234}
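/*
 * Worked example with illustrative numbers: a 1920x1080@60 mode has a
 * pixel clock of 148500 kHz, and at 8 bpc RGB the bpp above is 8 * 3 =
 * 24, so drm_dp_calc_pbn_mode(148500, 24, false) evaluates to roughly
 *
 *   PBN = ceil(148500 * 24 * 64 * 1006 / (8 * 54 * 1000 * 1000)) = 532
 *
 * where 1006/1000 is the DP spec's ~0.6% margin and one PBN unit
 * corresponds to 54/64 MB/s. drm_dp_atomic_find_vcpi_slots() then turns
 * that PBN value into MST time slots against the link's capacity.
 */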
5235
5236const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5237 .disable = dm_encoder_helper_disable,
5238 .atomic_check = dm_encoder_helper_atomic_check
5239};
5240
d9fe1a4c 5241#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
5242static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5243 struct dc_state *dc_state)
5244{
5245 struct dc_stream_state *stream = NULL;
5246 struct drm_connector *connector;
5247 struct drm_connector_state *new_con_state, *old_con_state;
5248 struct amdgpu_dm_connector *aconnector;
5249 struct dm_connector_state *dm_conn_state;
5250 int i, j, clock, bpp;
5251 int vcpi, pbn_div, pbn = 0;
5252
5253 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5254
5255 aconnector = to_amdgpu_dm_connector(connector);
5256
5257 if (!aconnector->port)
5258 continue;
5259
5260 if (!new_con_state || !new_con_state->crtc)
5261 continue;
5262
5263 dm_conn_state = to_dm_connector_state(new_con_state);
5264
5265 for (j = 0; j < dc_state->stream_count; j++) {
5266 stream = dc_state->streams[j];
5267 if (!stream)
5268 continue;
5269
5270 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5271 break;
5272
5273 stream = NULL;
5274 }
5275
5276 if (!stream)
5277 continue;
5278
5279 if (stream->timing.flags.DSC != 1) {
5280 drm_dp_mst_atomic_enable_dsc(state,
5281 aconnector->port,
5282 dm_conn_state->pbn,
5283 0,
5284 false);
5285 continue;
5286 }
5287
5288 pbn_div = dm_mst_get_pbn_divider(stream->link);
5289 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5290 clock = stream->timing.pix_clk_100hz / 10;
5291 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5292 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5293 aconnector->port,
5294 pbn, pbn_div,
5295 true);
5296 if (vcpi < 0)
5297 return vcpi;
5298
5299 dm_conn_state->pbn = pbn;
5300 dm_conn_state->vcpi_slots = vcpi;
5301 }
5302 return 0;
5303}
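/*
 * Note on units, as understood from the helpers used above:
 * dsc_cfg.bits_per_pixel is a fixed-point value in 1/16 bpp increments,
 * which is why drm_dp_calc_pbn_mode() is called with dsc = true so it
 * can scale the value back down internally. A DSC target of 8 bpp, for
 * example, would be carried here as 128.
 */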
d9fe1a4c 5304#endif
29b9ba74 5305
e7b07cee
HW
5306static void dm_drm_plane_reset(struct drm_plane *plane)
5307{
5308 struct dm_plane_state *amdgpu_state = NULL;
5309
5310 if (plane->state)
5311 plane->funcs->atomic_destroy_state(plane, plane->state);
5312
5313 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 5314 WARN_ON(amdgpu_state == NULL);
1f6010a9 5315
7ddaef96
NK
5316 if (amdgpu_state)
5317 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
5318}
5319
5320static struct drm_plane_state *
5321dm_drm_plane_duplicate_state(struct drm_plane *plane)
5322{
5323 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5324
5325 old_dm_plane_state = to_dm_plane_state(plane->state);
5326 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5327 if (!dm_plane_state)
5328 return NULL;
5329
5330 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5331
3be5262e
HW
5332 if (old_dm_plane_state->dc_state) {
5333 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5334 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
5335 }
5336
5337 return &dm_plane_state->base;
5338}
5339
5340void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 5341 struct drm_plane_state *state)
e7b07cee
HW
5342{
5343 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5344
3be5262e
HW
5345 if (dm_plane_state->dc_state)
5346 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 5347
0627bbd3 5348 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
5349}
5350
5351static const struct drm_plane_funcs dm_plane_funcs = {
5352 .update_plane = drm_atomic_helper_update_plane,
5353 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 5354 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
5355 .reset = dm_drm_plane_reset,
5356 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5357 .atomic_destroy_state = dm_drm_plane_destroy_state,
5358};
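/*
 * The three state hooks above follow the usual DRM atomic subclassing
 * pattern, with one driver-specific detail worth noting: a duplicated
 * state shares the dc_plane_state pointer and takes an extra reference
 * via dc_plane_state_retain(), and dm_drm_plane_destroy_state() drops it
 * again, so the DC object lives exactly as long as some plane state
 * still points at it.
 */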
5359
3ee6b26b
AD
5360static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5361 struct drm_plane_state *new_state)
e7b07cee
HW
5362{
5363 struct amdgpu_framebuffer *afb;
5364 struct drm_gem_object *obj;
5d43be0c 5365 struct amdgpu_device *adev;
e7b07cee 5366 struct amdgpu_bo *rbo;
e7b07cee 5367 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
5368 struct list_head list;
5369 struct ttm_validate_buffer tv;
5370 struct ww_acquire_ctx ticket;
e0634e8d 5371 uint64_t tiling_flags;
5d43be0c
CK
5372 uint32_t domain;
5373 int r;
5888f07a 5374 bool tmz_surface = false;
af031f07 5375 bool force_disable_dcc = false;
e7b07cee
HW
5376
5377 dm_plane_state_old = to_dm_plane_state(plane->state);
5378 dm_plane_state_new = to_dm_plane_state(new_state);
5379
5380 if (!new_state->fb) {
f1ad2f5e 5381 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
5382 return 0;
5383 }
5384
5385 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 5386 obj = new_state->fb->obj[0];
e7b07cee 5387 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 5388 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
5389 INIT_LIST_HEAD(&list);
5390
5391 tv.bo = &rbo->tbo;
5392 tv.num_shared = 1;
5393 list_add(&tv.head, &list);
5394
9165fb87 5395 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
5396 if (r) {
5397 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 5398 return r;
0f257b09 5399 }
e7b07cee 5400
5d43be0c 5401 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 5402 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
5403 else
5404 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 5405
7b7c6c81 5406 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 5407 if (unlikely(r != 0)) {
30b7c614
HW
5408 if (r != -ERESTARTSYS)
5409 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 5410 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
5411 return r;
5412 }
5413
bb812f1e
JZ
5414 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5415 if (unlikely(r != 0)) {
5416 amdgpu_bo_unpin(rbo);
0f257b09 5417 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5418 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
5419 return r;
5420 }
7df7e505
NK
5421
5422 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5423
5888f07a
HW
5424 tmz_surface = amdgpu_bo_encrypted(rbo);
5425
0f257b09 5426 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5427
7b7c6c81 5428 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
5429
5430 amdgpu_bo_ref(rbo);
5431
3be5262e
HW
5432 if (dm_plane_state_new->dc_state &&
5433 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5434 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
e7b07cee 5435
af031f07 5436 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
320932bf 5437 fill_plane_buffer_attributes(
695af5f9
NK
5438 adev, afb, plane_state->format, plane_state->rotation,
5439 tiling_flags, &plane_state->tiling_info,
320932bf 5440 &plane_state->plane_size, &plane_state->dcc,
5888f07a 5441 &plane_state->address, tmz_surface,
af031f07 5442 force_disable_dcc);
e7b07cee
HW
5443 }
5444
e7b07cee
HW
5445 return 0;
5446}
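/*
 * A condensed sketch of the ordering above (for orientation only; the
 * function itself is the authoritative sequence with full unwinding):
 *
 *   ttm_eu_reserve_buffers()      reserve the BO
 *   amdgpu_bo_pin(rbo, domain)    VRAM for cursors, else the
 *                                 display-supported domains
 *   amdgpu_ttm_alloc_gart()       make the pin GPU-addressable
 *   amdgpu_bo_get_tiling_flags()  snapshot tiling while still reserved
 *   ttm_eu_backoff_reservation()  drop the reservation
 *
 * Each error path unwinds only what was already done; e.g. a GART
 * failure unpins before backing off the reservation.
 */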
5447
3ee6b26b
AD
5448static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5449 struct drm_plane_state *old_state)
e7b07cee
HW
5450{
5451 struct amdgpu_bo *rbo;
e7b07cee
HW
5452 int r;
5453
5454 if (!old_state->fb)
5455 return;
5456
e68d14dd 5457 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
5458 r = amdgpu_bo_reserve(rbo, false);
5459 if (unlikely(r)) {
5460 DRM_ERROR("failed to reserve rbo before unpin\n");
5461 return;
b830ebc9
HW
5462 }
5463
5464 amdgpu_bo_unpin(rbo);
5465 amdgpu_bo_unreserve(rbo);
5466 amdgpu_bo_unref(&rbo);
e7b07cee
HW
5467}
5468
7578ecda
AD
5469static int dm_plane_atomic_check(struct drm_plane *plane,
5470 struct drm_plane_state *state)
cbd19488
AG
5471{
5472 struct amdgpu_device *adev = plane->dev->dev_private;
5473 struct dc *dc = adev->dm.dc;
78171832 5474 struct dm_plane_state *dm_plane_state;
695af5f9
NK
5475 struct dc_scaling_info scaling_info;
5476 int ret;
78171832
NK
5477
5478 dm_plane_state = to_dm_plane_state(state);
cbd19488 5479
3be5262e 5480 if (!dm_plane_state->dc_state)
9a3329b1 5481 return 0;
cbd19488 5482
695af5f9
NK
5483 ret = fill_dc_scaling_info(state, &scaling_info);
5484 if (ret)
5485 return ret;
a05bcff1 5486
62c933f9 5487 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
5488 return 0;
5489
5490 return -EINVAL;
5491}
5492
674e78ac
NK
5493static int dm_plane_atomic_async_check(struct drm_plane *plane,
5494 struct drm_plane_state *new_plane_state)
5495{
5496 /* Only support async updates on cursor planes. */
5497 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5498 return -EINVAL;
5499
5500 return 0;
5501}
5502
5503static void dm_plane_atomic_async_update(struct drm_plane *plane,
5504 struct drm_plane_state *new_state)
5505{
5506 struct drm_plane_state *old_state =
5507 drm_atomic_get_old_plane_state(new_state->state, plane);
5508
332af874 5509 swap(plane->state->fb, new_state->fb);
674e78ac
NK
5510
5511 plane->state->src_x = new_state->src_x;
5512 plane->state->src_y = new_state->src_y;
5513 plane->state->src_w = new_state->src_w;
5514 plane->state->src_h = new_state->src_h;
5515 plane->state->crtc_x = new_state->crtc_x;
5516 plane->state->crtc_y = new_state->crtc_y;
5517 plane->state->crtc_w = new_state->crtc_w;
5518 plane->state->crtc_h = new_state->crtc_h;
5519
5520 handle_cursor_update(plane, old_state);
5521}
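/*
 * The async path above intentionally bypasses a full atomic commit: it
 * swaps the fb and copies the src/crtc rectangles straight into the
 * current plane state, then reprograms the hardware cursor via
 * handle_cursor_update(). That is only safe because
 * dm_plane_atomic_async_check() restricted async updates to cursor
 * planes.
 */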
5522
e7b07cee
HW
5523static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5524 .prepare_fb = dm_plane_helper_prepare_fb,
5525 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 5526 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
5527 .atomic_async_check = dm_plane_atomic_async_check,
5528 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
5529};
5530
5531/*
5532 * TODO: these are currently initialized to rgb formats only.
5533 * For future use cases we should either initialize them dynamically based on
5534 * plane capabilities, or initialize this array to all formats, so the
1f6010a9 5535 * internal drm check will succeed, and let DC implement the proper checks.
e7b07cee 5536 */
d90371b0 5537static const uint32_t rgb_formats[] = {
e7b07cee
HW
5538 DRM_FORMAT_XRGB8888,
5539 DRM_FORMAT_ARGB8888,
5540 DRM_FORMAT_RGBA8888,
5541 DRM_FORMAT_XRGB2101010,
5542 DRM_FORMAT_XBGR2101010,
5543 DRM_FORMAT_ARGB2101010,
5544 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
5545 DRM_FORMAT_XBGR8888,
5546 DRM_FORMAT_ABGR8888,
46dd9ff7 5547 DRM_FORMAT_RGB565,
e7b07cee
HW
5548};
5549
0d579c7e
NK
5550static const uint32_t overlay_formats[] = {
5551 DRM_FORMAT_XRGB8888,
5552 DRM_FORMAT_ARGB8888,
5553 DRM_FORMAT_RGBA8888,
5554 DRM_FORMAT_XBGR8888,
5555 DRM_FORMAT_ABGR8888,
7267a1a9 5556 DRM_FORMAT_RGB565
e7b07cee
HW
5557};
5558
5559static const u32 cursor_formats[] = {
5560 DRM_FORMAT_ARGB8888
5561};
5562
37c6a93b
NK
5563static int get_plane_formats(const struct drm_plane *plane,
5564 const struct dc_plane_cap *plane_cap,
5565 uint32_t *formats, int max_formats)
e7b07cee 5566{
37c6a93b
NK
5567 int i, num_formats = 0;
5568
5569 /*
5570 * TODO: Query support for each group of formats directly from
5571 * DC plane caps. This will require adding more formats to the
5572 * caps list.
5573 */
e7b07cee 5574
f180b4bc 5575 switch (plane->type) {
e7b07cee 5576 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
5577 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5578 if (num_formats >= max_formats)
5579 break;
5580
5581 formats[num_formats++] = rgb_formats[i];
5582 }
5583
ea36ad34 5584 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 5585 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
5586 if (plane_cap && plane_cap->pixel_format_support.p010)
5587 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
5588 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5589 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5590 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5591 }
e7b07cee 5592 break;
37c6a93b 5593
e7b07cee 5594 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
5595 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5596 if (num_formats >= max_formats)
5597 break;
5598
5599 formats[num_formats++] = overlay_formats[i];
5600 }
e7b07cee 5601 break;
37c6a93b 5602
e7b07cee 5603 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
5604 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5605 if (num_formats >= max_formats)
5606 break;
5607
5608 formats[num_formats++] = cursor_formats[i];
5609 }
e7b07cee
HW
5610 break;
5611 }
5612
37c6a93b
NK
5613 return num_formats;
5614}
5615
5616static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5617 struct drm_plane *plane,
5618 unsigned long possible_crtcs,
5619 const struct dc_plane_cap *plane_cap)
5620{
5621 uint32_t formats[32];
5622 int num_formats;
5623 int res = -EPERM;
5624
5625 num_formats = get_plane_formats(plane, plane_cap, formats,
5626 ARRAY_SIZE(formats));
5627
5628 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5629 &dm_plane_funcs, formats, num_formats,
5630 NULL, plane->type, NULL);
5631 if (res)
5632 return res;
5633
cc1fec57
NK
5634 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5635 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
5636 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5637 BIT(DRM_MODE_BLEND_PREMULTI);
5638
5639 drm_plane_create_alpha_property(plane);
5640 drm_plane_create_blend_mode_property(plane, blend_caps);
5641 }
5642
fc8e5230 5643 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
5644 plane_cap &&
5645 (plane_cap->pixel_format_support.nv12 ||
5646 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
5647 /* This only affects YUV formats. */
5648 drm_plane_create_color_properties(
5649 plane,
5650 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
5651 BIT(DRM_COLOR_YCBCR_BT709) |
5652 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
5653 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5654 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5655 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5656 }
5657
f180b4bc 5658 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 5659
96719c54 5660 /* Create (reset) the plane state */
f180b4bc
HW
5661 if (plane->funcs->reset)
5662 plane->funcs->reset(plane);
96719c54 5663
37c6a93b 5664 return 0;
e7b07cee
HW
5665}
5666
7578ecda
AD
5667static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5668 struct drm_plane *plane,
5669 uint32_t crtc_index)
e7b07cee
HW
5670{
5671 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 5672 struct drm_plane *cursor_plane;
e7b07cee
HW
5673
5674 int res = -ENOMEM;
5675
5676 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5677 if (!cursor_plane)
5678 goto fail;
5679
f180b4bc 5680 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 5681 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
5682
5683 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5684 if (!acrtc)
5685 goto fail;
5686
5687 res = drm_crtc_init_with_planes(
5688 dm->ddev,
5689 &acrtc->base,
5690 plane,
f180b4bc 5691 cursor_plane,
e7b07cee
HW
5692 &amdgpu_dm_crtc_funcs, NULL);
5693
5694 if (res)
5695 goto fail;
5696
5697 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5698
96719c54
HW
5699 /* Create (reset) the plane state */
5700 if (acrtc->base.funcs->reset)
5701 acrtc->base.funcs->reset(&acrtc->base);
5702
e7b07cee
HW
5703 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5704 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5705
5706 acrtc->crtc_id = crtc_index;
5707 acrtc->base.enabled = false;
c37e2d29 5708 acrtc->otg_inst = -1;
e7b07cee
HW
5709
5710 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
5711 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5712 true, MAX_COLOR_LUT_ENTRIES);
086247a4 5713 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e7b07cee
HW
5714
5715 return 0;
5716
5717fail:
b830ebc9
HW
5718 kfree(acrtc);
5719 kfree(cursor_plane);
e7b07cee
HW
5720 return res;
5721}
5722
5723
5724static int to_drm_connector_type(enum signal_type st)
5725{
5726 switch (st) {
5727 case SIGNAL_TYPE_HDMI_TYPE_A:
5728 return DRM_MODE_CONNECTOR_HDMIA;
5729 case SIGNAL_TYPE_EDP:
5730 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
5731 case SIGNAL_TYPE_LVDS:
5732 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
5733 case SIGNAL_TYPE_RGB:
5734 return DRM_MODE_CONNECTOR_VGA;
5735 case SIGNAL_TYPE_DISPLAY_PORT:
5736 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5737 return DRM_MODE_CONNECTOR_DisplayPort;
5738 case SIGNAL_TYPE_DVI_DUAL_LINK:
5739 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5740 return DRM_MODE_CONNECTOR_DVID;
5741 case SIGNAL_TYPE_VIRTUAL:
5742 return DRM_MODE_CONNECTOR_VIRTUAL;
5743
5744 default:
5745 return DRM_MODE_CONNECTOR_Unknown;
5746 }
5747}
5748
2b4c1c05
DV
5749static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5750{
62afb4ad
JRS
5751 struct drm_encoder *encoder;
5752
5753 /* There is only one encoder per connector */
5754 drm_connector_for_each_possible_encoder(connector, encoder)
5755 return encoder;
5756
5757 return NULL;
2b4c1c05
DV
5758}
5759
e7b07cee
HW
5760static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5761{
e7b07cee
HW
5762 struct drm_encoder *encoder;
5763 struct amdgpu_encoder *amdgpu_encoder;
5764
2b4c1c05 5765 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
5766
5767 if (encoder == NULL)
5768 return;
5769
5770 amdgpu_encoder = to_amdgpu_encoder(encoder);
5771
5772 amdgpu_encoder->native_mode.clock = 0;
5773
5774 if (!list_empty(&connector->probed_modes)) {
5775 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 5776
e7b07cee 5777 list_for_each_entry(preferred_mode,
b830ebc9
HW
5778 &connector->probed_modes,
5779 head) {
5780 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5781 amdgpu_encoder->native_mode = *preferred_mode;
5782
e7b07cee
HW
5783 break;
5784 }
5785
5786 }
5787}
5788
3ee6b26b
AD
5789static struct drm_display_mode *
5790amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5791 char *name,
5792 int hdisplay, int vdisplay)
e7b07cee
HW
5793{
5794 struct drm_device *dev = encoder->dev;
5795 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5796 struct drm_display_mode *mode = NULL;
5797 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5798
5799 mode = drm_mode_duplicate(dev, native_mode);
5800
b830ebc9 5801 if (mode == NULL)
e7b07cee
HW
5802 return NULL;
5803
5804 mode->hdisplay = hdisplay;
5805 mode->vdisplay = vdisplay;
5806 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 5807 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
5808
5809 return mode;
5810
5811}
5812
5813static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 5814 struct drm_connector *connector)
e7b07cee
HW
5815{
5816 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5817 struct drm_display_mode *mode = NULL;
5818 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
5819 struct amdgpu_dm_connector *amdgpu_dm_connector =
5820 to_amdgpu_dm_connector(connector);
e7b07cee
HW
5821 int i;
5822 int n;
5823 struct mode_size {
5824 char name[DRM_DISPLAY_MODE_LEN];
5825 int w;
5826 int h;
b830ebc9 5827 } common_modes[] = {
e7b07cee
HW
5828 { "640x480", 640, 480},
5829 { "800x600", 800, 600},
5830 { "1024x768", 1024, 768},
5831 { "1280x720", 1280, 720},
5832 { "1280x800", 1280, 800},
5833 {"1280x1024", 1280, 1024},
5834 { "1440x900", 1440, 900},
5835 {"1680x1050", 1680, 1050},
5836 {"1600x1200", 1600, 1200},
5837 {"1920x1080", 1920, 1080},
5838 {"1920x1200", 1920, 1200}
5839 };
5840
b830ebc9 5841 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
5842
5843 for (i = 0; i < n; i++) {
5844 struct drm_display_mode *curmode = NULL;
5845 bool mode_existed = false;
5846
5847 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
5848 common_modes[i].h > native_mode->vdisplay ||
5849 (common_modes[i].w == native_mode->hdisplay &&
5850 common_modes[i].h == native_mode->vdisplay))
5851 continue;
e7b07cee
HW
5852
5853 list_for_each_entry(curmode, &connector->probed_modes, head) {
5854 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 5855 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
5856 mode_existed = true;
5857 break;
5858 }
5859 }
5860
5861 if (mode_existed)
5862 continue;
5863
5864 mode = amdgpu_dm_create_common_mode(encoder,
5865 common_modes[i].name, common_modes[i].w,
5866 common_modes[i].h);
5867 drm_mode_probed_add(connector, mode);
c84dec2f 5868 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
5869 }
5870}
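/*
 * Example of the filtering above (illustrative): on a panel whose native
 * mode is 1920x1200, entries such as 1280x800 and 1920x1080 are added
 * because they fit inside the native mode, 1920x1200 itself is skipped
 * as equal to the native mode, and anything the EDID already provided is
 * skipped by the mode_existed check.
 */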
5871
3ee6b26b
AD
5872static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5873 struct edid *edid)
e7b07cee 5874{
c84dec2f
HW
5875 struct amdgpu_dm_connector *amdgpu_dm_connector =
5876 to_amdgpu_dm_connector(connector);
e7b07cee
HW
5877
5878 if (edid) {
5879 /* empty probed_modes */
5880 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 5881 amdgpu_dm_connector->num_modes =
e7b07cee
HW
5882 drm_add_edid_modes(connector, edid);
5883
f1e5e913
YMM
5884		/* Sort the probed modes before calling
5885		 * amdgpu_dm_get_native_mode(), since an EDID can have
5886		 * more than one preferred mode. Modes later in the
5887		 * probed mode list can carry a higher preferred
5888		 * resolution; for example, 3840x2160 in the base EDID
5889		 * preferred timing and 4096x2160 in a DID extension
5890		 * block that follows.
5891		 */
5892 drm_mode_sort(&connector->probed_modes);
e7b07cee 5893 amdgpu_dm_get_native_mode(connector);
a8d8d3dc 5894 } else {
c84dec2f 5895 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 5896 }
e7b07cee
HW
5897}
5898
7578ecda 5899static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 5900{
c84dec2f
HW
5901 struct amdgpu_dm_connector *amdgpu_dm_connector =
5902 to_amdgpu_dm_connector(connector);
e7b07cee 5903 struct drm_encoder *encoder;
c84dec2f 5904 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 5905
2b4c1c05 5906 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 5907
85ee15d6 5908 if (!edid || !drm_edid_is_valid(edid)) {
1b369d3c
ML
5909 amdgpu_dm_connector->num_modes =
5910 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
5911 } else {
5912 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5913 amdgpu_dm_connector_add_common_modes(encoder, connector);
5914 }
3e332d3a 5915 amdgpu_dm_fbc_init(connector);
5099114b 5916
c84dec2f 5917 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
5918}
5919
3ee6b26b
AD
5920void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5921 struct amdgpu_dm_connector *aconnector,
5922 int connector_type,
5923 struct dc_link *link,
5924 int link_index)
e7b07cee
HW
5925{
5926 struct amdgpu_device *adev = dm->ddev->dev_private;
5927
f04bee34
NK
5928 /*
5929 * Some of the properties below require access to state, like bpc.
5930 * Allocate some default initial connector state with our reset helper.
5931 */
5932 if (aconnector->base.funcs->reset)
5933 aconnector->base.funcs->reset(&aconnector->base);
5934
e7b07cee
HW
5935 aconnector->connector_id = link_index;
5936 aconnector->dc_link = link;
5937 aconnector->base.interlace_allowed = false;
5938 aconnector->base.doublescan_allowed = false;
5939 aconnector->base.stereo_allowed = false;
5940 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5941 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 5942 aconnector->audio_inst = -1;
e7b07cee
HW
5943 mutex_init(&aconnector->hpd_lock);
5944
1f6010a9
DF
5945	/*
5946	 * Configure HPD hot-plug support. connector->polled defaults to 0,
b830ebc9
HW
5947	 * which means HPD hot plug is not supported.
5948	 */
e7b07cee
HW
5949 switch (connector_type) {
5950 case DRM_MODE_CONNECTOR_HDMIA:
5951 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 5952 aconnector->base.ycbcr_420_allowed =
9ea59d5a 5953 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
5954 break;
5955 case DRM_MODE_CONNECTOR_DisplayPort:
5956 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 5957 aconnector->base.ycbcr_420_allowed =
9ea59d5a 5958 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
5959 break;
5960 case DRM_MODE_CONNECTOR_DVID:
5961 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5962 break;
5963 default:
5964 break;
5965 }
5966
5967 drm_object_attach_property(&aconnector->base.base,
5968 dm->ddev->mode_config.scaling_mode_property,
5969 DRM_MODE_SCALE_NONE);
5970
5971 drm_object_attach_property(&aconnector->base.base,
5972 adev->mode_info.underscan_property,
5973 UNDERSCAN_OFF);
5974 drm_object_attach_property(&aconnector->base.base,
5975 adev->mode_info.underscan_hborder_property,
5976 0);
5977 drm_object_attach_property(&aconnector->base.base,
5978 adev->mode_info.underscan_vborder_property,
5979 0);
1825fd34 5980
b754c07a
JFZ
5981 if (!aconnector->mst_port)
5982 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 5983
4a8ca46b
RL
5984 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5985 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5986 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 5987
c1ee92f9
DF
5988 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5989 dc_is_dmcu_initialized(adev->dm.dc)) {
5990 drm_object_attach_property(&aconnector->base.base,
5991 adev->mode_info.abm_level_property, 0);
5992 }
bb47de73
NK
5993
5994 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
5995 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5996 connector_type == DRM_MODE_CONNECTOR_eDP) {
88694af9
NK
5997 drm_object_attach_property(
5998 &aconnector->base.base,
5999 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6000
b754c07a
JFZ
6001 if (!aconnector->mst_port)
6002 drm_connector_attach_vrr_capable_property(&aconnector->base);
6003
0c8620d6 6004#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 6005 if (adev->dm.hdcp_workqueue)
53e108aa 6006 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 6007#endif
bb47de73 6008 }
e7b07cee
HW
6009}
6010
7578ecda
AD
6011static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6012 struct i2c_msg *msgs, int num)
e7b07cee
HW
6013{
6014 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6015 struct ddc_service *ddc_service = i2c->ddc_service;
6016 struct i2c_command cmd;
6017 int i;
6018 int result = -EIO;
6019
b830ebc9 6020 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
6021
6022 if (!cmd.payloads)
6023 return result;
6024
6025 cmd.number_of_payloads = num;
6026 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6027 cmd.speed = 100;
6028
6029 for (i = 0; i < num; i++) {
6030 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6031 cmd.payloads[i].address = msgs[i].addr;
6032 cmd.payloads[i].length = msgs[i].len;
6033 cmd.payloads[i].data = msgs[i].buf;
6034 }
6035
c85e6e54
DF
6036 if (dc_submit_i2c(
6037 ddc_service->ctx->dc,
6038 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
6039 &cmd))
6040 result = num;
6041
6042 kfree(cmd.payloads);
6043 return result;
6044}
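/*
 * Worked example (illustrative): a 128-byte EDID block read reaches this
 * function as two i2c_msg entries - a write of the 0x00 offset to
 * address 0x50 followed by a 128-byte read - and the loop above maps
 * them one-to-one onto i2c_payload entries, with .write derived from the
 * absence of I2C_M_RD, submitted as a single dc_submit_i2c() command.
 * Returning num on success matches the i2c_transfer() contract.
 */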
6045
7578ecda 6046static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
6047{
6048 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6049}
6050
6051static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6052 .master_xfer = amdgpu_dm_i2c_xfer,
6053 .functionality = amdgpu_dm_i2c_func,
6054};
6055
3ee6b26b
AD
6056static struct amdgpu_i2c_adapter *
6057create_i2c(struct ddc_service *ddc_service,
6058 int link_index,
6059 int *res)
e7b07cee
HW
6060{
6061 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6062 struct amdgpu_i2c_adapter *i2c;
6063
b830ebc9 6064 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
6065 if (!i2c)
6066 return NULL;
e7b07cee
HW
6067 i2c->base.owner = THIS_MODULE;
6068 i2c->base.class = I2C_CLASS_DDC;
6069 i2c->base.dev.parent = &adev->pdev->dev;
6070 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 6071 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
6072 i2c_set_adapdata(&i2c->base, i2c);
6073 i2c->ddc_service = ddc_service;
c85e6e54 6074 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
6075
6076 return i2c;
6077}
6078
89fc8d4e 6079
1f6010a9
DF
6080/*
6081 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
6082 * dc_link which will be represented by this aconnector.
6083 */
7578ecda
AD
6084static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6085 struct amdgpu_dm_connector *aconnector,
6086 uint32_t link_index,
6087 struct amdgpu_encoder *aencoder)
e7b07cee
HW
6088{
6089 int res = 0;
6090 int connector_type;
6091 struct dc *dc = dm->dc;
6092 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6093 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
6094
6095 link->priv = aconnector;
e7b07cee 6096
f1ad2f5e 6097 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
6098
6099 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
6100 if (!i2c) {
6101 DRM_ERROR("Failed to create i2c adapter data\n");
6102 return -ENOMEM;
6103 }
6104
e7b07cee
HW
6105 aconnector->i2c = i2c;
6106 res = i2c_add_adapter(&i2c->base);
6107
6108 if (res) {
6109 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6110 goto out_free;
6111 }
6112
6113 connector_type = to_drm_connector_type(link->connector_signal);
6114
17165de2 6115 res = drm_connector_init_with_ddc(
e7b07cee
HW
6116 dm->ddev,
6117 &aconnector->base,
6118 &amdgpu_dm_connector_funcs,
17165de2
AP
6119 connector_type,
6120 &i2c->base);
e7b07cee
HW
6121
6122 if (res) {
6123 DRM_ERROR("connector_init failed\n");
6124 aconnector->connector_id = -1;
6125 goto out_free;
6126 }
6127
6128 drm_connector_helper_add(
6129 &aconnector->base,
6130 &amdgpu_dm_connector_helper_funcs);
6131
6132 amdgpu_dm_connector_init_helper(
6133 dm,
6134 aconnector,
6135 connector_type,
6136 link,
6137 link_index);
6138
cde4c44d 6139 drm_connector_attach_encoder(
e7b07cee
HW
6140 &aconnector->base, &aencoder->base);
6141
e7b07cee
HW
6142 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6143 || connector_type == DRM_MODE_CONNECTOR_eDP)
9f656935 6144 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 6145
e7b07cee
HW
6146out_free:
6147 if (res) {
6148 kfree(i2c);
6149 aconnector->i2c = NULL;
6150 }
6151 return res;
6152}
6153
6154int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6155{
6156 switch (adev->mode_info.num_crtc) {
6157 case 1:
6158 return 0x1;
6159 case 2:
6160 return 0x3;
6161 case 3:
6162 return 0x7;
6163 case 4:
6164 return 0xf;
6165 case 5:
6166 return 0x1f;
6167 case 6:
6168 default:
6169 return 0x3f;
6170 }
6171}
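/*
 * The switch above is simply (1 << num_crtc) - 1 clamped at the 6-CRTC
 * maximum, e.g. 4 CRTCs -> 0b1111 = 0xf; it is written out so any
 * unexpected num_crtc value falls through to the 0x3f default.
 */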
6172
7578ecda
AD
6173static int amdgpu_dm_encoder_init(struct drm_device *dev,
6174 struct amdgpu_encoder *aencoder,
6175 uint32_t link_index)
e7b07cee
HW
6176{
6177 struct amdgpu_device *adev = dev->dev_private;
6178
6179 int res = drm_encoder_init(dev,
6180 &aencoder->base,
6181 &amdgpu_dm_encoder_funcs,
6182 DRM_MODE_ENCODER_TMDS,
6183 NULL);
6184
6185 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6186
6187 if (!res)
6188 aencoder->encoder_id = link_index;
6189 else
6190 aencoder->encoder_id = -1;
6191
6192 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6193
6194 return res;
6195}
6196
3ee6b26b
AD
6197static void manage_dm_interrupts(struct amdgpu_device *adev,
6198 struct amdgpu_crtc *acrtc,
6199 bool enable)
e7b07cee
HW
6200{
6201 /*
6202	 * This is not a correct translation, but it works as long as the
6203	 * VBLANK constant is the same as PFLIP.
6204 */
6205 int irq_type =
734dd01d 6206 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
6207 adev,
6208 acrtc->crtc_id);
6209
6210 if (enable) {
6211 drm_crtc_vblank_on(&acrtc->base);
6212 amdgpu_irq_get(
6213 adev,
6214 &adev->pageflip_irq,
6215 irq_type);
6216 } else {
6217
6218 amdgpu_irq_put(
6219 adev,
6220 &adev->pageflip_irq,
6221 irq_type);
6222 drm_crtc_vblank_off(&acrtc->base);
6223 }
6224}
6225
3ee6b26b
AD
6226static bool
6227is_scaling_state_different(const struct dm_connector_state *dm_state,
6228 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
6229{
6230 if (dm_state->scaling != old_dm_state->scaling)
6231 return true;
6232 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6233 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6234 return true;
6235 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6236 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6237 return true;
b830ebc9
HW
6238 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6239 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6240 return true;
e7b07cee
HW
6241 return false;
6242}
6243
0c8620d6
BL
6244#ifdef CONFIG_DRM_AMD_DC_HDCP
6245static bool is_content_protection_different(struct drm_connector_state *state,
6246 const struct drm_connector_state *old_state,
6247 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6248{
6249 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6250
53e108aa
BL
6251 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6252 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6253 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6254 return true;
6255 }
6256
0c8620d6
BL
6257	/* CP is being re-enabled, ignore this */
6258 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6259 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6260 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6261 return false;
6262 }
6263
6264 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6265 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6266 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6267 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6268
6269	/* Check if something is connected or enabled; otherwise we would start
6270	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
6271	 */
6272 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6273 aconnector->dc_sink != NULL)
6274 return true;
6275
6276 if (old_state->content_protection == state->content_protection)
6277 return false;
6278
6279 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6280 return true;
6281
6282 return false;
6283}
6284
0c8620d6 6285#endif
3ee6b26b
AD
6286static void remove_stream(struct amdgpu_device *adev,
6287 struct amdgpu_crtc *acrtc,
6288 struct dc_stream_state *stream)
e7b07cee
HW
6289{
6290 /* this is the update mode case */
e7b07cee
HW
6291
6292 acrtc->otg_inst = -1;
6293 acrtc->enabled = false;
6294}
6295
7578ecda
AD
6296static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6297 struct dc_cursor_position *position)
2a8f6ccb 6298{
f4c2cc43 6299 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
6300 int x, y;
6301 int xorigin = 0, yorigin = 0;
6302
e371e19c
NK
6303 position->enable = false;
6304 position->x = 0;
6305 position->y = 0;
6306
6307 if (!crtc || !plane->state->fb)
2a8f6ccb 6308 return 0;
2a8f6ccb
HW
6309
6310 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6311 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6312 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6313 __func__,
6314 plane->state->crtc_w,
6315 plane->state->crtc_h);
6316 return -EINVAL;
6317 }
6318
6319 x = plane->state->crtc_x;
6320 y = plane->state->crtc_y;
c14a005c 6321
e371e19c
NK
6322 if (x <= -amdgpu_crtc->max_cursor_width ||
6323 y <= -amdgpu_crtc->max_cursor_height)
6324 return 0;
6325
2a8f6ccb
HW
6326 if (x < 0) {
6327 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6328 x = 0;
6329 }
6330 if (y < 0) {
6331 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6332 y = 0;
6333 }
6334 position->enable = true;
033baeee 6335 position->translate_by_source = true;
2a8f6ccb
HW
6336 position->x = x;
6337 position->y = y;
6338 position->x_hotspot = xorigin;
6339 position->y_hotspot = yorigin;
6340
6341 return 0;
6342}
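/*
 * Worked example of the clamping above (illustrative): a 64x64 cursor at
 * crtc_x = -10 yields xorigin = 10 and x = 0, i.e. the cursor plane is
 * parked at the screen edge with its hotspot shifted so the visible
 * portion still lines up. Only when the cursor moves past
 * -max_cursor_width entirely is it left disabled.
 */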
6343
3ee6b26b
AD
6344static void handle_cursor_update(struct drm_plane *plane,
6345 struct drm_plane_state *old_plane_state)
e7b07cee 6346{
674e78ac 6347 struct amdgpu_device *adev = plane->dev->dev_private;
2a8f6ccb
HW
6348 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6349 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6350 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6351 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6352 uint64_t address = afb ? afb->address : 0;
6353 struct dc_cursor_position position;
6354 struct dc_cursor_attributes attributes;
6355 int ret;
6356
e7b07cee
HW
6357 if (!plane->state->fb && !old_plane_state->fb)
6358 return;
6359
f1ad2f5e 6360 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
c12a7ba5
HW
6361 __func__,
6362 amdgpu_crtc->crtc_id,
6363 plane->state->crtc_w,
6364 plane->state->crtc_h);
2a8f6ccb
HW
6365
6366 ret = get_cursor_position(plane, crtc, &position);
6367 if (ret)
6368 return;
6369
6370 if (!position.enable) {
6371 /* turn off cursor */
674e78ac
NK
6372 if (crtc_state && crtc_state->stream) {
6373 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
6374 dc_stream_set_cursor_position(crtc_state->stream,
6375 &position);
674e78ac
NK
6376 mutex_unlock(&adev->dm.dc_lock);
6377 }
2a8f6ccb 6378 return;
e7b07cee 6379 }
e7b07cee 6380
2a8f6ccb
HW
6381 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6382 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6383
c1cefe11 6384 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
6385 attributes.address.high_part = upper_32_bits(address);
6386 attributes.address.low_part = lower_32_bits(address);
6387 attributes.width = plane->state->crtc_w;
6388 attributes.height = plane->state->crtc_h;
6389 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6390 attributes.rotation_angle = 0;
6391 attributes.attribute_flags.value = 0;
6392
6393 attributes.pitch = attributes.width;
6394
886daac9 6395 if (crtc_state->stream) {
674e78ac 6396 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
6397 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6398 &attributes))
6399 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 6400
2a8f6ccb
HW
6401 if (!dc_stream_set_cursor_position(crtc_state->stream,
6402 &position))
6403 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 6404 mutex_unlock(&adev->dm.dc_lock);
886daac9 6405 }
2a8f6ccb 6406}
e7b07cee
HW
6407
6408static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6409{
6410
6411 assert_spin_locked(&acrtc->base.dev->event_lock);
6412 WARN_ON(acrtc->event);
6413
6414 acrtc->event = acrtc->base.state->event;
6415
6416 /* Set the flip status */
6417 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6418
6419 /* Mark this event as consumed */
6420 acrtc->base.state->event = NULL;
6421
6422 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6423 acrtc->crtc_id);
6424}
6425
bb47de73
NK
6426static void update_freesync_state_on_stream(
6427 struct amdgpu_display_manager *dm,
6428 struct dm_crtc_state *new_crtc_state,
180db303
NK
6429 struct dc_stream_state *new_stream,
6430 struct dc_plane_state *surface,
6431 u32 flip_timestamp_in_us)
bb47de73 6432{
09aef2c4 6433 struct mod_vrr_params vrr_params;
bb47de73 6434 struct dc_info_packet vrr_infopacket = {0};
09aef2c4
MK
6435 struct amdgpu_device *adev = dm->adev;
6436 unsigned long flags;
bb47de73
NK
6437
6438 if (!new_stream)
6439 return;
6440
6441 /*
6442 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6443 * For now it's sufficient to just guard against these conditions.
6444 */
6445
6446 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6447 return;
6448
09aef2c4
MK
6449 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6450 vrr_params = new_crtc_state->vrr_params;
6451
180db303
NK
6452 if (surface) {
6453 mod_freesync_handle_preflip(
6454 dm->freesync_module,
6455 surface,
6456 new_stream,
6457 flip_timestamp_in_us,
6458 &vrr_params);
09aef2c4
MK
6459
6460 if (adev->family < AMDGPU_FAMILY_AI &&
6461 amdgpu_dm_vrr_active(new_crtc_state)) {
6462 mod_freesync_handle_v_update(dm->freesync_module,
6463 new_stream, &vrr_params);
e63e2491
EB
6464
6465 /* Need to call this before the frame ends. */
6466 dc_stream_adjust_vmin_vmax(dm->dc,
6467 new_crtc_state->stream,
6468 &vrr_params.adjust);
09aef2c4 6469 }
180db303 6470 }
bb47de73
NK
6471
6472 mod_freesync_build_vrr_infopacket(
6473 dm->freesync_module,
6474 new_stream,
180db303 6475 &vrr_params,
ecd0136b
HT
6476 PACKET_TYPE_VRR,
6477 TRANSFER_FUNC_UNKNOWN,
bb47de73
NK
6478 &vrr_infopacket);
6479
8a48b44c 6480 new_crtc_state->freesync_timing_changed |=
180db303
NK
6481 (memcmp(&new_crtc_state->vrr_params.adjust,
6482 &vrr_params.adjust,
6483 sizeof(vrr_params.adjust)) != 0);
bb47de73 6484
8a48b44c 6485 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
6486 (memcmp(&new_crtc_state->vrr_infopacket,
6487 &vrr_infopacket,
6488 sizeof(vrr_infopacket)) != 0);
6489
180db303 6490 new_crtc_state->vrr_params = vrr_params;
bb47de73
NK
6491 new_crtc_state->vrr_infopacket = vrr_infopacket;
6492
180db303 6493 new_stream->adjust = new_crtc_state->vrr_params.adjust;
bb47de73
NK
6494 new_stream->vrr_infopacket = vrr_infopacket;
6495
6496 if (new_crtc_state->freesync_vrr_info_changed)
6497 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6498 new_crtc_state->base.crtc->base.id,
6499 (int)new_crtc_state->base.vrr_enabled,
180db303 6500 (int)vrr_params.state);
09aef2c4
MK
6501
6502 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
bb47de73
NK
6503}
6504
e854194c
MK
6505static void pre_update_freesync_state_on_stream(
6506 struct amdgpu_display_manager *dm,
6507 struct dm_crtc_state *new_crtc_state)
6508{
6509 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 6510 struct mod_vrr_params vrr_params;
e854194c 6511 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4
MK
6512 struct amdgpu_device *adev = dm->adev;
6513 unsigned long flags;
e854194c
MK
6514
6515 if (!new_stream)
6516 return;
6517
6518 /*
6519 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6520 * For now it's sufficient to just guard against these conditions.
6521 */
6522 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6523 return;
6524
09aef2c4
MK
6525 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6526 vrr_params = new_crtc_state->vrr_params;
6527
e854194c
MK
6528 if (new_crtc_state->vrr_supported &&
6529 config.min_refresh_in_uhz &&
6530 config.max_refresh_in_uhz) {
6531 config.state = new_crtc_state->base.vrr_enabled ?
6532 VRR_STATE_ACTIVE_VARIABLE :
6533 VRR_STATE_INACTIVE;
6534 } else {
6535 config.state = VRR_STATE_UNSUPPORTED;
6536 }
6537
6538 mod_freesync_build_vrr_params(dm->freesync_module,
6539 new_stream,
6540 &config, &vrr_params);
6541
6542 new_crtc_state->freesync_timing_changed |=
6543 (memcmp(&new_crtc_state->vrr_params.adjust,
6544 &vrr_params.adjust,
6545 sizeof(vrr_params.adjust)) != 0);
6546
6547 new_crtc_state->vrr_params = vrr_params;
09aef2c4 6548 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
e854194c
MK
6549}
6550
66b0c973
MK
6551static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6552 struct dm_crtc_state *new_state)
6553{
6554 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6555 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6556
6557 if (!old_vrr_active && new_vrr_active) {
6558 /* Transition VRR inactive -> active:
6559 * While VRR is active, we must not disable vblank irq, as a
6560 * reenable after disable would compute bogus vblank/pflip
6561 * timestamps if it likely happened inside display front-porch.
d2574c33
MK
6562 *
6563 * We also need vupdate irq for the actual core vblank handling
6564 * at end of vblank.
66b0c973 6565 */
d2574c33 6566 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
6567 drm_crtc_vblank_get(new_state->base.crtc);
6568 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6569 __func__, new_state->base.crtc->base.id);
6570 } else if (old_vrr_active && !new_vrr_active) {
6571 /* Transition VRR active -> inactive:
6572 * Allow vblank irq disable again for fixed refresh rate.
6573 */
d2574c33 6574 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
6575 drm_crtc_vblank_put(new_state->base.crtc);
6576 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6577 __func__, new_state->base.crtc->base.id);
6578 }
6579}
6580
8ad27806
NK
6581static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6582{
6583 struct drm_plane *plane;
6584 struct drm_plane_state *old_plane_state, *new_plane_state;
6585 int i;
6586
6587 /*
6588 * TODO: Make this per-stream so we don't issue redundant updates for
6589 * commits with multiple streams.
6590 */
6591 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6592 new_plane_state, i)
6593 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6594 handle_cursor_update(plane, old_plane_state);
6595}
6596
3be5262e 6597static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 6598 struct dc_state *dc_state,
3ee6b26b
AD
6599 struct drm_device *dev,
6600 struct amdgpu_display_manager *dm,
6601 struct drm_crtc *pcrtc,
420cd472 6602 bool wait_for_vblank)
e7b07cee 6603{
570c91d5 6604 uint32_t i;
8a48b44c 6605 uint64_t timestamp_ns;
e7b07cee 6606 struct drm_plane *plane;
0bc9706d 6607 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 6608 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
6609 struct drm_crtc_state *new_pcrtc_state =
6610 drm_atomic_get_new_crtc_state(state, pcrtc);
6611 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
6612 struct dm_crtc_state *dm_old_crtc_state =
6613 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 6614 int planes_count = 0, vpos, hpos;
570c91d5 6615 long r;
e7b07cee 6616 unsigned long flags;
8a48b44c 6617 struct amdgpu_bo *abo;
09e5665a 6618 uint64_t tiling_flags;
5888f07a 6619 bool tmz_surface = false;
fdd1fe57
MK
6620 uint32_t target_vblank, last_flip_vblank;
6621 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 6622 bool pflip_present = false;
bc7f670e
DF
6623 struct {
6624 struct dc_surface_update surface_updates[MAX_SURFACES];
6625 struct dc_plane_info plane_infos[MAX_SURFACES];
6626 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 6627 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 6628 struct dc_stream_update stream_update;
74aa7bd4 6629 } *bundle;
bc7f670e 6630
74aa7bd4 6631 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 6632
74aa7bd4
DF
6633 if (!bundle) {
6634 dm_error("Failed to allocate update bundle\n");
4b510503
NK
6635 goto cleanup;
6636 }
e7b07cee 6637
8ad27806
NK
6638 /*
6639 * Disable the cursor first if we're disabling all the planes.
6640 * It'll remain on the screen after the planes are re-enabled
6641 * if we don't.
6642 */
6643 if (acrtc_state->active_planes == 0)
6644 amdgpu_dm_commit_cursors(state);
6645
e7b07cee 6646 /* update planes when needed */
0bc9706d
LSL
6647 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6648 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 6649 struct drm_crtc_state *new_crtc_state;
0bc9706d 6650 struct drm_framebuffer *fb = new_plane_state->fb;
34bafd27 6651 bool plane_needs_flip;
c7af5f77 6652 struct dc_plane_state *dc_plane;
54d76575 6653 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 6654
80c218d5
NK
6655 /* Cursor plane is handled after stream updates */
6656 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 6657 continue;
e7b07cee 6658
f5ba60fe
DD
6659 if (!fb || !crtc || pcrtc != crtc)
6660 continue;
6661
6662 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6663 if (!new_crtc_state->active)
e7b07cee
HW
6664 continue;
6665
bc7f670e 6666 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 6667
74aa7bd4 6668 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 6669 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
6670 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6671 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 6672 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 6673 }
8a48b44c 6674
695af5f9
NK
6675 fill_dc_scaling_info(new_plane_state,
6676 &bundle->scaling_infos[planes_count]);
8a48b44c 6677
695af5f9
NK
6678 bundle->surface_updates[planes_count].scaling_info =
6679 &bundle->scaling_infos[planes_count];
8a48b44c 6680
f5031000 6681 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 6682
f5031000 6683 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 6684
f5031000
DF
6685 if (!plane_needs_flip) {
6686 planes_count += 1;
6687 continue;
6688 }
8a48b44c 6689
2fac0f53
CK
6690 abo = gem_to_amdgpu_bo(fb->obj[0]);
6691
f8308898
AG
6692 /*
 6693 * Wait for all fences on this FB. Use a bounded wait to avoid
 6694 * deadlock during GPU reset, when this fence will not signal
 6695 * but we hold the reservation lock for the BO.
6696 */
52791eee 6697 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 6698 false,
f8308898
AG
6699 msecs_to_jiffies(5000));
6700 if (unlikely(r <= 0))
ed8a5fb2 6701 DRM_ERROR("Waiting for fences timed out!\n");
2fac0f53 6702
f5031000
DF
6703 /*
 6704 * TODO: This might fail and hence is better not used; wait
 6705 * explicitly on fences instead. In general this should
 6706 * also be done for blocking commits,
 6707 * as per the framework helpers.
6708 */
f5031000 6709 r = amdgpu_bo_reserve(abo, true);
f8308898 6710 if (unlikely(r != 0))
f5031000 6711 DRM_ERROR("failed to reserve buffer before flip\n");
8a48b44c 6712
f5031000 6713 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
8a48b44c 6714
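	/*
	 * Record whether the BO is TMZ-encrypted; this is passed to DC below
	 * when filling the plane info so the surface is programmed correctly.
	 */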
5888f07a
HW
6715 tmz_surface = amdgpu_bo_encrypted(abo);
6716
f5031000 6717 amdgpu_bo_unreserve(abo);
8a48b44c 6718
695af5f9
NK
6719 fill_dc_plane_info_and_addr(
6720 dm->adev, new_plane_state, tiling_flags,
6721 &bundle->plane_infos[planes_count],
af031f07 6722 &bundle->flip_addrs[planes_count].address,
5888f07a 6723 tmz_surface,
af031f07
RS
6724 false);
6725
6726 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6727 new_plane_state->plane->index,
6728 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
6729
6730 bundle->surface_updates[planes_count].plane_info =
6731 &bundle->plane_infos[planes_count];
8a48b44c 6732
caff0e66
NK
6733 /*
6734 * Only allow immediate flips for fast updates that don't
 6735 * change FB pitch, DCC state, rotation or mirroring.
6736 */
f5031000 6737 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 6738 crtc->state->async_flip &&
caff0e66 6739 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 6740
f5031000
DF
6741 timestamp_ns = ktime_get_ns();
6742 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6743 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6744 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 6745
f5031000
DF
6746 if (!bundle->surface_updates[planes_count].surface) {
6747 DRM_ERROR("No surface for CRTC: id=%d\n",
6748 acrtc_attach->crtc_id);
6749 continue;
bc7f670e
DF
6750 }
6751
f5031000
DF
6752 if (plane == pcrtc->primary)
6753 update_freesync_state_on_stream(
6754 dm,
6755 acrtc_state,
6756 acrtc_state->stream,
6757 dc_plane,
6758 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 6759
f5031000
DF
6760 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6761 __func__,
6762 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6763 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
6764
6765 planes_count += 1;
6766
8a48b44c
DF
6767 }
6768
74aa7bd4 6769 if (pflip_present) {
634092b1
MK
6770 if (!vrr_active) {
6771 /* Use old throttling in non-vrr fixed refresh rate mode
6772 * to keep flip scheduling based on target vblank counts
6773 * working in a backwards compatible way, e.g., for
6774 * clients using the GLX_OML_sync_control extension or
6775 * DRI3/Present extension with defined target_msc.
6776 */
e3eff4b5 6777 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
6778 }
6779 else {
6780 /* For variable refresh rate mode only:
6781 * Get vblank of last completed flip to avoid > 1 vrr
6782 * flips per video frame by use of throttling, but allow
6783 * flip programming anywhere in the possibly large
6784 * variable vrr vblank interval for fine-grained flip
6785 * timing control and more opportunity to avoid stutter
6786 * on late submission of flips.
6787 */
6788 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6789 last_flip_vblank = acrtc_attach->last_flip_vblank;
6790 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6791 }
6792
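		/*
		 * wait_for_vblank is a bool, so the flip targets either the
		 * vblank of the last completed flip (0) or the one after it
		 * (1), e.g. last_flip_vblank 1000 -> target_vblank 1001.
		 */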
fdd1fe57 6793 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
6794
6795 /*
6796 * Wait until we're out of the vertical blank period before the one
6797 * targeted by the flip
6798 */
6799 while ((acrtc_attach->enabled &&
6800 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6801 0, &vpos, &hpos, NULL,
6802 NULL, &pcrtc->hwmode)
6803 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6804 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6805 (int)(target_vblank -
e3eff4b5 6806 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
6807 usleep_range(1000, 1100);
6808 }
6809
6810 if (acrtc_attach->base.state->event) {
6811 drm_crtc_vblank_get(pcrtc);
6812
6813 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6814
6815 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6816 prepare_flip_isr(acrtc_attach);
6817
6818 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6819 }
6820
6821 if (acrtc_state->stream) {
8a48b44c 6822 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 6823 bundle->stream_update.vrr_infopacket =
8a48b44c 6824 &acrtc_state->stream->vrr_infopacket;
e7b07cee 6825 }
e7b07cee
HW
6826 }
6827
bc92c065 6828 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
6829 if ((planes_count || acrtc_state->active_planes == 0) &&
6830 acrtc_state->stream) {
b6e881c9 6831 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 6832 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
6833 bundle->stream_update.src = acrtc_state->stream->src;
6834 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
6835 }
6836
cf020d49
NK
6837 if (new_pcrtc_state->color_mgmt_changed) {
6838 /*
6839 * TODO: This isn't fully correct since we've actually
6840 * already modified the stream in place.
6841 */
6842 bundle->stream_update.gamut_remap =
6843 &acrtc_state->stream->gamut_remap_matrix;
6844 bundle->stream_update.output_csc_transform =
6845 &acrtc_state->stream->csc_color_matrix;
6846 bundle->stream_update.out_transfer_func =
6847 acrtc_state->stream->out_transfer_func;
6848 }
bc7f670e 6849
8a48b44c 6850 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 6851 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 6852 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 6853
e63e2491
EB
6854 /*
6855 * If FreeSync state on the stream has changed then we need to
6856 * re-adjust the min/max bounds now that DC doesn't handle this
6857 * as part of commit.
6858 */
6859 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6860 amdgpu_dm_vrr_active(acrtc_state)) {
6861 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6862 dc_stream_adjust_vmin_vmax(
6863 dm->dc, acrtc_state->stream,
6864 &acrtc_state->vrr_params.adjust);
6865 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6866 }
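		/*
		 * PSR sequencing: disable PSR before committing a full
		 * update, set up link PSR after a full update when the sink
		 * supports it but it isn't configured yet, and re-enable PSR
		 * on fast updates once it is configured but inactive.
		 */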
bc7f670e 6867 mutex_lock(&dm->dc_lock);
8c322309 6868 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 6869 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
6870 amdgpu_dm_psr_disable(acrtc_state->stream);
6871
bc7f670e 6872 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 6873 bundle->surface_updates,
bc7f670e
DF
6874 planes_count,
6875 acrtc_state->stream,
74aa7bd4 6876 &bundle->stream_update,
bc7f670e 6877 dc_state);
8c322309
RL
6878
6879 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 6880 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 6881 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309
RL
6882 amdgpu_dm_link_setup_psr(acrtc_state->stream);
6883 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
d1ebfdd8
WW
6884 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
6885 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8c322309
RL
6886 amdgpu_dm_psr_enable(acrtc_state->stream);
6887 }
6888
bc7f670e 6889 mutex_unlock(&dm->dc_lock);
e7b07cee 6890 }
4b510503 6891
8ad27806
NK
6892 /*
6893 * Update cursor state *after* programming all the planes.
6894 * This avoids redundant programming in the case where we're going
6895 * to be disabling a single plane - those pipes are being disabled.
6896 */
6897 if (acrtc_state->active_planes)
6898 amdgpu_dm_commit_cursors(state);
80c218d5 6899
4b510503 6900cleanup:
74aa7bd4 6901 kfree(bundle);
e7b07cee
HW
6902}
6903
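/*
 * Walk the atomic state and notify the audio component: connectors whose
 * CRTC assignment changed or went away get a removal notification (their
 * audio_inst is reset to -1), and connectors with a modeset on an active
 * stream get the stream's audio instance re-advertised.
 */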
6ce8f316
NK
6904static void amdgpu_dm_commit_audio(struct drm_device *dev,
6905 struct drm_atomic_state *state)
6906{
6907 struct amdgpu_device *adev = dev->dev_private;
6908 struct amdgpu_dm_connector *aconnector;
6909 struct drm_connector *connector;
6910 struct drm_connector_state *old_con_state, *new_con_state;
6911 struct drm_crtc_state *new_crtc_state;
6912 struct dm_crtc_state *new_dm_crtc_state;
6913 const struct dc_stream_status *status;
6914 int i, inst;
6915
6916 /* Notify device removals. */
6917 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6918 if (old_con_state->crtc != new_con_state->crtc) {
6919 /* CRTC changes require notification. */
6920 goto notify;
6921 }
6922
6923 if (!new_con_state->crtc)
6924 continue;
6925
6926 new_crtc_state = drm_atomic_get_new_crtc_state(
6927 state, new_con_state->crtc);
6928
6929 if (!new_crtc_state)
6930 continue;
6931
6932 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6933 continue;
6934
6935 notify:
6936 aconnector = to_amdgpu_dm_connector(connector);
6937
6938 mutex_lock(&adev->dm.audio_lock);
6939 inst = aconnector->audio_inst;
6940 aconnector->audio_inst = -1;
6941 mutex_unlock(&adev->dm.audio_lock);
6942
6943 amdgpu_dm_audio_eld_notify(adev, inst);
6944 }
6945
6946 /* Notify audio device additions. */
6947 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6948 if (!new_con_state->crtc)
6949 continue;
6950
6951 new_crtc_state = drm_atomic_get_new_crtc_state(
6952 state, new_con_state->crtc);
6953
6954 if (!new_crtc_state)
6955 continue;
6956
6957 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6958 continue;
6959
6960 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6961 if (!new_dm_crtc_state->stream)
6962 continue;
6963
6964 status = dc_stream_get_status(new_dm_crtc_state->stream);
6965 if (!status)
6966 continue;
6967
6968 aconnector = to_amdgpu_dm_connector(connector);
6969
6970 mutex_lock(&adev->dm.audio_lock);
6971 inst = status->audio_inst;
6972 aconnector->audio_inst = inst;
6973 mutex_unlock(&adev->dm.audio_lock);
6974
6975 amdgpu_dm_audio_eld_notify(adev, inst);
6976 }
6977}
6978
b5e83f6f
NK
6979/*
6980 * Enable interrupts on CRTCs that are newly active, undergone
6981 * a modeset, or have active planes again.
6982 *
6983 * Done in two passes, based on the for_modeset flag:
6984 * Pass 1: For CRTCs going through modeset
6985 * Pass 2: For CRTCs going from 0 to n active planes
6986 *
6987 * Interrupts can only be enabled after the planes are programmed,
6988 * so this requires a two-pass approach since we don't want to
6989 * just defer the interrupts until after commit planes every time.
6990 */
6991static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6992 struct drm_atomic_state *state,
6993 bool for_modeset)
6994{
6995 struct amdgpu_device *adev = dev->dev_private;
6996 struct drm_crtc *crtc;
6997 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6998 int i;
148d31e3 6999#ifdef CONFIG_DEBUG_FS
14b25846 7000 enum amdgpu_dm_pipe_crc_source source;
148d31e3 7001#endif
b5e83f6f
NK
7002
7003 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7004 new_crtc_state, i) {
7005 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7006 struct dm_crtc_state *dm_new_crtc_state =
7007 to_dm_crtc_state(new_crtc_state);
7008 struct dm_crtc_state *dm_old_crtc_state =
7009 to_dm_crtc_state(old_crtc_state);
7010 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
7011 bool run_pass;
7012
7013 run_pass = (for_modeset && modeset) ||
7014 (!for_modeset && !modeset &&
7015 !dm_old_crtc_state->interrupts_enabled);
7016
7017 if (!run_pass)
7018 continue;
7019
b5e83f6f
NK
7020 if (!dm_new_crtc_state->interrupts_enabled)
7021 continue;
7022
7023 manage_dm_interrupts(adev, acrtc, true);
7024
7025#ifdef CONFIG_DEBUG_FS
 7026 /* The stream has changed so CRC capture needs to be re-enabled. */
14b25846
DZ
7027 source = dm_new_crtc_state->crc_src;
7028 if (amdgpu_dm_is_valid_crc_source(source)) {
57638021
NK
7029 amdgpu_dm_crtc_configure_crc_source(
7030 crtc, dm_new_crtc_state,
7031 dm_new_crtc_state->crc_src);
b5e83f6f
NK
7032 }
7033#endif
7034 }
7035}
7036
1f6010a9 7037/*
27b3f4fc
LSL
7038 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7039 * @crtc_state: the DRM CRTC state
7040 * @stream_state: the DC stream state.
7041 *
7042 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7043 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7044 */
7045static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7046 struct dc_stream_state *stream_state)
7047{
b9952f93 7048 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 7049}
e7b07cee 7050
7578ecda
AD
7051static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7052 struct drm_atomic_state *state,
7053 bool nonblock)
e7b07cee
HW
7054{
7055 struct drm_crtc *crtc;
c2cea706 7056 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
7057 struct amdgpu_device *adev = dev->dev_private;
7058 int i;
7059
7060 /*
d6ef9b41
NK
7061 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7062 * a modeset, being disabled, or have no active planes.
7063 *
7064 * It's done in atomic commit rather than commit tail for now since
7065 * some of these interrupt handlers access the current CRTC state and
7066 * potentially the stream pointer itself.
7067 *
7068 * Since the atomic state is swapped within atomic commit and not within
 7069 * commit tail, this would lead to the new state (that hasn't been committed yet)
 7070 * being accessed from within the handlers.
7071 *
7072 * TODO: Fix this so we can do this in commit tail and not have to block
7073 * in atomic check.
e7b07cee 7074 */
c2cea706 7075 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
54d76575 7076 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
428da2bd 7077 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee
HW
7078 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7079
d6ef9b41
NK
7080 if (dm_old_crtc_state->interrupts_enabled &&
7081 (!dm_new_crtc_state->interrupts_enabled ||
57638021 7082 drm_atomic_crtc_needs_modeset(new_crtc_state)))
e7b07cee
HW
7083 manage_dm_interrupts(adev, acrtc, false);
7084 }
1f6010a9
DF
7085 /*
7086 * Add check here for SoC's that support hardware cursor plane, to
7087 * unset legacy_cursor_update
7088 */
e7b07cee
HW
7089
7090 return drm_atomic_helper_commit(dev, state, nonblock);
7091
 7092 /* TODO: Handle EINTR, re-enable IRQ */
7093}
7094
b8592b48
LL
7095/**
7096 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7097 * @state: The atomic state to commit
7098 *
7099 * This will tell DC to commit the constructed DC state from atomic_check,
7100 * programming the hardware. Any failures here implies a hardware failure, since
7101 * atomic check should have filtered anything non-kosher.
7102 */
7578ecda 7103static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
7104{
7105 struct drm_device *dev = state->dev;
7106 struct amdgpu_device *adev = dev->dev_private;
7107 struct amdgpu_display_manager *dm = &adev->dm;
7108 struct dm_atomic_state *dm_state;
eb3dc897 7109 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 7110 uint32_t i, j;
5cc6dcbd 7111 struct drm_crtc *crtc;
0bc9706d 7112 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
7113 unsigned long flags;
7114 bool wait_for_vblank = true;
7115 struct drm_connector *connector;
c2cea706 7116 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 7117 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 7118 int crtc_disable_count = 0;
e7b07cee
HW
7119
7120 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7121
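	/*
	 * Use the DC context constructed during atomic check when there is
	 * one; otherwise (no stream changes) work on a copy of the current
	 * DC state, which is released again at the end of this function.
	 */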
eb3dc897
NK
7122 dm_state = dm_atomic_get_new_state(state);
7123 if (dm_state && dm_state->context) {
7124 dc_state = dm_state->context;
7125 } else {
7126 /* No state changes, retain current state. */
813d20dc 7127 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
7128 ASSERT(dc_state_temp);
7129 dc_state = dc_state_temp;
7130 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7131 }
e7b07cee
HW
7132
7133 /* update changed items */
0bc9706d 7134 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 7135 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 7136
54d76575
LSL
7137 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7138 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 7139
f1ad2f5e 7140 DRM_DEBUG_DRIVER(
e7b07cee
HW
7141 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7142 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7143 "connectors_changed:%d\n",
7144 acrtc->crtc_id,
0bc9706d
LSL
7145 new_crtc_state->enable,
7146 new_crtc_state->active,
7147 new_crtc_state->planes_changed,
7148 new_crtc_state->mode_changed,
7149 new_crtc_state->active_changed,
7150 new_crtc_state->connectors_changed);
e7b07cee 7151
27b3f4fc
LSL
7152 /* Copy all transient state flags into dc state */
7153 if (dm_new_crtc_state->stream) {
7154 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7155 dm_new_crtc_state->stream);
7156 }
7157
e7b07cee
HW
 7158 /* Handles the headless hotplug case, updating new_state and
7159 * aconnector as needed
7160 */
7161
54d76575 7162 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 7163
f1ad2f5e 7164 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 7165
54d76575 7166 if (!dm_new_crtc_state->stream) {
e7b07cee 7167 /*
b830ebc9
HW
 7168 * This could happen because of issues with
 7169 * userspace notification delivery.
 7170 * In this case userspace tries to set a mode on
1f6010a9
DF
 7171 * a display that is in fact disconnected;
 7172 * dc_sink is NULL on the aconnector in this case.
b830ebc9
HW
 7173 * We expect a mode reset to come soon.
 7174 *
 7175 * This can also happen when an unplug occurs
 7176 * while the resume sequence is still in progress.
 7177 *
 7178 * In either case, we want to pretend we still
 7179 * have a sink to keep the pipe running so that
 7180 * hw state stays consistent with the sw state.
7181 */
f1ad2f5e 7182 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
7183 __func__, acrtc->base.base.id);
7184 continue;
7185 }
7186
54d76575
LSL
7187 if (dm_old_crtc_state->stream)
7188 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 7189
97028037
LP
7190 pm_runtime_get_noresume(dev->dev);
7191
e7b07cee 7192 acrtc->enabled = true;
0bc9706d
LSL
7193 acrtc->hw_mode = new_crtc_state->mode;
7194 crtc->hwmode = new_crtc_state->mode;
7195 } else if (modereset_required(new_crtc_state)) {
f1ad2f5e 7196 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 7197 /* i.e. reset mode */
8c322309 7198 if (dm_old_crtc_state->stream) {
d1ebfdd8 7199 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
7200 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7201
54d76575 7202 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8c322309 7203 }
e7b07cee
HW
7204 }
7205 } /* for_each_crtc_in_state() */
7206
eb3dc897
NK
7207 if (dc_state) {
7208 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 7209 mutex_lock(&dm->dc_lock);
eb3dc897 7210 WARN_ON(!dc_commit_state(dm->dc, dc_state));
674e78ac 7211 mutex_unlock(&dm->dc_lock);
fa2123db 7212 }
e7b07cee 7213
0bc9706d 7214 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 7215 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 7216
54d76575 7217 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 7218
54d76575 7219 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 7220 const struct dc_stream_status *status =
54d76575 7221 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 7222
eb3dc897 7223 if (!status)
09f609c3
LL
7224 status = dc_stream_get_status_from_state(dc_state,
7225 dm_new_crtc_state->stream);
eb3dc897 7226
e7b07cee 7227 if (!status)
54d76575 7228 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
7229 else
7230 acrtc->otg_inst = status->primary_otg_inst;
7231 }
7232 }
0c8620d6
BL
7233#ifdef CONFIG_DRM_AMD_DC_HDCP
7234 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7235 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7236 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7237 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7238
7239 new_crtc_state = NULL;
7240
7241 if (acrtc)
7242 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7243
7244 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7245
7246 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7247 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7248 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7249 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7250 continue;
7251 }
7252
7253 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
7254 hdcp_update_display(
7255 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 7256 new_con_state->hdcp_content_type,
b1abe558
BL
7257 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7258 : false);
0c8620d6
BL
7259 }
7260#endif
e7b07cee 7261
02d6a6fc 7262 /* Handle connector state changes */
c2cea706 7263 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
7264 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7265 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7266 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
19afd799
NC
7267 struct dc_surface_update dummy_updates[MAX_SURFACES];
7268 struct dc_stream_update stream_update;
b232d4ed 7269 struct dc_info_packet hdr_packet;
e7b07cee 7270 struct dc_stream_status *status = NULL;
b232d4ed 7271 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 7272
19afd799
NC
7273 memset(&dummy_updates, 0, sizeof(dummy_updates));
7274 memset(&stream_update, 0, sizeof(stream_update));
7275
44d09c6a 7276 if (acrtc) {
0bc9706d 7277 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
7278 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7279 }
0bc9706d 7280
e7b07cee 7281 /* Skip any modesets/resets */
0bc9706d 7282 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
7283 continue;
7284
54d76575 7285 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
7286 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7287
b232d4ed
NK
7288 scaling_changed = is_scaling_state_different(dm_new_con_state,
7289 dm_old_con_state);
7290
7291 abm_changed = dm_new_crtc_state->abm_level !=
7292 dm_old_crtc_state->abm_level;
7293
7294 hdr_changed =
7295 is_hdr_metadata_different(old_con_state, new_con_state);
7296
7297 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 7298 continue;
e7b07cee 7299
b6e881c9 7300 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 7301 if (scaling_changed) {
02d6a6fc 7302 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 7303 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 7304
02d6a6fc
DF
7305 stream_update.src = dm_new_crtc_state->stream->src;
7306 stream_update.dst = dm_new_crtc_state->stream->dst;
7307 }
7308
b232d4ed 7309 if (abm_changed) {
02d6a6fc
DF
7310 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7311
7312 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7313 }
70e8ffc5 7314
b232d4ed
NK
7315 if (hdr_changed) {
7316 fill_hdr_info_packet(new_con_state, &hdr_packet);
7317 stream_update.hdr_static_metadata = &hdr_packet;
7318 }
7319
54d76575 7320 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 7321 WARN_ON(!status);
3be5262e 7322 WARN_ON(!status->plane_count);
e7b07cee 7323
02d6a6fc
DF
7324 /*
7325 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7326 * Here we create an empty update on each plane.
7327 * To fix this, DC should permit updating only stream properties.
7328 */
7329 for (j = 0; j < status->plane_count; j++)
7330 dummy_updates[j].surface = status->plane_states[0];
7331
7332
7333 mutex_lock(&dm->dc_lock);
7334 dc_commit_updates_for_stream(dm->dc,
7335 dummy_updates,
7336 status->plane_count,
7337 dm_new_crtc_state->stream,
7338 &stream_update,
7339 dc_state);
7340 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
7341 }
7342
b5e83f6f 7343 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 7344 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 7345 new_crtc_state, i) {
fe2a1965
LP
7346 if (old_crtc_state->active && !new_crtc_state->active)
7347 crtc_disable_count++;
7348
54d76575 7349 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 7350 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 7351
057be086
NK
7352 /* Update freesync active state. */
7353 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7354
66b0c973
MK
7355 /* Handle vrr on->off / off->on transitions */
7356 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7357 dm_new_crtc_state);
e7b07cee
HW
7358 }
7359
b5e83f6f
NK
7360 /* Enable interrupts for CRTCs going through a modeset. */
7361 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
e7b07cee 7362
420cd472 7363 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 7364 if (new_crtc_state->async_flip)
420cd472
DF
7365 wait_for_vblank = false;
7366
e7b07cee 7367 /* update planes when needed per crtc*/
5cc6dcbd 7368 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 7369 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 7370
54d76575 7371 if (dm_new_crtc_state->stream)
eb3dc897 7372 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 7373 dm, crtc, wait_for_vblank);
e7b07cee
HW
7374 }
7375
b5e83f6f
NK
7376 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7377 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
e7b07cee 7378
6ce8f316
NK
7379 /* Update audio instances for each connector. */
7380 amdgpu_dm_commit_audio(dev, state);
7381
e7b07cee
HW
7382 /*
7383 * send vblank event on all events not handled in flip and
7384 * mark consumed event for drm_atomic_helper_commit_hw_done
7385 */
7386 spin_lock_irqsave(&adev->ddev->event_lock, flags);
0bc9706d 7387 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 7388
0bc9706d
LSL
7389 if (new_crtc_state->event)
7390 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 7391
0bc9706d 7392 new_crtc_state->event = NULL;
e7b07cee
HW
7393 }
7394 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7395
29c8f234
LL
7396 /* Signal HW programming completion */
7397 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
7398
7399 if (wait_for_vblank)
320a1274 7400 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
7401
7402 drm_atomic_helper_cleanup_planes(dev, state);
97028037 7403
1f6010a9
DF
7404 /*
7405 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
7406 * so we can put the GPU into runtime suspend if we're not driving any
7407 * displays anymore
7408 */
fe2a1965
LP
7409 for (i = 0; i < crtc_disable_count; i++)
7410 pm_runtime_put_autosuspend(dev->dev);
97028037 7411 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
7412
7413 if (dc_state_temp)
7414 dc_release_state(dc_state_temp);
e7b07cee
HW
7415}
7416
7417
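/*
 * Build a minimal atomic state containing the connector, its CRTC and the
 * CRTC's primary plane, mark the mode as changed, and commit it to force a
 * modeset that restores the previous display configuration.
 */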
7418static int dm_force_atomic_commit(struct drm_connector *connector)
7419{
7420 int ret = 0;
7421 struct drm_device *ddev = connector->dev;
7422 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7423 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7424 struct drm_plane *plane = disconnected_acrtc->base.primary;
7425 struct drm_connector_state *conn_state;
7426 struct drm_crtc_state *crtc_state;
7427 struct drm_plane_state *plane_state;
7428
7429 if (!state)
7430 return -ENOMEM;
7431
7432 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7433
7434 /* Construct an atomic state to restore previous display setting */
7435
7436 /*
7437 * Attach connectors to drm_atomic_state
7438 */
7439 conn_state = drm_atomic_get_connector_state(state, connector);
7440
7441 ret = PTR_ERR_OR_ZERO(conn_state);
7442 if (ret)
7443 goto err;
7444
7445 /* Attach crtc to drm_atomic_state*/
7446 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7447
7448 ret = PTR_ERR_OR_ZERO(crtc_state);
7449 if (ret)
7450 goto err;
7451
7452 /* force a restore */
7453 crtc_state->mode_changed = true;
7454
7455 /* Attach plane to drm_atomic_state */
7456 plane_state = drm_atomic_get_plane_state(state, plane);
7457
7458 ret = PTR_ERR_OR_ZERO(plane_state);
7459 if (ret)
7460 goto err;
7461
7462
7463 /* Call commit internally with the state we just constructed */
7464 ret = drm_atomic_commit(state);
7465 if (!ret)
7466 return 0;
7467
7468err:
7469 DRM_ERROR("Restoring old state failed with %i\n", ret);
7470 drm_atomic_state_put(state);
7471
7472 return ret;
7473}
7474
7475/*
1f6010a9
DF
 7476 * This function handles all cases when a set-mode call does not come upon hotplug.
 7477 * This includes when a display is unplugged and then plugged back into the
 7478 * same port, and when running without usermode desktop manager support.
e7b07cee 7479 */
3ee6b26b
AD
7480void dm_restore_drm_connector_state(struct drm_device *dev,
7481 struct drm_connector *connector)
e7b07cee 7482{
c84dec2f 7483 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
7484 struct amdgpu_crtc *disconnected_acrtc;
7485 struct dm_crtc_state *acrtc_state;
7486
7487 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7488 return;
7489
7490 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
7491 if (!disconnected_acrtc)
7492 return;
e7b07cee 7493
70e8ffc5
HW
7494 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7495 if (!acrtc_state->stream)
e7b07cee
HW
7496 return;
7497
7498 /*
7499 * If the previous sink is not released and different from the current,
 7500 * we deduce we are in a state where we cannot rely on a usermode call
7501 * to turn on the display, so we do it here
7502 */
7503 if (acrtc_state->stream->sink != aconnector->dc_sink)
7504 dm_force_atomic_commit(&aconnector->base);
7505}
7506
1f6010a9 7507/*
e7b07cee
HW
 7508 * Grabs all modesetting locks to serialize against any blocking commits,
 7509 * and waits for completion of all non-blocking commits.
7510 */
3ee6b26b
AD
7511static int do_aquire_global_lock(struct drm_device *dev,
7512 struct drm_atomic_state *state)
e7b07cee
HW
7513{
7514 struct drm_crtc *crtc;
7515 struct drm_crtc_commit *commit;
7516 long ret;
7517
1f6010a9
DF
7518 /*
 7519 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
 7520 * ensure that when the framework releases it, the
 7521 * extra locks we are taking here will get released too.
7522 */
7523 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7524 if (ret)
7525 return ret;
7526
7527 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7528 spin_lock(&crtc->commit_lock);
7529 commit = list_first_entry_or_null(&crtc->commit_list,
7530 struct drm_crtc_commit, commit_entry);
7531 if (commit)
7532 drm_crtc_commit_get(commit);
7533 spin_unlock(&crtc->commit_lock);
7534
7535 if (!commit)
7536 continue;
7537
1f6010a9
DF
7538 /*
7539 * Make sure all pending HW programming completed and
e7b07cee
HW
7540 * page flips done
7541 */
7542 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7543
7544 if (ret > 0)
7545 ret = wait_for_completion_interruptible_timeout(
7546 &commit->flip_done, 10*HZ);
7547
7548 if (ret == 0)
7549 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 7550 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
7551
7552 drm_crtc_commit_put(commit);
7553 }
7554
7555 return ret < 0 ? ret : 0;
7556}
7557
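/*
 * Derive the FreeSync configuration for a CRTC: VRR is supported only when
 * the connector reports FreeSync capability and the mode's nominal refresh
 * rate falls within the connector's [min_vfreq, max_vfreq] range.
 */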
bb47de73
NK
7558static void get_freesync_config_for_crtc(
7559 struct dm_crtc_state *new_crtc_state,
7560 struct dm_connector_state *new_con_state)
98e6436d
AK
7561{
7562 struct mod_freesync_config config = {0};
98e6436d
AK
7563 struct amdgpu_dm_connector *aconnector =
7564 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 7565 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 7566 int vrefresh = drm_mode_vrefresh(mode);
98e6436d 7567
a057ec46 7568 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
7569 vrefresh >= aconnector->min_vfreq &&
7570 vrefresh <= aconnector->max_vfreq;
bb47de73 7571
a057ec46
IB
7572 if (new_crtc_state->vrr_supported) {
7573 new_crtc_state->stream->ignore_msa_timing_param = true;
bb47de73 7574 config.state = new_crtc_state->base.vrr_enabled ?
98e6436d
AK
7575 VRR_STATE_ACTIVE_VARIABLE :
7576 VRR_STATE_INACTIVE;
7577 config.min_refresh_in_uhz =
7578 aconnector->min_vfreq * 1000000;
7579 config.max_refresh_in_uhz =
7580 aconnector->max_vfreq * 1000000;
69ff8845 7581 config.vsif_supported = true;
180db303 7582 config.btr = true;
98e6436d
AK
7583 }
7584
bb47de73
NK
7585 new_crtc_state->freesync_config = config;
7586}
98e6436d 7587
bb47de73
NK
7588static void reset_freesync_config_for_crtc(
7589 struct dm_crtc_state *new_crtc_state)
7590{
7591 new_crtc_state->vrr_supported = false;
98e6436d 7592
180db303
NK
7593 memset(&new_crtc_state->vrr_params, 0,
7594 sizeof(new_crtc_state->vrr_params));
bb47de73
NK
7595 memset(&new_crtc_state->vrr_infopacket, 0,
7596 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
7597}
7598
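/*
 * Validate and update the DC stream backing a CRTC for the new atomic state.
 * Called with enable == false to remove the stream of a changed/disabled
 * CRTC from the DC context, and with enable == true to create and add the
 * new stream; sets *lock_and_validation_needed when the commit requires the
 * full (locked) validation pass.
 */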
4b9674e5
LL
7599static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7600 struct drm_atomic_state *state,
7601 struct drm_crtc *crtc,
7602 struct drm_crtc_state *old_crtc_state,
7603 struct drm_crtc_state *new_crtc_state,
7604 bool enable,
7605 bool *lock_and_validation_needed)
e7b07cee 7606{
eb3dc897 7607 struct dm_atomic_state *dm_state = NULL;
54d76575 7608 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 7609 struct dc_stream_state *new_stream;
62f55537 7610 int ret = 0;
d4d4a645 7611
1f6010a9
DF
7612 /*
7613 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7614 * update changed items
7615 */
4b9674e5
LL
7616 struct amdgpu_crtc *acrtc = NULL;
7617 struct amdgpu_dm_connector *aconnector = NULL;
7618 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7619 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 7620
4b9674e5 7621 new_stream = NULL;
9635b754 7622
4b9674e5
LL
7623 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7624 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7625 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 7626 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 7627
4b9674e5
LL
7628 /* TODO This hack should go away */
7629 if (aconnector && enable) {
 7630 /* Make sure a fake sink is created in the plug-in scenario */
7631 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7632 &aconnector->base);
7633 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7634 &aconnector->base);
19f89e23 7635
4b9674e5
LL
7636 if (IS_ERR(drm_new_conn_state)) {
7637 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7638 goto fail;
7639 }
19f89e23 7640
4b9674e5
LL
7641 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7642 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 7643
02d35a67
JFZ
7644 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7645 goto skip_modeset;
7646
4b9674e5
LL
7647 new_stream = create_stream_for_sink(aconnector,
7648 &new_crtc_state->mode,
7649 dm_new_conn_state,
7650 dm_old_crtc_state->stream);
19f89e23 7651
4b9674e5
LL
7652 /*
 7653 * We can have no stream on ACTION_SET if a display
 7654 * was disconnected during S3; in this case it is not an
 7655 * error, the OS will be updated after detection and
 7656 * will do the right thing on the next atomic commit.
7657 */
19f89e23 7658
4b9674e5
LL
7659 if (!new_stream) {
7660 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7661 __func__, acrtc->base.base.id);
7662 ret = -ENOMEM;
7663 goto fail;
7664 }
e7b07cee 7665
4b9674e5 7666 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 7667
88694af9
NK
7668 ret = fill_hdr_info_packet(drm_new_conn_state,
7669 &new_stream->hdr_static_metadata);
7670 if (ret)
7671 goto fail;
7672
7e930949
NK
7673 /*
7674 * If we already removed the old stream from the context
7675 * (and set the new stream to NULL) then we can't reuse
7676 * the old stream even if the stream and scaling are unchanged.
7677 * We'll hit the BUG_ON and black screen.
7678 *
7679 * TODO: Refactor this function to allow this check to work
7680 * in all conditions.
7681 */
7682 if (dm_new_crtc_state->stream &&
7683 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
7684 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7685 new_crtc_state->mode_changed = false;
7686 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7687 new_crtc_state->mode_changed);
62f55537 7688 }
4b9674e5 7689 }
b830ebc9 7690
02d35a67 7691 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
7692 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7693 goto skip_modeset;
e7b07cee 7694
4b9674e5
LL
7695 DRM_DEBUG_DRIVER(
7696 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7697 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7698 "connectors_changed:%d\n",
7699 acrtc->crtc_id,
7700 new_crtc_state->enable,
7701 new_crtc_state->active,
7702 new_crtc_state->planes_changed,
7703 new_crtc_state->mode_changed,
7704 new_crtc_state->active_changed,
7705 new_crtc_state->connectors_changed);
62f55537 7706
4b9674e5
LL
7707 /* Remove stream for any changed/disabled CRTC */
7708 if (!enable) {
62f55537 7709
4b9674e5
LL
7710 if (!dm_old_crtc_state->stream)
7711 goto skip_modeset;
eb3dc897 7712
4b9674e5
LL
7713 ret = dm_atomic_get_state(state, &dm_state);
7714 if (ret)
7715 goto fail;
e7b07cee 7716
4b9674e5
LL
7717 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7718 crtc->base.id);
62f55537 7719
4b9674e5
LL
7720 /* i.e. reset mode */
7721 if (dc_remove_stream_from_ctx(
7722 dm->dc,
7723 dm_state->context,
7724 dm_old_crtc_state->stream) != DC_OK) {
7725 ret = -EINVAL;
7726 goto fail;
7727 }
62f55537 7728
4b9674e5
LL
7729 dc_stream_release(dm_old_crtc_state->stream);
7730 dm_new_crtc_state->stream = NULL;
bb47de73 7731
4b9674e5 7732 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 7733
4b9674e5 7734 *lock_and_validation_needed = true;
62f55537 7735
4b9674e5
LL
7736 } else {/* Add stream for any updated/enabled CRTC */
7737 /*
 7738 * Quick fix to prevent a NULL pointer dereference on new_stream when
 7739 * newly added MST connectors are not found in the existing crtc_state in chained mode.
 7740 * TODO: need to dig out the root cause of that.
7741 */
7742 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7743 goto skip_modeset;
62f55537 7744
4b9674e5
LL
7745 if (modereset_required(new_crtc_state))
7746 goto skip_modeset;
62f55537 7747
4b9674e5
LL
7748 if (modeset_required(new_crtc_state, new_stream,
7749 dm_old_crtc_state->stream)) {
62f55537 7750
4b9674e5 7751 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 7752
4b9674e5
LL
7753 ret = dm_atomic_get_state(state, &dm_state);
7754 if (ret)
7755 goto fail;
27b3f4fc 7756
4b9674e5 7757 dm_new_crtc_state->stream = new_stream;
62f55537 7758
4b9674e5 7759 dc_stream_retain(new_stream);
1dc90497 7760
4b9674e5
LL
7761 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7762 crtc->base.id);
1dc90497 7763
4b9674e5
LL
7764 if (dc_add_stream_to_ctx(
7765 dm->dc,
7766 dm_state->context,
7767 dm_new_crtc_state->stream) != DC_OK) {
7768 ret = -EINVAL;
7769 goto fail;
9b690ef3
BL
7770 }
7771
4b9674e5
LL
7772 *lock_and_validation_needed = true;
7773 }
7774 }
e277adc5 7775
4b9674e5
LL
7776skip_modeset:
7777 /* Release extra reference */
7778 if (new_stream)
7779 dc_stream_release(new_stream);
e277adc5 7780
4b9674e5
LL
7781 /*
7782 * We want to do dc stream updates that do not require a
7783 * full modeset below.
7784 */
7785 if (!(enable && aconnector && new_crtc_state->enable &&
7786 new_crtc_state->active))
7787 return 0;
7788 /*
 7789 * Given the above conditions, the dc state cannot be NULL because:
 7790 * 1. We're in the process of enabling CRTCs (the stream has just
 7791 * been added to the dc context, or is already on it),
 7792 * 2. it has a valid connector attached, and
 7793 * 3. it is currently active and enabled.
7794 * => The dc stream state currently exists.
7795 */
7796 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 7797
4b9674e5
LL
7798 /* Scaling or underscan settings */
7799 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7800 update_stream_scaling_settings(
7801 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 7802
b05e2c5e
DF
7803 /* ABM settings */
7804 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7805
4b9674e5
LL
7806 /*
7807 * Color management settings. We also update color properties
7808 * when a modeset is needed, to ensure it gets reprogrammed.
7809 */
7810 if (dm_new_crtc_state->base.color_mgmt_changed ||
7811 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 7812 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
7813 if (ret)
7814 goto fail;
62f55537 7815 }
e7b07cee 7816
4b9674e5
LL
7817 /* Update Freesync settings. */
7818 get_freesync_config_for_crtc(dm_new_crtc_state,
7819 dm_new_conn_state);
7820
62f55537 7821 return ret;
9635b754
DS
7822
7823fail:
7824 if (new_stream)
7825 dc_stream_release(new_stream);
7826 return ret;
62f55537 7827}
9b690ef3 7828
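/*
 * Decide whether a plane change can be applied as a fast update or whether
 * all planes on the stream must be removed and recreated (e.g. on modeset,
 * CRTC degamma changes, or z-order/format changes among sibling planes).
 */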
f6ff2a08
NK
7829static bool should_reset_plane(struct drm_atomic_state *state,
7830 struct drm_plane *plane,
7831 struct drm_plane_state *old_plane_state,
7832 struct drm_plane_state *new_plane_state)
7833{
7834 struct drm_plane *other;
7835 struct drm_plane_state *old_other_state, *new_other_state;
7836 struct drm_crtc_state *new_crtc_state;
7837 int i;
7838
70a1efac
NK
7839 /*
 7840 * TODO: Remove this hack once the checks below are sufficient
 7841 * to determine when we need to reset all the planes
 7842 * on the stream.
7843 */
7844 if (state->allow_modeset)
7845 return true;
7846
f6ff2a08
NK
7847 /* Exit early if we know that we're adding or removing the plane. */
7848 if (old_plane_state->crtc != new_plane_state->crtc)
7849 return true;
7850
7851 /* old crtc == new_crtc == NULL, plane not in context. */
7852 if (!new_plane_state->crtc)
7853 return false;
7854
7855 new_crtc_state =
7856 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7857
7858 if (!new_crtc_state)
7859 return true;
7860
7316c4ad
NK
7861 /* CRTC Degamma changes currently require us to recreate planes. */
7862 if (new_crtc_state->color_mgmt_changed)
7863 return true;
7864
f6ff2a08
NK
7865 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7866 return true;
7867
7868 /*
7869 * If there are any new primary or overlay planes being added or
7870 * removed then the z-order can potentially change. To ensure
7871 * correct z-order and pipe acquisition the current DC architecture
7872 * requires us to remove and recreate all existing planes.
7873 *
7874 * TODO: Come up with a more elegant solution for this.
7875 */
7876 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7877 if (other->type == DRM_PLANE_TYPE_CURSOR)
7878 continue;
7879
7880 if (old_other_state->crtc != new_plane_state->crtc &&
7881 new_other_state->crtc != new_plane_state->crtc)
7882 continue;
7883
7884 if (old_other_state->crtc != new_other_state->crtc)
7885 return true;
7886
7887 /* TODO: Remove this once we can handle fast format changes. */
7888 if (old_other_state->fb && new_other_state->fb &&
7889 old_other_state->fb->format != new_other_state->fb->format)
7890 return true;
7891 }
7892
7893 return false;
7894}
7895
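/*
 * Plane counterpart of dm_update_crtc_state: on disable, detach the DC
 * plane state from the stream's context and release it; on enable, create
 * a new DC plane state, fill it from the DRM plane state and attach it to
 * the stream's context.
 */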
9e869063
LL
7896static int dm_update_plane_state(struct dc *dc,
7897 struct drm_atomic_state *state,
7898 struct drm_plane *plane,
7899 struct drm_plane_state *old_plane_state,
7900 struct drm_plane_state *new_plane_state,
7901 bool enable,
7902 bool *lock_and_validation_needed)
62f55537 7903{
eb3dc897
NK
7904
7905 struct dm_atomic_state *dm_state = NULL;
62f55537 7906 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 7907 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 7908 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 7909 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
f6ff2a08 7910 bool needs_reset;
62f55537 7911 int ret = 0;
e7b07cee 7912
9b690ef3 7913
9e869063
LL
7914 new_plane_crtc = new_plane_state->crtc;
7915 old_plane_crtc = old_plane_state->crtc;
7916 dm_new_plane_state = to_dm_plane_state(new_plane_state);
7917 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 7918
9e869063
LL
 7919 /* TODO: Implement atomic check for cursor plane */
7920 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7921 return 0;
9b690ef3 7922
f6ff2a08
NK
7923 needs_reset = should_reset_plane(state, plane, old_plane_state,
7924 new_plane_state);
7925
9e869063
LL
7926 /* Remove any changed/removed planes */
7927 if (!enable) {
f6ff2a08 7928 if (!needs_reset)
9e869063 7929 return 0;
a7b06724 7930
9e869063
LL
7931 if (!old_plane_crtc)
7932 return 0;
62f55537 7933
9e869063
LL
7934 old_crtc_state = drm_atomic_get_old_crtc_state(
7935 state, old_plane_crtc);
7936 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 7937
9e869063
LL
7938 if (!dm_old_crtc_state->stream)
7939 return 0;
62f55537 7940
9e869063
LL
7941 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7942 plane->base.id, old_plane_crtc->base.id);
9b690ef3 7943
9e869063
LL
7944 ret = dm_atomic_get_state(state, &dm_state);
7945 if (ret)
7946 return ret;
eb3dc897 7947
9e869063
LL
7948 if (!dc_remove_plane_from_context(
7949 dc,
7950 dm_old_crtc_state->stream,
7951 dm_old_plane_state->dc_state,
7952 dm_state->context)) {
62f55537 7953
9e869063
LL
 7954 ret = -EINVAL;
7955 return ret;
7956 }
e7b07cee 7957
9b690ef3 7958
9e869063
LL
7959 dc_plane_state_release(dm_old_plane_state->dc_state);
7960 dm_new_plane_state->dc_state = NULL;
1dc90497 7961
9e869063 7962 *lock_and_validation_needed = true;
1dc90497 7963
9e869063
LL
7964 } else { /* Add new planes */
7965 struct dc_plane_state *dc_new_plane_state;
1dc90497 7966
9e869063
LL
7967 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7968 return 0;
e7b07cee 7969
9e869063
LL
7970 if (!new_plane_crtc)
7971 return 0;
e7b07cee 7972
9e869063
LL
7973 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7974 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 7975
9e869063
LL
7976 if (!dm_new_crtc_state->stream)
7977 return 0;
62f55537 7978
f6ff2a08 7979 if (!needs_reset)
9e869063 7980 return 0;
62f55537 7981
9e869063 7982 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 7983
9e869063
LL
7984 dc_new_plane_state = dc_create_plane_state(dc);
7985 if (!dc_new_plane_state)
7986 return -ENOMEM;
62f55537 7987
9e869063
LL
7988 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7989 plane->base.id, new_plane_crtc->base.id);
8c45c5db 7990
e4923387
SW
7991 ret = fill_dc_plane_attributes(
7992 new_plane_crtc->dev->dev_private,
7993 dc_new_plane_state,
7994 new_plane_state,
7995 new_crtc_state);
7996 if (ret) {
7997 dc_plane_state_release(dc_new_plane_state);
7998 return ret;
7999 }
8000
9e869063
LL
8001 ret = dm_atomic_get_state(state, &dm_state);
8002 if (ret) {
8003 dc_plane_state_release(dc_new_plane_state);
8004 return ret;
8005 }
eb3dc897 8006
9e869063
LL
8007 /*
8008 * Any atomic check errors that occur after this will
8009 * not need a release. The plane state will be attached
8010 * to the stream, and therefore part of the atomic
8011 * state. It'll be released when the atomic state is
8012 * cleaned.
8013 */
8014 if (!dc_add_plane_to_context(
8015 dc,
8016 dm_new_crtc_state->stream,
8017 dc_new_plane_state,
8018 dm_state->context)) {
62f55537 8019
9e869063
LL
8020 dc_plane_state_release(dc_new_plane_state);
8021 return -EINVAL;
8022 }
8c45c5db 8023
9e869063 8024 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 8025
9e869063
LL
8026 /* Tell DC to do a full surface update every time there
8027 * is a plane change. Inefficient, but works for now.
8028 */
8029 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8030
8031 *lock_and_validation_needed = true;
62f55537 8032 }
e7b07cee
HW
8033
8034
62f55537
AG
8035 return ret;
8036}
a87fa993 8037
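/*
 * Walk the atomic state, build surface/stream updates for each changed
 * plane, and ask DC how invasive the commit would be. Anything above
 * UPDATE_TYPE_MED is promoted to UPDATE_TYPE_FULL so the caller takes the
 * global lock before committing.
 */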
eb3dc897 8038static int
f843b308 8039dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
eb3dc897
NK
8040 struct drm_atomic_state *state,
8041 enum surface_update_type *out_type)
8042{
f843b308 8043 struct dc *dc = dm->dc;
eb3dc897
NK
8044 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8045 int i, j, num_plane, ret = 0;
a87fa993
BL
8046 struct drm_plane_state *old_plane_state, *new_plane_state;
8047 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
d3b65841 8048 struct drm_crtc *new_plane_crtc;
a87fa993
BL
8049 struct drm_plane *plane;
8050
8051 struct drm_crtc *crtc;
8052 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8053 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8054 struct dc_stream_status *status = NULL;
a87fa993 8055 enum surface_update_type update_type = UPDATE_TYPE_FAST;
7527791e
RL
8056 struct surface_info_bundle {
8057 struct dc_surface_update surface_updates[MAX_SURFACES];
8058 struct dc_plane_info plane_infos[MAX_SURFACES];
8059 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8060 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8061 struct dc_stream_update stream_update;
8062 } *bundle;
a87fa993 8063
7527791e 8064 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
fe96b99d 8065
7527791e
RL
8066 if (!bundle) {
8067 DRM_ERROR("Failed to allocate update bundle\n");
4f712911
BL
 8068 /* Set type to FULL to avoid crashing in DC */
8069 update_type = UPDATE_TYPE_FULL;
eb3dc897 8070 goto cleanup;
4f712911 8071 }
a87fa993
BL
8072
8073 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2aa632c5 8074
7527791e 8075 memset(bundle, 0, sizeof(struct surface_info_bundle));
c448a53a 8076
a87fa993
BL
8077 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8078 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8079 num_plane = 0;
8080
6836d239
NK
8081 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8082 update_type = UPDATE_TYPE_FULL;
8083 goto cleanup;
8084 }
a87fa993 8085
6836d239 8086 if (!new_dm_crtc_state->stream)
c744e974 8087 continue;
eb3dc897 8088
c744e974 8089 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
2cc450ce
NK
8090 const struct amdgpu_framebuffer *amdgpu_fb =
8091 to_amdgpu_framebuffer(new_plane_state->fb);
7527791e
RL
8092 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8093 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8094 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
2cc450ce 8095 uint64_t tiling_flags;
5888f07a 8096 bool tmz_surface = false;
2cc450ce 8097
c744e974 8098 new_plane_crtc = new_plane_state->crtc;
c744e974
NK
8099 new_dm_plane_state = to_dm_plane_state(new_plane_state);
8100 old_dm_plane_state = to_dm_plane_state(old_plane_state);
eb3dc897 8101
c744e974
NK
8102 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8103 continue;
eb3dc897 8104
6836d239
NK
8105 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8106 update_type = UPDATE_TYPE_FULL;
8107 goto cleanup;
8108 }
8109
c744e974
NK
8110 if (crtc != new_plane_crtc)
8111 continue;
8112
7527791e
RL
8113 bundle->surface_updates[num_plane].surface =
8114 new_dm_plane_state->dc_state;
c744e974
NK
8115
8116 if (new_crtc_state->mode_changed) {
7527791e
RL
8117 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8118 bundle->stream_update.src = new_dm_crtc_state->stream->src;
c744e974
NK
8119 }
8120
8121 if (new_crtc_state->color_mgmt_changed) {
7527791e 8122 bundle->surface_updates[num_plane].gamma =
c744e974 8123 new_dm_plane_state->dc_state->gamma_correction;
7527791e 8124 bundle->surface_updates[num_plane].in_transfer_func =
c744e974 8125 new_dm_plane_state->dc_state->in_transfer_func;
44efb784
SW
8126 bundle->surface_updates[num_plane].gamut_remap_matrix =
8127 &new_dm_plane_state->dc_state->gamut_remap_matrix;
7527791e 8128 bundle->stream_update.gamut_remap =
c744e974 8129 &new_dm_crtc_state->stream->gamut_remap_matrix;
7527791e 8130 bundle->stream_update.output_csc_transform =
cf020d49 8131 &new_dm_crtc_state->stream->csc_color_matrix;
7527791e 8132 bundle->stream_update.out_transfer_func =
c744e974 8133 new_dm_crtc_state->stream->out_transfer_func;
a87fa993
BL
8134 }
8135
004b3938 8136 ret = fill_dc_scaling_info(new_plane_state,
7527791e 8137 scaling_info);
004b3938
NK
8138 if (ret)
8139 goto cleanup;
8140
7527791e 8141 bundle->surface_updates[num_plane].scaling_info = scaling_info;
004b3938 8142
2cc450ce 8143 if (amdgpu_fb) {
5888f07a 8144 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
2cc450ce
NK
8145 if (ret)
8146 goto cleanup;
8147
2cc450ce
NK
8148 ret = fill_dc_plane_info_and_addr(
8149 dm->adev, new_plane_state, tiling_flags,
7527791e 8150 plane_info,
5888f07a 8151 &flip_addr->address, tmz_surface,
af031f07 8152 false);
2cc450ce
NK
8153 if (ret)
8154 goto cleanup;
8155
7527791e
RL
8156 bundle->surface_updates[num_plane].plane_info = plane_info;
8157 bundle->surface_updates[num_plane].flip_addr = flip_addr;
2cc450ce
NK
8158 }
8159
c744e974
NK
8160 num_plane++;
8161 }
8162
8163 if (num_plane == 0)
8164 continue;
8165
8166 ret = dm_atomic_get_state(state, &dm_state);
8167 if (ret)
8168 goto cleanup;
8169
8170 old_dm_state = dm_atomic_get_old_state(state);
8171 if (!old_dm_state) {
8172 ret = -EINVAL;
8173 goto cleanup;
8174 }
8175
8176 status = dc_stream_get_status_from_state(old_dm_state->context,
8177 new_dm_crtc_state->stream);
7527791e 8178 bundle->stream_update.stream = new_dm_crtc_state->stream;
f843b308
NK
8179 /*
8180 * TODO: DC modifies the surface during this call so we need
8181 * to lock here - find a way to do this without locking.
8182 */
8183 mutex_lock(&dm->dc_lock);
7527791e
RL
8184 update_type = dc_check_update_surfaces_for_stream(
8185 dc, bundle->surface_updates, num_plane,
8186 &bundle->stream_update, status);
f843b308 8187 mutex_unlock(&dm->dc_lock);
c744e974
NK
8188
8189 if (update_type > UPDATE_TYPE_MED) {
a87fa993 8190 update_type = UPDATE_TYPE_FULL;
eb3dc897 8191 goto cleanup;
a87fa993
BL
8192 }
8193 }
8194
eb3dc897 8195cleanup:
7527791e 8196 kfree(bundle);
a87fa993 8197
eb3dc897
NK
8198 *out_type = update_type;
8199 return ret;
a87fa993 8200}
62f55537 8201
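/*
 * A modeset on a CRTC that drives an MST connector can change the DSC
 * bandwidth available to sibling streams in the same topology, so pull the
 * other affected CRTCs into the atomic state too.
 */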
44be939f
ML
8202static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8203{
8204 struct drm_connector *connector;
8205 struct drm_connector_state *conn_state;
8206 struct amdgpu_dm_connector *aconnector = NULL;
8207 int i;
8208 for_each_new_connector_in_state(state, connector, conn_state, i) {
8209 if (conn_state->crtc != crtc)
8210 continue;
8211
8212 aconnector = to_amdgpu_dm_connector(connector);
8213 if (!aconnector->port || !aconnector->mst_port)
8214 aconnector = NULL;
8215 else
8216 break;
8217 }
8218
8219 if (!aconnector)
8220 return 0;
8221
8222 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8223}
8224
b8592b48
LL
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state; otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For the full-update case, which removes/adds/updates streams on
 * one CRTC while flipping on another, acquiring the global lock guarantees
 * that any such commit will wait for completion of any outstanding flip using
 * DRM's synchronization events. See dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in @state, even
 * when that might not seem necessary. This is because DC stream creation
 * requires the DC sink, which is tied to the DRM connector state. Cleaning
 * this up should be possible, but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
	int ret, i;

	/*
	 * This bool will be set to true for any modeset/reset or plane update
	 * that implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

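	/*
	 * The four passes below run in teardown-before-setup order: planes
	 * are removed before the CRTCs/streams they were attached to are
	 * disabled, and CRTCs are (re)enabled before new planes are added,
	 * so each dm_update_*_state() call operates on a consistent dc_state.
	 */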
	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to decide how
	 * to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine whether the
	 * global lock must be taken. Leaving it in to check whether we broke
	 * any corner cases:
	 *   lock_and_validation_needed true  = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 *   lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			/* Don't fall through to fail with ret still 0. */
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of the MST topology in the state:
		 * we need to perform the MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance of getting
		 * stuck in an infinite loop and eventually hanging.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context or affect global validation, and they can
		 * have their commit work done in parallel with other commits
		 * not touching the same resource. If we have a new DC context
		 * as part of the DM atomic state from validation, we need to
		 * free it and retain the existing one instead.
		 */
		struct dm_atomic_state *new_dm_state, *old_dm_state;

		new_dm_state = dm_atomic_get_new_state(state);
		old_dm_state = dm_atomic_get_old_state(state);

		if (new_dm_state && old_dm_state) {
			if (new_dm_state->context)
				dc_release_state(new_dm_state->context);

			new_dm_state->context = old_dm_state->context;

			if (old_dm_state->context)
				dc_retain_state(old_dm_state->context);
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}

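/*
 * DPCD register 0x0007 (DOWN_STREAM_PORT_COUNT) bit 6, MSA_TIMING_PAR_IGNORED,
 * advertises that the sink can operate without the MSA timing parameters,
 * which is a prerequisite for driving variable refresh (FreeSync) timings
 * over DP.
 */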
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-NULL, restrict freesync to DP and eDP sinks only.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
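	/*
	 * Example: a sink whose EDID monitor-range descriptor reports
	 * 48-144 Hz ends up with min_vfreq = 48 and max_vfreq = 144; the
	 * 96 Hz span clears the > 10 Hz requirement below, so the connector
	 * is marked freesync capable.
	 */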
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;

			/*
			 * Check if the monitor has continuous frequency mode.
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;

			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

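/*
 * DPCD register 0x070 (DP_PSR_SUPPORT) reports the highest PSR version the
 * sink supports; 0 means PSR is unsupported. Only eDP links are probed,
 * since PSR is an eDP feature.
 */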
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;

	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating the
	 * interrupt to enter PSR; initialize to a fail-safe of 2 frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least 30 ms
	 * of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
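
	/*
	 * Example (illustrative): a 60 Hz stream has a frame time of roughly
	 * 16666 us, so num_frames_static = 30000 / 16666 + 1 = 2, i.e. two
	 * static frames must elapse before the PSR entry interrupt is raised.
	 */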
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}