drm/amd/display: Add SetBacklight call to abm on dmcub
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

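/*
 * Editor's note (illustrative sketch, not part of the driver): the "liaison"
 * pattern above in practice. A userspace atomic commit is funneled through
 * the DRM helpers into the DM hooks declared below, which in turn drive DC:
 *
 *	drm_mode_atomic_ioctl()
 *	  -> drm_atomic_check_only()    -> amdgpu_dm_atomic_check()
 *	  -> drm_atomic_helper_commit() -> amdgpu_dm_atomic_commit()
 *	       -> amdgpu_dm_atomic_commit_tail()  (programs DC streams/planes)
 *
 * The exact call chain varies by kernel version; the DM entry points named
 * here are the ones declared in this file.
 */
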
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);


/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);


		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
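
/*
 * Editor's note (illustrative summary, not part of the driver): the decision
 * in dm_pflip_high_irq() above boils down to:
 *
 *	fixed refresh, or VRR with scanout already past the front-porch
 *	    -> update the vblank count and send the flip event immediately;
 *	VRR with scanout still inside the front-porch
 *	    -> queue the event on vblank_event_list so that
 *	       drm_crtc_handle_vblank() delivers it later with a valid
 *	       timestamp and count.
 */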

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
 * * We are close enough to VUPDATE - the point of no return for hw
 * * We are in the fixed portion of variable front porch when vrr is enabled
 * * We are before VUPDATE, where double-buffered vrr registers are swapped
 *
 * It is therefore the correct place to signal vblank, send user flip events,
 * and update VRR.
 */
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
	drm_crtc_handle_vblank(&acrtc->base);

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(
		adev->dm.freesync_module,
		acrtc_state->stream,
		&acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(
		adev->dm.dc,
		acrtc_state->stream,
		&acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
#endif

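/*
 * Editor's note (illustrative summary, not part of the driver): which
 * vblank-path handler runs where. On DCE, dm_crtc_high_irq() fires at the
 * start of vblank (front-porch) and dm_vupdate_high_irq() after the
 * front-porch ends, with VRR deciding which of the two calls
 * drm_crtc_handle_vblank(). On DCN, dm_dcn_crtc_high_irq() fires once at
 * VSTARTUP and handles vblank, CRC, VRR and, when all planes are gated,
 * pending flip completion in a single place.
 */
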
static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void* handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

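/*
 * Editor's note (illustrative, not part of the driver): the component
 * pattern above is how the HDA audio driver discovers display audio.
 * component_add() in amdgpu_dm_audio_init() publishes this device; when
 * the sound side registers a matching component master, the bind callback
 * hands it &amdgpu_dm_audio_component_ops, after which the audio driver
 * pulls ELDs via get_eld() and DM pushes hotplug via pin_eld_notify().
 */
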
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

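/*
 * Editor's note (illustrative, not part of the driver): DMUB bring-up order
 * as implemented in this file. dm_dmub_sw_init() (called from the dm_sw_init
 * hook below) fetches the firmware and sizes/allocates the framebuffer
 * regions; dm_dmub_hw_init() then copies the windows, starts the DMUB
 * service and wraps it in a dc_dmub_srv for DC's use. dm_resume() calls
 * dm_dmub_hw_init() again, since the hardware state does not survive S3.
 */
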
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vlank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch(adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Record the firmware version before it is logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

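/*
 * Editor's note (illustrative, not part of the driver): the region/fb split
 * above. dmub_srv_calc_region_info() only computes sizes and offsets for the
 * DMUB windows (inst const, BSS/data, VBIOS, mailbox, trace buffer, fw
 * state); one VRAM BO is then allocated to cover them all, and
 * dmub_srv_calc_fb_info() rebases each window onto that BO's CPU/GPU
 * addresses, producing the fb_info consumed by dm_dmub_hw_init() above.
 */
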
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if(adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch(adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);


	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}
1689
4562236b
HW
1690static int dm_resume(void *handle)
1691{
1692 struct amdgpu_device *adev = handle;
4562236b
HW
1693 struct drm_device *ddev = adev->ddev;
1694 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 1695 struct amdgpu_dm_connector *aconnector;
4562236b 1696 struct drm_connector *connector;
f8d2d39e 1697 struct drm_connector_list_iter iter;
4562236b 1698 struct drm_crtc *crtc;
c2cea706 1699 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
1700 struct dm_crtc_state *dm_new_crtc_state;
1701 struct drm_plane *plane;
1702 struct drm_plane_state *new_plane_state;
1703 struct dm_plane_state *dm_new_plane_state;
113b7a01 1704 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 1705 enum dc_connection_type new_connection_type = dc_connection_none;
8c7aea40 1706 int i, r;
4562236b 1707
113b7a01
LL
1708 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1709 dc_release_state(dm_state->context);
1710 dm_state->context = dc_create_state(dm->dc);
1711 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1712 dc_resource_state_construct(dm->dc, dm_state->context);
1713
8c7aea40
NK
1714 /* Before powering on DC we need to re-initialize DMUB. */
1715 r = dm_dmub_hw_init(adev);
1716 if (r)
1717 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1718
a80aa93d
ML
1719 /* power on hardware */
1720 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1721
4562236b
HW
1722 /* program HPD filter */
1723 dc_resume(dm->dc);
1724
4562236b
HW
1725 /*
1726 * early enable HPD Rx IRQ; this should be done before setting the mode,
1727 * as short-pulse interrupts are used for MST
1728 */
1729 amdgpu_dm_irq_resume_early(adev);
1730
d20ebea8 1731 /* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
1732 s3_handle_mst(ddev, false);
1733
4562236b 1734 /* Do detection */
f8d2d39e
LP
1735 drm_connector_list_iter_begin(ddev, &iter);
1736 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 1737 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
1738
1739 /*
1740 * This is the case when traversing through already created
1741 * MST connectors; they should be skipped.
1742 */
1743 if (aconnector->mst_port)
1744 continue;
1745
03ea364c 1746 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
1747 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1748 DRM_ERROR("KMS: Failed to detect connector\n");
1749
1750 if (aconnector->base.force && new_connection_type == dc_connection_none)
1751 emulated_link_detect(aconnector->dc_link);
1752 else
1753 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
1754
1755 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1756 aconnector->fake_enable = false;
1757
dcd5fb82
MF
1758 if (aconnector->dc_sink)
1759 dc_sink_release(aconnector->dc_sink);
4562236b
HW
1760 aconnector->dc_sink = NULL;
1761 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 1762 mutex_unlock(&aconnector->hpd_lock);
4562236b 1763 }
f8d2d39e 1764 drm_connector_list_iter_end(&iter);
4562236b 1765
1f6010a9 1766 /* Force mode set in atomic commit */
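 /* Setting active_changed makes drm_atomic_crtc_needs_modeset() report
  * true for each cached CRTC state when it is replayed by
  * drm_atomic_helper_resume() further down. */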
a80aa93d 1767 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 1768 new_crtc_state->active_changed = true;
4f346e65 1769
fcb4019e
LSL
1770 /*
1771 * atomic_check is expected to create the dc states. We need to release
1772 * them here, since they were duplicated as part of the suspend
1773 * procedure.
1774 */
a80aa93d 1775 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
1776 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1777 if (dm_new_crtc_state->stream) {
1778 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1779 dc_stream_release(dm_new_crtc_state->stream);
1780 dm_new_crtc_state->stream = NULL;
1781 }
1782 }
1783
a80aa93d 1784 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
1785 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1786 if (dm_new_plane_state->dc_state) {
1787 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1788 dc_plane_state_release(dm_new_plane_state->dc_state);
1789 dm_new_plane_state->dc_state = NULL;
1790 }
1791 }
1792
2d1af6a1 1793 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 1794
a80aa93d 1795 dm->cached_state = NULL;
0a214e2f 1796
9faa4237 1797 amdgpu_dm_irq_resume_late(adev);
4562236b 1798
9340dfd3
HW
1799 amdgpu_dm_smu_write_watermarks_table(adev);
1800
2d1af6a1 1801 return 0;
4562236b
HW
1802}
1803
b8592b48
LL
1804/**
1805 * DOC: DM Lifecycle
1806 *
1807 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1808 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1809 * the base driver's device list to be initialized and torn down accordingly.
1810 *
1811 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1812 */
1813
4562236b
HW
1814static const struct amd_ip_funcs amdgpu_dm_funcs = {
1815 .name = "dm",
1816 .early_init = dm_early_init,
7abcf6b5 1817 .late_init = dm_late_init,
4562236b
HW
1818 .sw_init = dm_sw_init,
1819 .sw_fini = dm_sw_fini,
1820 .hw_init = dm_hw_init,
1821 .hw_fini = dm_hw_fini,
1822 .suspend = dm_suspend,
1823 .resume = dm_resume,
1824 .is_idle = dm_is_idle,
1825 .wait_for_idle = dm_wait_for_idle,
1826 .check_soft_reset = dm_check_soft_reset,
1827 .soft_reset = dm_soft_reset,
1828 .set_clockgating_state = dm_set_clockgating_state,
1829 .set_powergating_state = dm_set_powergating_state,
1830};
1831
1832const struct amdgpu_ip_block_version dm_ip_block =
1833{
1834 .type = AMD_IP_BLOCK_TYPE_DCE,
1835 .major = 1,
1836 .minor = 0,
1837 .rev = 0,
1838 .funcs = &amdgpu_dm_funcs,
1839};
1840
ca3268c4 1841
b8592b48
LL
1842/**
1843 * DOC: atomic
1844 *
1845 * *WIP*
1846 */
0a323b84 1847
b3663f70 1848static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 1849 .fb_create = amdgpu_display_user_framebuffer_create,
366c1baa 1850 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 1851 .atomic_check = amdgpu_dm_atomic_check,
da5c47f6 1852 .atomic_commit = amdgpu_dm_atomic_commit,
54f5499a
AG
1853};
1854
1855static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1856 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
1857};
1858
94562810
RS
1859static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1860{
1861 u32 max_cll, min_cll, max, min, q, r;
1862 struct amdgpu_dm_backlight_caps *caps;
1863 struct amdgpu_display_manager *dm;
1864 struct drm_connector *conn_base;
1865 struct amdgpu_device *adev;
1866 static const u8 pre_computed_values[] = {
1867 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1868 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1869
1870 if (!aconnector || !aconnector->dc_link)
1871 return;
1872
1873 conn_base = &aconnector->base;
1874 adev = conn_base->dev->dev_private;
1875 dm = &adev->dm;
1876 caps = &dm->backlight_caps;
1877 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1878 caps->aux_support = false;
1879 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1880 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1881
1882 if (caps->ext_caps->bits.oled == 1 ||
1883 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1884 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1885 caps->aux_support = true;
1886
1887 /* From the specification (CTA-861-G), for calculating the maximum
1888 * luminance we need to use:
1889 * Luminance = 50*2**(CV/32)
1890 * Where CV is a one-byte value.
1891 * For calculating this expression we may need floating-point precision;
1892 * to avoid this complexity level, we take advantage of the fact that CV
1893 * is divided by a constant. From Euclid's division algorithm, we know
1894 * that CV can be written as: CV = 32*q + r. Next, we replace CV in the
1895 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
1896 * need to pre-compute the value of r/32. For pre-computing the values
1897 * we just used the following Ruby line:
1898 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1899 * The results of the above expressions can be verified at
1900 * pre_computed_values.
1901 */
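 /*
  * Worked example: max_cll = 70 gives q = 70 >> 5 = 2 and r = 70 % 32 = 6,
  * so max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 nits,
  * which matches 50*2**(70/32) ~= 227.8 from the formula above.
  */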
1902 q = max_cll >> 5;
1903 r = max_cll % 32;
1904 max = (1 << q) * pre_computed_values[r];
1905
1906 // min luminance: maxLum * (CV/255)^2 / 100, expanded so that the
1907 // intermediate CV/255 term is not truncated to zero by integer math
1908 min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
1909
1910 caps->aux_max_input_signal = max;
1911 caps->aux_min_input_signal = min;
1912}
1913
97e51c16
HW
1914void amdgpu_dm_update_connector_after_detect(
1915 struct amdgpu_dm_connector *aconnector)
4562236b
HW
1916{
1917 struct drm_connector *connector = &aconnector->base;
1918 struct drm_device *dev = connector->dev;
b73a22d3 1919 struct dc_sink *sink;
4562236b
HW
1920
1921 /* MST handled by drm_mst framework */
1922 if (aconnector->mst_mgr.mst_state)
1923 return;
1924
1925
1926 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
1927 if (sink)
1928 dc_sink_retain(sink);
4562236b 1929
1f6010a9
DF
1930 /*
1931 * Edid mgmt connector gets first update only in mode_valid hook and then
4562236b 1932 * the connector sink is set to either a fake or physical sink depending on link status.
1f6010a9 1933 * Skip if already done during boot.
4562236b
HW
1934 */
1935 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1936 && aconnector->dc_em_sink) {
1937
1f6010a9
DF
1938 /*
1939 * For S3 resume with headless, use the emulated dc_em_sink to fake a stream
1940 * because connector->sink is set to NULL on resume
4562236b
HW
1941 */
1942 mutex_lock(&dev->mode_config.mutex);
1943
1944 if (sink) {
922aa1e1 1945 if (aconnector->dc_sink) {
98e6436d 1946 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
1947 /*
1948 * The retain and release below bump up the sink refcount
1949 * because the link no longer points to it after disconnect,
1950 * so the next crtc-to-connector reshuffle by UMD would
922aa1e1
AG
1951 * otherwise trigger an unwanted dc_sink release
1952 */
dcd5fb82 1953 dc_sink_release(aconnector->dc_sink);
922aa1e1 1954 }
4562236b 1955 aconnector->dc_sink = sink;
dcd5fb82 1956 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
1957 amdgpu_dm_update_freesync_caps(connector,
1958 aconnector->edid);
4562236b 1959 } else {
98e6436d 1960 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 1961 if (!aconnector->dc_sink) {
4562236b 1962 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 1963 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 1964 }
4562236b
HW
1965 }
1966
1967 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
1968
1969 if (sink)
1970 dc_sink_release(sink);
4562236b
HW
1971 return;
1972 }
1973
1974 /*
1975 * TODO: temporary guard to look for proper fix
1976 * if this sink is MST sink, we should not do anything
1977 */
dcd5fb82
MF
1978 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1979 dc_sink_release(sink);
4562236b 1980 return;
dcd5fb82 1981 }
4562236b
HW
1982
1983 if (aconnector->dc_sink == sink) {
1f6010a9
DF
1984 /*
1985 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1986 * Do nothing!!
1987 */
f1ad2f5e 1988 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 1989 aconnector->connector_id);
dcd5fb82
MF
1990 if (sink)
1991 dc_sink_release(sink);
4562236b
HW
1992 return;
1993 }
1994
f1ad2f5e 1995 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
1996 aconnector->connector_id, aconnector->dc_sink, sink);
1997
1998 mutex_lock(&dev->mode_config.mutex);
1999
1f6010a9
DF
2000 /*
2001 * 1. Update status of the drm connector
2002 * 2. Send an event and let userspace tell us what to do
2003 */
4562236b 2004 if (sink) {
1f6010a9
DF
2005 /*
2006 * TODO: check if we still need the S3 mode update workaround.
2007 * If yes, put it here.
2008 */
4562236b 2009 if (aconnector->dc_sink)
98e6436d 2010 amdgpu_dm_update_freesync_caps(connector, NULL);
4562236b
HW
2011
2012 aconnector->dc_sink = sink;
dcd5fb82 2013 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2014 if (sink->dc_edid.length == 0) {
4562236b 2015 aconnector->edid = NULL;
e86e8947 2016 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
900b3cb1 2017 } else {
4562236b
HW
2018 aconnector->edid =
2019 (struct edid *) sink->dc_edid.raw_edid;
2020
2021
c555f023 2022 drm_connector_update_edid_property(connector,
4562236b 2023 aconnector->edid);
e86e8947
HV
2024 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2025 aconnector->edid);
4562236b 2026 }
98e6436d 2027 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2028 update_connector_ext_caps(aconnector);
4562236b 2029 } else {
e86e8947 2030 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2031 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2032 drm_connector_update_edid_property(connector, NULL);
4562236b 2033 aconnector->num_modes = 0;
dcd5fb82 2034 dc_sink_release(aconnector->dc_sink);
4562236b 2035 aconnector->dc_sink = NULL;
5326c452 2036 aconnector->edid = NULL;
0c8620d6
BL
2037#ifdef CONFIG_DRM_AMD_DC_HDCP
2038 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2039 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2040 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2041#endif
4562236b
HW
2042 }
2043
2044 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2045
2046 if (sink)
2047 dc_sink_release(sink);
4562236b
HW
2048}
2049
2050static void handle_hpd_irq(void *param)
2051{
c84dec2f 2052 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2053 struct drm_connector *connector = &aconnector->base;
2054 struct drm_device *dev = connector->dev;
fbbdadf2 2055 enum dc_connection_type new_connection_type = dc_connection_none;
0c8620d6
BL
2056#ifdef CONFIG_DRM_AMD_DC_HDCP
2057 struct amdgpu_device *adev = dev->dev_private;
2058#endif
4562236b 2059
1f6010a9
DF
2060 /*
2061 * In case of failure or MST, there is no need to update the connector status
2062 * or notify the OS, since (in the MST case) MST does this in its own context.
4562236b
HW
2063 */
2064 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2065
0c8620d6 2066#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 2067 if (adev->dm.hdcp_workqueue)
96a3b32e 2068 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
0c8620d6 2069#endif
2e0ac3d6
HW
2070 if (aconnector->fake_enable)
2071 aconnector->fake_enable = false;
2072
fbbdadf2
BL
2073 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2074 DRM_ERROR("KMS: Failed to detect connector\n");
2075
2076 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2077 emulated_link_detect(aconnector->dc_link);
2078
2079
2080 drm_modeset_lock_all(dev);
2081 dm_restore_drm_connector_state(dev, connector);
2082 drm_modeset_unlock_all(dev);
2083
2084 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2085 drm_kms_helper_hotplug_event(dev);
2086
2087 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
4562236b
HW
2088 amdgpu_dm_update_connector_after_detect(aconnector);
2089
2090
2091 drm_modeset_lock_all(dev);
2092 dm_restore_drm_connector_state(dev, connector);
2093 drm_modeset_unlock_all(dev);
2094
2095 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2096 drm_kms_helper_hotplug_event(dev);
2097 }
2098 mutex_unlock(&aconnector->hpd_lock);
2099
2100}
2101
c84dec2f 2102static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2103{
2104 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2105 uint8_t dret;
2106 bool new_irq_handled = false;
2107 int dpcd_addr;
2108 int dpcd_bytes_to_read;
2109
2110 const int max_process_count = 30;
2111 int process_count = 0;
2112
2113 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2114
2115 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2116 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2117 /* DPCD 0x200 - 0x201 for downstream IRQ */
2118 dpcd_addr = DP_SINK_COUNT;
2119 } else {
2120 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2121 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2122 dpcd_addr = DP_SINK_COUNT_ESI;
2123 }
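 /*
  * A short note on the two layouts, per the DPCD ranges in the comments
  * above: sinks with DPCD rev >= 1.2 expose the Event Status Indicator
  * (ESI) block at 0x2002-0x2005 in place of the legacy 0x200-0x201
  * sink-count/IRQ-vector registers; the ESI bytes are what
  * drm_dp_mst_hpd_irq() parses in the loop below.
  */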
2124
2125 dret = drm_dp_dpcd_read(
2126 &aconnector->dm_dp_aux.aux,
2127 dpcd_addr,
2128 esi,
2129 dpcd_bytes_to_read);
2130
2131 while (dret == dpcd_bytes_to_read &&
2132 process_count < max_process_count) {
2133 uint8_t retry;
2134 dret = 0;
2135
2136 process_count++;
2137
f1ad2f5e 2138 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
2139 /* handle HPD short pulse irq */
2140 if (aconnector->mst_mgr.mst_state)
2141 drm_dp_mst_hpd_irq(
2142 &aconnector->mst_mgr,
2143 esi,
2144 &new_irq_handled);
4562236b
HW
2145
2146 if (new_irq_handled) {
2147 /* ACK at DPCD to notify downstream */
2148 const int ack_dpcd_bytes_to_write =
2149 dpcd_bytes_to_read - 1;
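 /*
  * esi[1..] is written back starting one byte past dpcd_addr, i.e. over
  * the IRQ-vector registers that were just read, acknowledging the bits
  * that were serviced; the retry loop below repeats the write until the
  * full ack length goes through.
  */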
2150
2151 for (retry = 0; retry < 3; retry++) {
2152 uint8_t wret;
2153
2154 wret = drm_dp_dpcd_write(
2155 &aconnector->dm_dp_aux.aux,
2156 dpcd_addr + 1,
2157 &esi[1],
2158 ack_dpcd_bytes_to_write);
2159 if (wret == ack_dpcd_bytes_to_write)
2160 break;
2161 }
2162
1f6010a9 2163 /* check if there is a new irq to be handled */
4562236b
HW
2164 dret = drm_dp_dpcd_read(
2165 &aconnector->dm_dp_aux.aux,
2166 dpcd_addr,
2167 esi,
2168 dpcd_bytes_to_read);
2169
2170 new_irq_handled = false;
d4a6e8a9 2171 } else {
4562236b 2172 break;
d4a6e8a9 2173 }
4562236b
HW
2174 }
2175
2176 if (process_count == max_process_count)
f1ad2f5e 2177 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
2178}
2179
2180static void handle_hpd_rx_irq(void *param)
2181{
c84dec2f 2182 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2183 struct drm_connector *connector = &aconnector->base;
2184 struct drm_device *dev = connector->dev;
53cbf65c 2185 struct dc_link *dc_link = aconnector->dc_link;
4562236b 2186 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
fbbdadf2 2187 enum dc_connection_type new_connection_type = dc_connection_none;
2a0f9270
BL
2188#ifdef CONFIG_DRM_AMD_DC_HDCP
2189 union hpd_irq_data hpd_irq_data;
2190 struct amdgpu_device *adev = dev->dev_private;
2191
2192 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2193#endif
4562236b 2194
1f6010a9
DF
2195 /*
2196 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
4562236b
HW
2197 * conflict; after the i2c helper is implemented, this mutex should be
2198 * retired.
2199 */
53cbf65c 2200 if (dc_link->type != dc_connection_mst_branch)
4562236b
HW
2201 mutex_lock(&aconnector->hpd_lock);
2202
2a0f9270
BL
2203
2204#ifdef CONFIG_DRM_AMD_DC_HDCP
2205 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2206#else
4e18814e 2207 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2a0f9270 2208#endif
4562236b
HW
2209 !is_mst_root_connector) {
2210 /* Downstream Port status changed. */
fbbdadf2
BL
2211 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2212 DRM_ERROR("KMS: Failed to detect connector\n");
2213
2214 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2215 emulated_link_detect(dc_link);
2216
2217 if (aconnector->fake_enable)
2218 aconnector->fake_enable = false;
2219
2220 amdgpu_dm_update_connector_after_detect(aconnector);
2221
2222
2223 drm_modeset_lock_all(dev);
2224 dm_restore_drm_connector_state(dev, connector);
2225 drm_modeset_unlock_all(dev);
2226
2227 drm_kms_helper_hotplug_event(dev);
2228 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
2229
2230 if (aconnector->fake_enable)
2231 aconnector->fake_enable = false;
2232
4562236b
HW
2233 amdgpu_dm_update_connector_after_detect(aconnector);
2234
2235
2236 drm_modeset_lock_all(dev);
2237 dm_restore_drm_connector_state(dev, connector);
2238 drm_modeset_unlock_all(dev);
2239
2240 drm_kms_helper_hotplug_event(dev);
2241 }
2242 }
2a0f9270 2243#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
2244 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2245 if (adev->dm.hdcp_workqueue)
2246 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2247 }
2a0f9270 2248#endif
4562236b 2249 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
53cbf65c 2250 (dc_link->type == dc_connection_mst_branch))
4562236b
HW
2251 dm_handle_hpd_rx_irq(aconnector);
2252
e86e8947
HV
2253 if (dc_link->type != dc_connection_mst_branch) {
2254 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
4562236b 2255 mutex_unlock(&aconnector->hpd_lock);
e86e8947 2256 }
4562236b
HW
2257}
2258
2259static void register_hpd_handlers(struct amdgpu_device *adev)
2260{
2261 struct drm_device *dev = adev->ddev;
2262 struct drm_connector *connector;
c84dec2f 2263 struct amdgpu_dm_connector *aconnector;
4562236b
HW
2264 const struct dc_link *dc_link;
2265 struct dc_interrupt_params int_params = {0};
2266
2267 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2268 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2269
2270 list_for_each_entry(connector,
2271 &dev->mode_config.connector_list, head) {
2272
c84dec2f 2273 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2274 dc_link = aconnector->dc_link;
2275
2276 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2277 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2278 int_params.irq_source = dc_link->irq_source_hpd;
2279
2280 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2281 handle_hpd_irq,
2282 (void *) aconnector);
2283 }
2284
2285 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2286
2287 /* Also register for DP short pulse (hpd_rx). */
2288 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2289 int_params.irq_source = dc_link->irq_source_hpd_rx;
2290
2291 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2292 handle_hpd_rx_irq,
2293 (void *) aconnector);
2294 }
2295 }
2296}
2297
2298/* Register IRQ sources and initialize IRQ callbacks */
2299static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2300{
2301 struct dc *dc = adev->dm.dc;
2302 struct common_irq_params *c_irq_params;
2303 struct dc_interrupt_params int_params = {0};
2304 int r;
2305 int i;
1ffdeca6 2306 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 2307
84374725 2308 if (adev->asic_type >= CHIP_VEGA10)
3760f76c 2309 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
2310
2311 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2312 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2313
1f6010a9
DF
2314 /*
2315 * Actions of amdgpu_irq_add_id():
4562236b
HW
2316 * 1. Register a set() function with base driver.
2317 * Base driver will call set() function to enable/disable an
2318 * interrupt in DC hardware.
2319 * 2. Register amdgpu_dm_irq_handler().
2320 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2321 * coming from DC hardware.
2322 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2323 * for acknowledging and handling. */
2324
b57de80a 2325 /* Use VBLANK interrupt */
e9029155 2326 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 2327 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
2328 if (r) {
2329 DRM_ERROR("Failed to add crtc irq id!\n");
2330 return r;
2331 }
2332
2333 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2334 int_params.irq_source =
3d761e79 2335 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 2336
b57de80a 2337 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
2338
2339 c_irq_params->adev = adev;
2340 c_irq_params->irq_src = int_params.irq_source;
2341
2342 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2343 dm_crtc_high_irq, c_irq_params);
2344 }
2345
d2574c33
MK
2346 /* Use VUPDATE interrupt */
2347 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2348 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2349 if (r) {
2350 DRM_ERROR("Failed to add vupdate irq id!\n");
2351 return r;
2352 }
2353
2354 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2355 int_params.irq_source =
2356 dc_interrupt_to_irq_source(dc, i, 0);
2357
2358 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2359
2360 c_irq_params->adev = adev;
2361 c_irq_params->irq_src = int_params.irq_source;
2362
2363 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2364 dm_vupdate_high_irq, c_irq_params);
2365 }
2366
3d761e79 2367 /* Use GRPH_PFLIP interrupt */
4562236b
HW
2368 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2369 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 2370 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
2371 if (r) {
2372 DRM_ERROR("Failed to add page flip irq id!\n");
2373 return r;
2374 }
2375
2376 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2377 int_params.irq_source =
2378 dc_interrupt_to_irq_source(dc, i, 0);
2379
2380 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2381
2382 c_irq_params->adev = adev;
2383 c_irq_params->irq_src = int_params.irq_source;
2384
2385 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2386 dm_pflip_high_irq, c_irq_params);
2387
2388 }
2389
2390 /* HPD */
2c8ad2d5
AD
2391 r = amdgpu_irq_add_id(adev, client_id,
2392 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
2393 if (r) {
2394 DRM_ERROR("Failed to add hpd irq id!\n");
2395 return r;
2396 }
2397
2398 register_hpd_handlers(adev);
2399
2400 return 0;
2401}
2402
b86a1aa3 2403#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
2404/* Register IRQ sources and initialize IRQ callbacks */
2405static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2406{
2407 struct dc *dc = adev->dm.dc;
2408 struct common_irq_params *c_irq_params;
2409 struct dc_interrupt_params int_params = {0};
2410 int r;
2411 int i;
2412
2413 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2414 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2415
1f6010a9
DF
2416 /*
2417 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
2418 * 1. Register a set() function with base driver.
2419 * Base driver will call set() function to enable/disable an
2420 * interrupt in DC hardware.
2421 * 2. Register amdgpu_dm_irq_handler().
2422 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2423 * coming from DC hardware.
2424 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2425 * for acknowledging and handling.
1f6010a9 2426 */
ff5ef992
AD
2427
2428 /* Use VSTARTUP interrupt */
2429 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2430 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2431 i++) {
3760f76c 2432 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
2433
2434 if (r) {
2435 DRM_ERROR("Failed to add crtc irq id!\n");
2436 return r;
2437 }
2438
2439 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2440 int_params.irq_source =
2441 dc_interrupt_to_irq_source(dc, i, 0);
2442
2443 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2444
2445 c_irq_params->adev = adev;
2446 c_irq_params->irq_src = int_params.irq_source;
2447
2448 amdgpu_dm_irq_register_interrupt(adev, &int_params,
16f17eda 2449 dm_dcn_crtc_high_irq, c_irq_params);
d2574c33
MK
2450 }
2451
ff5ef992
AD
2452 /* Use GRPH_PFLIP interrupt */
2453 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2454 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2455 i++) {
3760f76c 2456 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
2457 if (r) {
2458 DRM_ERROR("Failed to add page flip irq id!\n");
2459 return r;
2460 }
2461
2462 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2463 int_params.irq_source =
2464 dc_interrupt_to_irq_source(dc, i, 0);
2465
2466 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2467
2468 c_irq_params->adev = adev;
2469 c_irq_params->irq_src = int_params.irq_source;
2470
2471 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2472 dm_pflip_high_irq, c_irq_params);
2473
2474 }
2475
2476 /* HPD */
3760f76c 2477 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
ff5ef992
AD
2478 &adev->hpd_irq);
2479 if (r) {
2480 DRM_ERROR("Failed to add hpd irq id!\n");
2481 return r;
2482 }
2483
2484 register_hpd_handlers(adev);
2485
2486 return 0;
2487}
2488#endif
2489
eb3dc897
NK
2490/*
2491 * Acquires the lock for the atomic state object and returns
2492 * the new atomic state.
2493 *
2494 * This should only be called during atomic check.
2495 */
2496static int dm_atomic_get_state(struct drm_atomic_state *state,
2497 struct dm_atomic_state **dm_state)
2498{
2499 struct drm_device *dev = state->dev;
2500 struct amdgpu_device *adev = dev->dev_private;
2501 struct amdgpu_display_manager *dm = &adev->dm;
2502 struct drm_private_state *priv_state;
eb3dc897
NK
2503
2504 if (*dm_state)
2505 return 0;
2506
eb3dc897
NK
2507 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2508 if (IS_ERR(priv_state))
2509 return PTR_ERR(priv_state);
2510
2511 *dm_state = to_dm_atomic_state(priv_state);
2512
2513 return 0;
2514}
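/*
 * A minimal usage sketch (hypothetical caller) from an atomic-check path:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context is now safe to inspect or modify; the
 *	// private-object lock stays held for the rest of the check.
 */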
2515
2516struct dm_atomic_state *
2517dm_atomic_get_new_state(struct drm_atomic_state *state)
2518{
2519 struct drm_device *dev = state->dev;
2520 struct amdgpu_device *adev = dev->dev_private;
2521 struct amdgpu_display_manager *dm = &adev->dm;
2522 struct drm_private_obj *obj;
2523 struct drm_private_state *new_obj_state;
2524 int i;
2525
2526 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2527 if (obj->funcs == dm->atomic_obj.funcs)
2528 return to_dm_atomic_state(new_obj_state);
2529 }
2530
2531 return NULL;
2532}
2533
2534struct dm_atomic_state *
2535dm_atomic_get_old_state(struct drm_atomic_state *state)
2536{
2537 struct drm_device *dev = state->dev;
2538 struct amdgpu_device *adev = dev->dev_private;
2539 struct amdgpu_display_manager *dm = &adev->dm;
2540 struct drm_private_obj *obj;
2541 struct drm_private_state *old_obj_state;
2542 int i;
2543
2544 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2545 if (obj->funcs == dm->atomic_obj.funcs)
2546 return to_dm_atomic_state(old_obj_state);
2547 }
2548
2549 return NULL;
2550}
2551
2552static struct drm_private_state *
2553dm_atomic_duplicate_state(struct drm_private_obj *obj)
2554{
2555 struct dm_atomic_state *old_state, *new_state;
2556
2557 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2558 if (!new_state)
2559 return NULL;
2560
2561 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2562
813d20dc
AW
2563 old_state = to_dm_atomic_state(obj->state);
2564
2565 if (old_state && old_state->context)
2566 new_state->context = dc_copy_state(old_state->context);
2567
eb3dc897
NK
2568 if (!new_state->context) {
2569 kfree(new_state);
2570 return NULL;
2571 }
2572
eb3dc897
NK
2573 return &new_state->base;
2574}
2575
2576static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2577 struct drm_private_state *state)
2578{
2579 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2580
2581 if (dm_state && dm_state->context)
2582 dc_release_state(dm_state->context);
2583
2584 kfree(dm_state);
2585}
2586
2587static struct drm_private_state_funcs dm_atomic_state_funcs = {
2588 .atomic_duplicate_state = dm_atomic_duplicate_state,
2589 .atomic_destroy_state = dm_atomic_destroy_state,
2590};
2591
4562236b
HW
2592static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2593{
eb3dc897 2594 struct dm_atomic_state *state;
4562236b
HW
2595 int r;
2596
2597 adev->mode_info.mode_config_initialized = true;
2598
4562236b 2599 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
54f5499a 2600 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b
HW
2601
2602 adev->ddev->mode_config.max_width = 16384;
2603 adev->ddev->mode_config.max_height = 16384;
2604
2605 adev->ddev->mode_config.preferred_depth = 24;
2606 adev->ddev->mode_config.prefer_shadow = 1;
1f6010a9 2607 /* indicates support for immediate flip */
4562236b
HW
2608 adev->ddev->mode_config.async_page_flip = true;
2609
770d13b1 2610 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
4562236b 2611
eb3dc897
NK
2612 state = kzalloc(sizeof(*state), GFP_KERNEL);
2613 if (!state)
2614 return -ENOMEM;
2615
813d20dc 2616 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
2617 if (!state->context) {
2618 kfree(state);
2619 return -ENOMEM;
2620 }
2621
2622 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2623
8c1a765b
DA
2624 drm_atomic_private_obj_init(adev->ddev,
2625 &adev->dm.atomic_obj,
eb3dc897
NK
2626 &state->base,
2627 &dm_atomic_state_funcs);
2628
3dc9b1ce 2629 r = amdgpu_display_modeset_create_props(adev);
4562236b
HW
2630 if (r)
2631 return r;
2632
6ce8f316
NK
2633 r = amdgpu_dm_audio_init(adev);
2634 if (r)
2635 return r;
2636
4562236b
HW
2637 return 0;
2638}
2639
206bbafe
DF
2640#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2641#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 2642#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 2643
4562236b
HW
2644#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2645 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2646
206bbafe
DF
2647static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2648{
2649#if defined(CONFIG_ACPI)
2650 struct amdgpu_dm_backlight_caps caps;
2651
2652 if (dm->backlight_caps.caps_valid)
2653 return;
2654
2655 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2656 if (caps.caps_valid) {
94562810
RS
2657 dm->backlight_caps.caps_valid = true;
2658 if (caps.aux_support)
2659 return;
206bbafe
DF
2660 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2661 dm->backlight_caps.max_input_signal = caps.max_input_signal;
206bbafe
DF
2662 } else {
2663 dm->backlight_caps.min_input_signal =
2664 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2665 dm->backlight_caps.max_input_signal =
2666 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2667 }
2668#else
94562810
RS
2669 if (dm->backlight_caps.aux_support)
2670 return;
2671
8bcbc9ef
DF
2672 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2673 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
2674#endif
2675}
2676
94562810
RS
2677static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2678{
2679 bool rc;
2680
2681 if (!link)
2682 return 1;
2683
2684 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2685 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2686
2687 return rc ? 0 : 1;
2688}
2689
2690static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2691 const uint32_t user_brightness)
2692{
2693 u32 min, max, conversion_pace;
2694 u32 brightness = user_brightness;
2695
2696 if (!caps)
2697 goto out;
2698
2699 if (!caps->aux_support) {
2700 max = caps->max_input_signal;
2701 min = caps->min_input_signal;
2702 /*
2703 * The brightness input is in the range 0-255.
2704 * It needs to be rescaled to be between the
2705 * requested min and max input signal.
2706 * It also needs to be scaled up by 0x101 to
2707 * match the DC interface, which has a range of
2708 * 0 to 0xffff.
2709 */
2710 conversion_pace = 0x101;
2711 brightness =
2712 user_brightness
2713 * conversion_pace
2714 * (max - min)
2715 / AMDGPU_MAX_BL_LEVEL
2716 + min * conversion_pace;
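 /*
  * Worked example, assuming the default caps of min_input_signal = 12
  * and max_input_signal = 255: user_brightness = 255 yields
  * 255 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 0xffff,
  * i.e. full scale on the 0 to 0xffff DC range.
  */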
2717 } else {
2718 /* TODO
2719 * We are doing a linear interpolation here, which is OK but
2720 * does not provide the optimal result. We probably want
2721 * something close to the Perceptual Quantizer (PQ) curve.
2722 */
2723 max = caps->aux_max_input_signal;
2724 min = caps->aux_min_input_signal;
2725
2726 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2727 + user_brightness * max;
2728 // Multiply the value by 1000 since we use millinits
2729 brightness *= 1000;
2730 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
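 /*
  * E.g. user_brightness = AMDGPU_MAX_BL_LEVEL reduces the interpolation
  * above to max * 1000 millinits, while 0 yields min * 1000, with a
  * linear ramp in between.
  */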
2731 }
2732
2733out:
2734 return brightness;
2735}
2736
4562236b
HW
2737static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2738{
2739 struct amdgpu_display_manager *dm = bl_get_data(bd);
206bbafe 2740 struct amdgpu_dm_backlight_caps caps;
94562810
RS
2741 struct dc_link *link = NULL;
2742 u32 brightness;
2743 bool rc;
4562236b 2744
206bbafe
DF
2745 amdgpu_dm_update_backlight_caps(dm);
2746 caps = dm->backlight_caps;
94562810
RS
2747
2748 link = (struct dc_link *)dm->backlight_link;
2749
2750 brightness = convert_brightness(&caps, bd->props.brightness);
2751 // Change brightness based on AUX property
2752 if (caps.aux_support)
2753 return set_backlight_via_aux(link, brightness);
2754
2755 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2756
2757 return rc ? 0 : 1;
4562236b
HW
2758}
2759
2760static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2761{
620a0d27
DF
2762 struct amdgpu_display_manager *dm = bl_get_data(bd);
2763 int ret = dc_link_get_backlight_level(dm->backlight_link);
2764
2765 if (ret == DC_ERROR_UNEXPECTED)
2766 return bd->props.brightness;
2767 return ret;
4562236b
HW
2768}
2769
2770static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 2771 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
2772 .get_brightness = amdgpu_dm_backlight_get_brightness,
2773 .update_status = amdgpu_dm_backlight_update_status,
2774};
2775
7578ecda
AD
2776static void
2777amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
2778{
2779 char bl_name[16];
2780 struct backlight_properties props = { 0 };
2781
206bbafe
DF
2782 amdgpu_dm_update_backlight_caps(dm);
2783
4562236b 2784 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 2785 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
2786 props.type = BACKLIGHT_RAW;
2787
2788 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2789 dm->adev->ddev->primary->index);
2790
2791 dm->backlight_dev = backlight_device_register(bl_name,
2792 dm->adev->ddev->dev,
2793 dm,
2794 &amdgpu_dm_backlight_ops,
2795 &props);
2796
74baea42 2797 if (IS_ERR(dm->backlight_dev))
4562236b
HW
2798 DRM_ERROR("DM: Backlight registration failed!\n");
2799 else
f1ad2f5e 2800 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
2801}
2802
2803#endif
2804
df534fff 2805static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 2806 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
2807 enum drm_plane_type plane_type,
2808 const struct dc_plane_cap *plane_cap)
df534fff 2809{
f180b4bc 2810 struct drm_plane *plane;
df534fff
S
2811 unsigned long possible_crtcs;
2812 int ret = 0;
2813
f180b4bc 2814 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
2815 if (!plane) {
2816 DRM_ERROR("KMS: Failed to allocate plane\n");
2817 return -ENOMEM;
2818 }
b2fddb13 2819 plane->type = plane_type;
df534fff
S
2820
2821 /*
b2fddb13
NK
2822 * HACK: IGT tests expect that the primary plane for a CRTC
2823 * can only have one possible CRTC. Only expose support for
2824 * any CRTC on planes that will not be used as a primary plane
2825 * for a CRTC - like overlay or underlay planes.
df534fff
S
2826 */
2827 possible_crtcs = 1 << plane_id;
2828 if (plane_id >= dm->dc->caps.max_streams)
2829 possible_crtcs = 0xff;
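 /* E.g. plane_id 0 may only attach to CRTC 0 (mask 0x1), while a
  * plane_id at or beyond max_streams (an overlay/underlay plane)
  * may attach to any CRTC (mask 0xff). */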
2830
cc1fec57 2831 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
2832
2833 if (ret) {
2834 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 2835 kfree(plane);
df534fff
S
2836 return ret;
2837 }
2838
54087768
NK
2839 if (mode_info)
2840 mode_info->planes[plane_id] = plane;
2841
df534fff
S
2842 return ret;
2843}
2844
89fc8d4e
HW
2845
2846static void register_backlight_device(struct amdgpu_display_manager *dm,
2847 struct dc_link *link)
2848{
2849#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2850 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2851
2852 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2853 link->type != dc_connection_none) {
1f6010a9
DF
2854 /*
2855 * Even if registration fails, we should continue with
89fc8d4e
HW
2856 * DM initialization because not having backlight control
2857 * is better than a black screen.
2858 */
2859 amdgpu_dm_register_backlight_device(dm);
2860
2861 if (dm->backlight_dev)
2862 dm->backlight_link = link;
2863 }
2864#endif
2865}
2866
2867
1f6010a9
DF
2868/*
2869 * In this architecture, the association
4562236b
HW
2870 * connector -> encoder -> crtc
2871 * is not really required. The crtc and connector will hold the
2872 * display_index as an abstraction to use with the DAL component
2873 *
2874 * Returns 0 on success
2875 */
7578ecda 2876static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
2877{
2878 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 2879 int32_t i;
c84dec2f 2880 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 2881 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 2882 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 2883 uint32_t link_cnt;
cc1fec57 2884 int32_t primary_planes;
fbbdadf2 2885 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 2886 const struct dc_plane_cap *plane;
4562236b
HW
2887
2888 link_cnt = dm->dc->caps.max_links;
4562236b
HW
2889 if (amdgpu_dm_mode_config_init(dm->adev)) {
2890 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 2891 return -EINVAL;
4562236b
HW
2892 }
2893
b2fddb13
NK
2894 /* There is one primary plane per CRTC */
2895 primary_planes = dm->dc->caps.max_streams;
54087768 2896 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 2897
b2fddb13
NK
2898 /*
2899 * Initialize primary planes, implicit planes for legacy IOCTLS.
2900 * Order is reversed to match iteration order in atomic check.
2901 */
2902 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
2903 plane = &dm->dc->caps.planes[i];
2904
b2fddb13 2905 if (initialize_plane(dm, mode_info, i,
cc1fec57 2906 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 2907 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 2908 goto fail;
d4e13b0d 2909 }
df534fff 2910 }
92f3ac40 2911
0d579c7e
NK
2912 /*
2913 * Initialize overlay planes, index starting after primary planes.
2914 * These planes have a higher DRM index than the primary planes since
2915 * they should be considered as having a higher z-order.
2916 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
2917 *
2918 * Only support DCN for now, and only expose one so we don't encourage
2919 * userspace to use up all the pipes.
0d579c7e 2920 */
cc1fec57
NK
2921 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2922 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2923
2924 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2925 continue;
2926
2927 if (!plane->blends_with_above || !plane->blends_with_below)
2928 continue;
2929
ea36ad34 2930 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
2931 continue;
2932
54087768 2933 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 2934 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 2935 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 2936 goto fail;
d4e13b0d 2937 }
cc1fec57
NK
2938
2939 /* Only create one overlay plane. */
2940 break;
d4e13b0d 2941 }
4562236b 2942
d4e13b0d 2943 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 2944 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 2945 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 2946 goto fail;
4562236b 2947 }
4562236b 2948
ab2541b6 2949 dm->display_indexes_num = dm->dc->caps.max_streams;
4562236b
HW
2950
2951 /* loops over all connectors on the board */
2952 for (i = 0; i < link_cnt; i++) {
89fc8d4e 2953 struct dc_link *link = NULL;
4562236b
HW
2954
2955 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2956 DRM_ERROR(
2957 "KMS: Cannot support more than %d display indexes\n",
2958 AMDGPU_DM_MAX_DISPLAY_INDEX);
2959 continue;
2960 }
2961
2962 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2963 if (!aconnector)
cd8a2ae8 2964 goto fail;
4562236b
HW
2965
2966 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 2967 if (!aencoder)
cd8a2ae8 2968 goto fail;
4562236b
HW
2969
2970 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2971 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 2972 goto fail;
4562236b
HW
2973 }
2974
2975 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2976 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 2977 goto fail;
4562236b
HW
2978 }
2979
89fc8d4e
HW
2980 link = dc_get_link_at_index(dm->dc, i);
2981
fbbdadf2
BL
2982 if (!dc_link_detect_sink(link, &new_connection_type))
2983 DRM_ERROR("KMS: Failed to detect connector\n");
2984
2985 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2986 emulated_link_detect(link);
2987 amdgpu_dm_update_connector_after_detect(aconnector);
2988
2989 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 2990 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 2991 register_backlight_device(dm, link);
397a9bc5
RL
2992 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2993 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
2994 }
2995
2996
4562236b
HW
2997 }
2998
2999 /* Software is initialized. Now we can register interrupt handlers. */
3000 switch (adev->asic_type) {
3001 case CHIP_BONAIRE:
3002 case CHIP_HAWAII:
cd4b356f
AD
3003 case CHIP_KAVERI:
3004 case CHIP_KABINI:
3005 case CHIP_MULLINS:
4562236b
HW
3006 case CHIP_TONGA:
3007 case CHIP_FIJI:
3008 case CHIP_CARRIZO:
3009 case CHIP_STONEY:
3010 case CHIP_POLARIS11:
3011 case CHIP_POLARIS10:
b264d345 3012 case CHIP_POLARIS12:
7737de91 3013 case CHIP_VEGAM:
2c8ad2d5 3014 case CHIP_VEGA10:
2325ff30 3015 case CHIP_VEGA12:
1fe6bf2f 3016 case CHIP_VEGA20:
4562236b
HW
3017 if (dce110_register_irq_handlers(dm->adev)) {
3018 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3019 goto fail;
4562236b
HW
3020 }
3021 break;
b86a1aa3 3022#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3023 case CHIP_RAVEN:
fbd2afe5 3024 case CHIP_NAVI12:
476e955d 3025 case CHIP_NAVI10:
fce651e3 3026 case CHIP_NAVI14:
30221ad8 3027 case CHIP_RENOIR:
ff5ef992
AD
3028 if (dcn10_register_irq_handlers(dm->adev)) {
3029 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3030 goto fail;
ff5ef992
AD
3031 }
3032 break;
3033#endif
4562236b 3034 default:
e63f8673 3035 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 3036 goto fail;
4562236b
HW
3037 }
3038
1bc460a4
HW
3039 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3040 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3041
2d673560
NK
3042 /* No userspace support. */
3043 dm->dc->debug.disable_tri_buf = true;
3044
4562236b 3045 return 0;
cd8a2ae8 3046fail:
4562236b 3047 kfree(aencoder);
4562236b 3048 kfree(aconnector);
54087768 3049
59d0f396 3050 return -EINVAL;
4562236b
HW
3051}
3052
7578ecda 3053static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b
HW
3054{
3055 drm_mode_config_cleanup(dm->ddev);
eb3dc897 3056 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
3057 return;
3058}
3059
3060/******************************************************************************
3061 * amdgpu_display_funcs functions
3062 *****************************************************************************/
3063
1f6010a9 3064/*
4562236b
HW
3065 * dm_bandwidth_update - program display watermarks
3066 *
3067 * @adev: amdgpu_device pointer
3068 *
3069 * Calculate and program the display watermarks and line buffer allocation.
3070 */
3071static void dm_bandwidth_update(struct amdgpu_device *adev)
3072{
49c07a99 3073 /* TODO: implement later */
4562236b
HW
3074}
3075
39cc5be2 3076static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
3077 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3078 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
3079 .backlight_set_level = NULL, /* never called for DC */
3080 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
3081 .hpd_sense = NULL,/* called unconditionally */
3082 .hpd_set_polarity = NULL, /* called unconditionally */
3083 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3084 .page_flip_get_scanoutpos =
3085 dm_crtc_get_scanoutpos,/* called unconditionally */
3086 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3087 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3088};
3089
3090#if defined(CONFIG_DEBUG_KERNEL_DC)
3091
3ee6b26b
AD
3092static ssize_t s3_debug_store(struct device *device,
3093 struct device_attribute *attr,
3094 const char *buf,
3095 size_t count)
4562236b
HW
3096{
3097 int ret;
3098 int s3_state;
ef1de361 3099 struct drm_device *drm_dev = dev_get_drvdata(device);
4562236b
HW
3100 struct amdgpu_device *adev = drm_dev->dev_private;
3101
3102 ret = kstrtoint(buf, 0, &s3_state);
3103
3104 if (ret == 0) {
3105 if (s3_state) {
3106 dm_resume(adev);
4562236b
HW
3107 drm_kms_helper_hotplug_event(adev->ddev);
3108 } else
3109 dm_suspend(adev);
3110 }
3111
3112 return ret == 0 ? count : 0;
3113}
3114
3115DEVICE_ATTR_WO(s3_debug);
3116
3117#endif
3118
3119static int dm_early_init(void *handle)
3120{
3121 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3122
4562236b
HW
3123 switch (adev->asic_type) {
3124 case CHIP_BONAIRE:
3125 case CHIP_HAWAII:
3126 adev->mode_info.num_crtc = 6;
3127 adev->mode_info.num_hpd = 6;
3128 adev->mode_info.num_dig = 6;
4562236b 3129 break;
cd4b356f
AD
3130 case CHIP_KAVERI:
3131 adev->mode_info.num_crtc = 4;
3132 adev->mode_info.num_hpd = 6;
3133 adev->mode_info.num_dig = 7;
cd4b356f
AD
3134 break;
3135 case CHIP_KABINI:
3136 case CHIP_MULLINS:
3137 adev->mode_info.num_crtc = 2;
3138 adev->mode_info.num_hpd = 6;
3139 adev->mode_info.num_dig = 6;
cd4b356f 3140 break;
4562236b
HW
3141 case CHIP_FIJI:
3142 case CHIP_TONGA:
3143 adev->mode_info.num_crtc = 6;
3144 adev->mode_info.num_hpd = 6;
3145 adev->mode_info.num_dig = 7;
4562236b
HW
3146 break;
3147 case CHIP_CARRIZO:
3148 adev->mode_info.num_crtc = 3;
3149 adev->mode_info.num_hpd = 6;
3150 adev->mode_info.num_dig = 9;
4562236b
HW
3151 break;
3152 case CHIP_STONEY:
3153 adev->mode_info.num_crtc = 2;
3154 adev->mode_info.num_hpd = 6;
3155 adev->mode_info.num_dig = 9;
4562236b
HW
3156 break;
3157 case CHIP_POLARIS11:
b264d345 3158 case CHIP_POLARIS12:
4562236b
HW
3159 adev->mode_info.num_crtc = 5;
3160 adev->mode_info.num_hpd = 5;
3161 adev->mode_info.num_dig = 5;
4562236b
HW
3162 break;
3163 case CHIP_POLARIS10:
7737de91 3164 case CHIP_VEGAM:
4562236b
HW
3165 adev->mode_info.num_crtc = 6;
3166 adev->mode_info.num_hpd = 6;
3167 adev->mode_info.num_dig = 6;
4562236b 3168 break;
2c8ad2d5 3169 case CHIP_VEGA10:
2325ff30 3170 case CHIP_VEGA12:
1fe6bf2f 3171 case CHIP_VEGA20:
2c8ad2d5
AD
3172 adev->mode_info.num_crtc = 6;
3173 adev->mode_info.num_hpd = 6;
3174 adev->mode_info.num_dig = 6;
3175 break;
b86a1aa3 3176#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3177 case CHIP_RAVEN:
3178 adev->mode_info.num_crtc = 4;
3179 adev->mode_info.num_hpd = 4;
3180 adev->mode_info.num_dig = 4;
ff5ef992 3181 break;
476e955d 3182#endif
476e955d 3183 case CHIP_NAVI10:
fbd2afe5 3184 case CHIP_NAVI12:
476e955d
HW
3185 adev->mode_info.num_crtc = 6;
3186 adev->mode_info.num_hpd = 6;
3187 adev->mode_info.num_dig = 6;
3188 break;
fce651e3
BL
3189 case CHIP_NAVI14:
3190 adev->mode_info.num_crtc = 5;
3191 adev->mode_info.num_hpd = 5;
3192 adev->mode_info.num_dig = 5;
3193 break;
30221ad8
BL
3194 case CHIP_RENOIR:
3195 adev->mode_info.num_crtc = 4;
3196 adev->mode_info.num_hpd = 4;
3197 adev->mode_info.num_dig = 4;
3198 break;
4562236b 3199 default:
e63f8673 3200 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4562236b
HW
3201 return -EINVAL;
3202 }
3203
c8dd5715
MD
3204 amdgpu_dm_set_irq_funcs(adev);
3205
39cc5be2
AD
3206 if (adev->mode_info.funcs == NULL)
3207 adev->mode_info.funcs = &dm_display_funcs;
3208
1f6010a9
DF
3209 /*
3210 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 3211 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
3212 * amdgpu_device_init()
3213 */
4562236b
HW
3214#if defined(CONFIG_DEBUG_KERNEL_DC)
3215 device_create_file(
3216 adev->ddev->dev,
3217 &dev_attr_s3_debug);
3218#endif
3219
3220 return 0;
3221}
3222
9b690ef3 3223static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
3224 struct dc_stream_state *new_stream,
3225 struct dc_stream_state *old_stream)
9b690ef3 3226{
e7b07cee
HW
3227 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3228 return false;
3229
3230 if (!crtc_state->enable)
3231 return false;
3232
3233 return crtc_state->active;
3234}
3235
3236static bool modereset_required(struct drm_crtc_state *crtc_state)
3237{
3238 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3239 return false;
3240
3241 return !crtc_state->enable || !crtc_state->active;
3242}
3243
7578ecda 3244static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
3245{
3246 drm_encoder_cleanup(encoder);
3247 kfree(encoder);
3248}
3249
3250static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3251 .destroy = amdgpu_dm_encoder_destroy,
3252};
3253
e7b07cee 3254
695af5f9
NK
3255static int fill_dc_scaling_info(const struct drm_plane_state *state,
3256 struct dc_scaling_info *scaling_info)
e7b07cee 3257{
6491f0c0 3258 int scale_w, scale_h;
e7b07cee 3259
695af5f9 3260 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 3261
695af5f9
NK
3262 /* Source is fixed 16.16 but we ignore mantissa for now... */
3263 scaling_info->src_rect.x = state->src_x >> 16;
3264 scaling_info->src_rect.y = state->src_y >> 16;
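 /* E.g. a src_x of 16.5 in 16.16 fixed point arrives as
  * (16 << 16) | 0x8000; the shift drops the fractional half and
  * src_rect.x becomes 16. */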
e7b07cee 3265
695af5f9
NK
3266 scaling_info->src_rect.width = state->src_w >> 16;
3267 if (scaling_info->src_rect.width == 0)
3268 return -EINVAL;
3269
3270 scaling_info->src_rect.height = state->src_h >> 16;
3271 if (scaling_info->src_rect.height == 0)
3272 return -EINVAL;
3273
3274 scaling_info->dst_rect.x = state->crtc_x;
3275 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
3276
3277 if (state->crtc_w == 0)
695af5f9 3278 return -EINVAL;
e7b07cee 3279
695af5f9 3280 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
3281
3282 if (state->crtc_h == 0)
695af5f9 3283 return -EINVAL;
e7b07cee 3284
695af5f9 3285 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 3286
695af5f9
NK
3287 /* DRM doesn't specify clipping on destination output. */
3288 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 3289
6491f0c0
NK
3290 /* TODO: Validate scaling per-format with DC plane caps */
3291 scale_w = scaling_info->dst_rect.width * 1000 /
3292 scaling_info->src_rect.width;
e7b07cee 3293
6491f0c0
NK
3294 if (scale_w < 250 || scale_w > 16000)
3295 return -EINVAL;
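 /*
  * The 250-16000 bounds express the scaling ratio in units of 1/1000,
  * i.e. 0.25x to 16x: e.g. a 1920-wide source shrunk to a 480-wide
  * destination gives scale_w = 480 * 1000 / 1920 = 250, the minimum
  * allowed.
  */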
3296
3297 scale_h = scaling_info->dst_rect.height * 1000 /
3298 scaling_info->src_rect.height;
3299
3300 if (scale_h < 250 || scale_h > 16000)
3301 return -EINVAL;
3302
695af5f9
NK
3303 /*
3304 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3305 * assume reasonable defaults based on the format.
3306 */
e7b07cee 3307
695af5f9 3308 return 0;
4562236b 3309}
695af5f9 3310
3ee6b26b 3311static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
9817d5f5 3312 uint64_t *tiling_flags)
e7b07cee 3313{
e68d14dd 3314 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
e7b07cee 3315 int r = amdgpu_bo_reserve(rbo, false);
b830ebc9 3316
e7b07cee 3317 if (unlikely(r)) {
1f6010a9 3318 /* Don't show error message when returning -ERESTARTSYS */
9bbc3031
JZ
3319 if (r != -ERESTARTSYS)
3320 DRM_ERROR("Unable to reserve buffer: %d\n", r);
e7b07cee
HW
3321 return r;
3322 }
3323
e7b07cee
HW
3324 if (tiling_flags)
3325 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3326
3327 amdgpu_bo_unreserve(rbo);
3328
3329 return r;
3330}
3331
7df7e505
NK
3332static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3333{
3334 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3335
3336 return offset ? (address + offset * 256) : 0;
3337}
3338
695af5f9
NK
3339static int
3340fill_plane_dcc_attributes(struct amdgpu_device *adev,
3341 const struct amdgpu_framebuffer *afb,
3342 const enum surface_pixel_format format,
3343 const enum dc_rotation_angle rotation,
12e2b2d4 3344 const struct plane_size *plane_size,
695af5f9
NK
3345 const union dc_tiling_info *tiling_info,
3346 const uint64_t info,
3347 struct dc_plane_dcc_param *dcc,
3348 struct dc_plane_address *address)
7df7e505
NK
3349{
3350 struct dc *dc = adev->dm.dc;
8daa1218
NC
3351 struct dc_dcc_surface_param input;
3352 struct dc_surface_dcc_cap output;
7df7e505
NK
3353 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3354 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3355 uint64_t dcc_address;
3356
8daa1218
NC
3357 memset(&input, 0, sizeof(input));
3358 memset(&output, 0, sizeof(output));
3359
7df7e505 3360 if (!offset)
09e5665a
NK
3361 return 0;
3362
695af5f9 3363 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
09e5665a 3364 return 0;
7df7e505
NK
3365
3366 if (!dc->cap_funcs.get_dcc_compression_cap)
09e5665a 3367 return -EINVAL;
7df7e505 3368
695af5f9 3369 input.format = format;
12e2b2d4
DL
3370 input.surface_size.width = plane_size->surface_size.width;
3371 input.surface_size.height = plane_size->surface_size.height;
695af5f9 3372 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 3373
695af5f9 3374 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 3375 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 3376 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
3377 input.scan = SCAN_DIRECTION_VERTICAL;
3378
3379 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 3380 return -EINVAL;
7df7e505
NK
3381
3382 if (!output.capable)
09e5665a 3383 return -EINVAL;
7df7e505
NK
3384
3385 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
09e5665a 3386 return -EINVAL;
7df7e505 3387
09e5665a 3388 dcc->enable = 1;
12e2b2d4 3389 dcc->meta_pitch =
7df7e505 3390 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
12e2b2d4 3391 dcc->independent_64b_blks = i64b;
7df7e505
NK
3392
3393 dcc_address = get_dcc_address(afb->address, info);
09e5665a
NK
3394 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3395 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
7df7e505 3396
09e5665a
NK
3397 return 0;
3398}
3399
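/*
 * Translate the framebuffer into DC plane size, tiling info, DCC and
 * scanout address. Single-plane RGB formats use the GRAPHICS address
 * type; two-plane YUV formats use VIDEO_PROGRESSIVE with separate
 * luma/chroma addresses. GFX9-era ASICs additionally get DCC filled
 * in via fill_plane_dcc_attributes().
 */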
3400static int
320932bf 3401fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 3402 const struct amdgpu_framebuffer *afb,
695af5f9
NK
3403 const enum surface_pixel_format format,
3404 const enum dc_rotation_angle rotation,
3405 const uint64_t tiling_flags,
09e5665a 3406 union dc_tiling_info *tiling_info,
12e2b2d4 3407 struct plane_size *plane_size,
09e5665a 3408 struct dc_plane_dcc_param *dcc,
695af5f9 3409 struct dc_plane_address *address)
09e5665a 3410{
320932bf 3411 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
3412 int ret;
3413
3414 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 3415 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 3416 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
3417 memset(address, 0, sizeof(*address));
3418
695af5f9 3419 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
12e2b2d4
DL
3420 plane_size->surface_size.x = 0;
3421 plane_size->surface_size.y = 0;
3422 plane_size->surface_size.width = fb->width;
3423 plane_size->surface_size.height = fb->height;
3424 plane_size->surface_pitch =
320932bf
NK
3425 fb->pitches[0] / fb->format->cpp[0];
3426
e0634e8d
NK
3427 address->type = PLN_ADDR_TYPE_GRAPHICS;
3428 address->grph.addr.low_part = lower_32_bits(afb->address);
3429 address->grph.addr.high_part = upper_32_bits(afb->address);
1894478a 3430 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
1791e54f 3431 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 3432
12e2b2d4
DL
3433 plane_size->surface_size.x = 0;
3434 plane_size->surface_size.y = 0;
3435 plane_size->surface_size.width = fb->width;
3436 plane_size->surface_size.height = fb->height;
3437 plane_size->surface_pitch =
320932bf
NK
3438 fb->pitches[0] / fb->format->cpp[0];
3439
12e2b2d4
DL
3440 plane_size->chroma_size.x = 0;
3441 plane_size->chroma_size.y = 0;
320932bf 3442 /* TODO: set these based on surface format */
12e2b2d4
DL
3443 plane_size->chroma_size.width = fb->width / 2;
3444 plane_size->chroma_size.height = fb->height / 2;
320932bf 3445
12e2b2d4 3446 plane_size->chroma_pitch =
320932bf
NK
3447 fb->pitches[1] / fb->format->cpp[1];
3448
e0634e8d
NK
3449 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3450 address->video_progressive.luma_addr.low_part =
3451 lower_32_bits(afb->address);
3452 address->video_progressive.luma_addr.high_part =
3453 upper_32_bits(afb->address);
3454 address->video_progressive.chroma_addr.low_part =
3455 lower_32_bits(chroma_addr);
3456 address->video_progressive.chroma_addr.high_part =
3457 upper_32_bits(chroma_addr);
3458 }
09e5665a
NK
3459
3460 /* Fill GFX8 params */
3461 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3462 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3463
3464 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3465 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3466 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3467 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3468 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3469
3470 /* XXX fix me for VI */
3471 tiling_info->gfx8.num_banks = num_banks;
3472 tiling_info->gfx8.array_mode =
3473 DC_ARRAY_2D_TILED_THIN1;
3474 tiling_info->gfx8.tile_split = tile_split;
3475 tiling_info->gfx8.bank_width = bankw;
3476 tiling_info->gfx8.bank_height = bankh;
3477 tiling_info->gfx8.tile_aspect = mtaspect;
3478 tiling_info->gfx8.tile_mode =
3479 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3480 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3481 == DC_ARRAY_1D_TILED_THIN1) {
3482 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3483 }
3484
3485 tiling_info->gfx8.pipe_config =
3486 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3487
3488 if (adev->asic_type == CHIP_VEGA10 ||
3489 adev->asic_type == CHIP_VEGA12 ||
3490 adev->asic_type == CHIP_VEGA20 ||
476e955d 3491 adev->asic_type == CHIP_NAVI10 ||
fce651e3 3492 adev->asic_type == CHIP_NAVI14 ||
fbd2afe5 3493 adev->asic_type == CHIP_NAVI12 ||
30221ad8 3494 adev->asic_type == CHIP_RENOIR ||
09e5665a
NK
3495 adev->asic_type == CHIP_RAVEN) {
3496 /* Fill GFX9 params */
3497 tiling_info->gfx9.num_pipes =
3498 adev->gfx.config.gb_addr_config_fields.num_pipes;
3499 tiling_info->gfx9.num_banks =
3500 adev->gfx.config.gb_addr_config_fields.num_banks;
3501 tiling_info->gfx9.pipe_interleave =
3502 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3503 tiling_info->gfx9.num_shader_engines =
3504 adev->gfx.config.gb_addr_config_fields.num_se;
3505 tiling_info->gfx9.max_compressed_frags =
3506 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3507 tiling_info->gfx9.num_rb_per_se =
3508 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3509 tiling_info->gfx9.swizzle =
3510 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3511 tiling_info->gfx9.shaderEnable = 1;
3512
695af5f9
NK
3513 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3514 plane_size, tiling_info,
3515 tiling_flags, dcc, address);
09e5665a
NK
3516 if (ret)
3517 return ret;
3518 }
3519
3520 return 0;
7df7e505
NK
3521}
3522
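/*
 * Derive DC blending state from the DRM plane state. Only overlay
 * planes are considered: per-pixel alpha requires an alpha-capable
 * format in PREMULTI blend mode, and a plane-wide alpha below 0xffff
 * is forwarded as an 8-bit global alpha value.
 */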
d74004b6 3523static void
695af5f9 3524fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
3525 bool *per_pixel_alpha, bool *global_alpha,
3526 int *global_alpha_value)
3527{
3528 *per_pixel_alpha = false;
3529 *global_alpha = false;
3530 *global_alpha_value = 0xff;
3531
3532 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3533 return;
3534
3535 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3536 static const uint32_t alpha_formats[] = {
3537 DRM_FORMAT_ARGB8888,
3538 DRM_FORMAT_RGBA8888,
3539 DRM_FORMAT_ABGR8888,
3540 };
3541 uint32_t format = plane_state->fb->format->format;
3542 unsigned int i;
3543
3544 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3545 if (format == alpha_formats[i]) {
3546 *per_pixel_alpha = true;
3547 break;
3548 }
3549 }
3550 }
3551
3552 if (plane_state->alpha < 0xffff) {
3553 *global_alpha = true;
3554 *global_alpha_value = plane_state->alpha >> 8;
3555 }
3556}
3557
004fefa3
NK
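/*
 * Map the DRM color encoding/range properties onto a DC color space.
 * RGB formats always scan out as sRGB; YUV formats select BT.601 or
 * BT.709 in limited or full range, or full-range BT.2020.
 */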
3558static int
3559fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 3560 const enum surface_pixel_format format,
004fefa3
NK
3561 enum dc_color_space *color_space)
3562{
3563 bool full_range;
3564
3565 *color_space = COLOR_SPACE_SRGB;
3566
3567 /* DRM color properties only affect non-RGB formats. */
695af5f9 3568 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
3569 return 0;
3570
3571 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3572
3573 switch (plane_state->color_encoding) {
3574 case DRM_COLOR_YCBCR_BT601:
3575 if (full_range)
3576 *color_space = COLOR_SPACE_YCBCR601;
3577 else
3578 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3579 break;
3580
3581 case DRM_COLOR_YCBCR_BT709:
3582 if (full_range)
3583 *color_space = COLOR_SPACE_YCBCR709;
3584 else
3585 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3586 break;
3587
3588 case DRM_COLOR_YCBCR_BT2020:
3589 if (full_range)
3590 *color_space = COLOR_SPACE_2020_YCBCR;
3591 else
3592 return -EINVAL;
3593 break;
3594
3595 default:
3596 return -EINVAL;
3597 }
3598
3599 return 0;
3600}
3601
695af5f9
NK
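/*
 * Aggregate everything DC needs to know about a plane: pixel format,
 * rotation, color space, tiling/DCC, blending and the scanout
 * address. Unsupported fourcc codes are rejected with -EINVAL.
 */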
3602static int
3603fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3604 const struct drm_plane_state *plane_state,
3605 const uint64_t tiling_flags,
3606 struct dc_plane_info *plane_info,
3607 struct dc_plane_address *address)
3608{
3609 const struct drm_framebuffer *fb = plane_state->fb;
3610 const struct amdgpu_framebuffer *afb =
3611 to_amdgpu_framebuffer(plane_state->fb);
3612 struct drm_format_name_buf format_name;
3613 int ret;
3614
3615 memset(plane_info, 0, sizeof(*plane_info));
3616
3617 switch (fb->format->format) {
3618 case DRM_FORMAT_C8:
3619 plane_info->format =
3620 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3621 break;
3622 case DRM_FORMAT_RGB565:
3623 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3624 break;
3625 case DRM_FORMAT_XRGB8888:
3626 case DRM_FORMAT_ARGB8888:
3627 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3628 break;
3629 case DRM_FORMAT_XRGB2101010:
3630 case DRM_FORMAT_ARGB2101010:
3631 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3632 break;
3633 case DRM_FORMAT_XBGR2101010:
3634 case DRM_FORMAT_ABGR2101010:
3635 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3636 break;
3637 case DRM_FORMAT_XBGR8888:
3638 case DRM_FORMAT_ABGR8888:
3639 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3640 break;
3641 case DRM_FORMAT_NV21:
3642 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3643 break;
3644 case DRM_FORMAT_NV12:
3645 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3646 break;
cbec6477
SW
3647 case DRM_FORMAT_P010:
3648 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3649 break;
695af5f9
NK
3650 default:
3651 DRM_ERROR(
3652 "Unsupported screen format %s\n",
3653 drm_get_format_name(fb->format->format, &format_name));
3654 return -EINVAL;
3655 }
3656
3657 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3658 case DRM_MODE_ROTATE_0:
3659 plane_info->rotation = ROTATION_ANGLE_0;
3660 break;
3661 case DRM_MODE_ROTATE_90:
3662 plane_info->rotation = ROTATION_ANGLE_90;
3663 break;
3664 case DRM_MODE_ROTATE_180:
3665 plane_info->rotation = ROTATION_ANGLE_180;
3666 break;
3667 case DRM_MODE_ROTATE_270:
3668 plane_info->rotation = ROTATION_ANGLE_270;
3669 break;
3670 default:
3671 plane_info->rotation = ROTATION_ANGLE_0;
3672 break;
3673 }
3674
3675 plane_info->visible = true;
3676 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3677
6d83a32d
MS
3678 plane_info->layer_index = 0;
3679
695af5f9
NK
3680 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3681 &plane_info->color_space);
3682 if (ret)
3683 return ret;
3684
3685 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3686 plane_info->rotation, tiling_flags,
3687 &plane_info->tiling_info,
3688 &plane_info->plane_size,
3689 &plane_info->dcc, address);
3690 if (ret)
3691 return ret;
3692
3693 fill_blending_from_plane_state(
3694 plane_state, &plane_info->per_pixel_alpha,
3695 &plane_info->global_alpha, &plane_info->global_alpha_value);
3696
3697 return 0;
3698}
3699
3700static int fill_dc_plane_attributes(struct amdgpu_device *adev,
cdde482c 3701 struct dm_plane_state *dm_plane_state,
695af5f9
NK
3702 struct drm_plane_state *plane_state,
3703 struct drm_crtc_state *crtc_state)
e7b07cee 3704{
cf020d49 3705 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
cdde482c 3706 struct dc_plane_state *dc_plane_state = dm_plane_state->dc_state;
e7b07cee
HW
3707 const struct amdgpu_framebuffer *amdgpu_fb =
3708 to_amdgpu_framebuffer(plane_state->fb);
695af5f9
NK
3709 struct dc_scaling_info scaling_info;
3710 struct dc_plane_info plane_info;
3711 uint64_t tiling_flags;
3712 int ret;
e7b07cee 3713
695af5f9
NK
3714 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3715 if (ret)
3716 return ret;
e7b07cee 3717
695af5f9
NK
3718 dc_plane_state->src_rect = scaling_info.src_rect;
3719 dc_plane_state->dst_rect = scaling_info.dst_rect;
3720 dc_plane_state->clip_rect = scaling_info.clip_rect;
3721 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 3722
695af5f9 3723 ret = get_fb_info(amdgpu_fb, &tiling_flags);
e7b07cee
HW
3724 if (ret)
3725 return ret;
3726
695af5f9
NK
3727 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3728 &plane_info,
3729 &dc_plane_state->address);
004fefa3
NK
3730 if (ret)
3731 return ret;
3732
695af5f9
NK
3733 dc_plane_state->format = plane_info.format;
3734 dc_plane_state->color_space = plane_info.color_space;
3736 dc_plane_state->plane_size = plane_info.plane_size;
3737 dc_plane_state->rotation = plane_info.rotation;
3738 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3739 dc_plane_state->stereo_format = plane_info.stereo_format;
3740 dc_plane_state->tiling_info = plane_info.tiling_info;
3741 dc_plane_state->visible = plane_info.visible;
3742 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3743 dc_plane_state->global_alpha = plane_info.global_alpha;
3744 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3745 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 3746 dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
695af5f9 3747
e277adc5
LSL
3748 /*
3749 * Always set input transfer function, since plane state is refreshed
3750 * every time.
3751 */
cdde482c 3752 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dm_plane_state);
cf020d49
NK
3753 if (ret)
3754 return ret;
e7b07cee 3755
cf020d49 3756 return 0;
e7b07cee
HW
3757}
3758
3ee6b26b
AD
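/*
 * Program stream->src (viewport) and stream->dst (addressable area)
 * from the requested scaling mode. For RMX_ASPECT the axis needing
 * less upscaling wins: e.g. a 1280x1024 mode on a 1920x1080 panel
 * becomes a centered 1350x1080 rectangle (1280 * 1080 / 1024 = 1350),
 * preserving the 5:4 ratio.
 */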
3759static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3760 const struct dm_connector_state *dm_state,
3761 struct dc_stream_state *stream)
e7b07cee
HW
3762{
3763 enum amdgpu_rmx_type rmx_type;
3764
3765 struct rect src = { 0 }; /* viewport in composition space */
3766 struct rect dst = { 0 }; /* stream addressable area */
3767
3768 /* no mode, nothing to be done */
3769 if (!mode)
3770 return;
3771
3772 /* Full screen scaling by default */
3773 src.width = mode->hdisplay;
3774 src.height = mode->vdisplay;
3775 dst.width = stream->timing.h_addressable;
3776 dst.height = stream->timing.v_addressable;
3777
f4791779
HW
3778 if (dm_state) {
3779 rmx_type = dm_state->scaling;
3780 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3781 if (src.width * dst.height <
3782 src.height * dst.width) {
3783 /* height needs less upscaling/more downscaling */
3784 dst.width = src.width *
3785 dst.height / src.height;
3786 } else {
3787 /* width needs less upscaling/more downscaling */
3788 dst.height = src.height *
3789 dst.width / src.width;
3790 }
3791 } else if (rmx_type == RMX_CENTER) {
3792 dst = src;
e7b07cee 3793 }
e7b07cee 3794
f4791779
HW
3795 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3796 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 3797
f4791779
HW
3798 if (dm_state->underscan_enable) {
3799 dst.x += dm_state->underscan_hborder / 2;
3800 dst.y += dm_state->underscan_vborder / 2;
3801 dst.width -= dm_state->underscan_hborder;
3802 dst.height -= dm_state->underscan_vborder;
3803 }
e7b07cee
HW
3804 }
3805
3806 stream->src = src;
3807 stream->dst = dst;
3808
f1ad2f5e 3809 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
e7b07cee
HW
3810 dst.x, dst.y, dst.width, dst.height);
3811
3812}
3813
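/*
 * Pick the stream color depth: start from the sink's EDID bpc (or the
 * HDMI 2.0 HF-VSDB deep-color caps for YCbCr 4:2:0), cap it at the
 * user-requested max_requested_bpc, then round down to an even value.
 */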
3ee6b26b 3814static enum dc_color_depth
42ba01fc 3815convert_color_depth_from_display_info(const struct drm_connector *connector,
1bc22f20
SW
3816 const struct drm_connector_state *state,
3817 bool is_y420)
e7b07cee 3818{
1bc22f20 3819 uint8_t bpc;
01c22997 3820
1bc22f20
SW
3821 if (is_y420) {
3822 bpc = 8;
3823
3824 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3825 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3826 bpc = 16;
3827 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3828 bpc = 12;
3829 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3830 bpc = 10;
3831 } else {
3832 bpc = (uint8_t)connector->display_info.bpc;
3833 /* Assume 8 bpc by default if no bpc is specified. */
3834 bpc = bpc ? bpc : 8;
3835 }
e7b07cee 3836
01933ba4
NK
3837 if (!state)
3838 state = connector->state;
3839
42ba01fc 3840 if (state) {
01c22997
NK
3841 /*
3842 * Cap display bpc based on the user requested value.
3843 *
3844 * The value for state->max_bpc may not be correctly updated
3845 * depending on when the connector gets added to the state
3846 * or if this was called outside of atomic check, so it
3847 * can't be used directly.
3848 */
3849 bpc = min(bpc, state->max_requested_bpc);
3850
1825fd34
NK
3851 /* Round down to the nearest even number. */
3852 bpc = bpc - (bpc & 1);
3853 }
07e3a1cf 3854
e7b07cee
HW
3855 switch (bpc) {
3856 case 0:
1f6010a9
DF
3857 /*
3858 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
3859 * EDID revisions before 1.4.
3860 * TODO: Fix edid parsing
3861 */
3862 return COLOR_DEPTH_888;
3863 case 6:
3864 return COLOR_DEPTH_666;
3865 case 8:
3866 return COLOR_DEPTH_888;
3867 case 10:
3868 return COLOR_DEPTH_101010;
3869 case 12:
3870 return COLOR_DEPTH_121212;
3871 case 14:
3872 return COLOR_DEPTH_141414;
3873 case 16:
3874 return COLOR_DEPTH_161616;
3875 default:
3876 return COLOR_DEPTH_UNDEFINED;
3877 }
3878}
3879
3ee6b26b
AD
3880static enum dc_aspect_ratio
3881get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 3882{
e11d4147
LSL
3883 /* 1-1 mapping, since both enums follow the HDMI spec. */
3884 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
3885}
3886
3ee6b26b
AD
3887static enum dc_color_space
3888get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
3889{
3890 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3891
3892 switch (dc_crtc_timing->pixel_encoding) {
3893 case PIXEL_ENCODING_YCBCR422:
3894 case PIXEL_ENCODING_YCBCR444:
3895 case PIXEL_ENCODING_YCBCR420:
3896 {
3897 /*
3898 * 27.03 MHz is the separation point between HDTV and SDTV
3899 * according to the HDMI spec; use YCbCr709 for the former and
3900 * YCbCr601 for the latter.
3901 */
380604e2 3902 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
3903 if (dc_crtc_timing->flags.Y_ONLY)
3904 color_space =
3905 COLOR_SPACE_YCBCR709_LIMITED;
3906 else
3907 color_space = COLOR_SPACE_YCBCR709;
3908 } else {
3909 if (dc_crtc_timing->flags.Y_ONLY)
3910 color_space =
3911 COLOR_SPACE_YCBCR601_LIMITED;
3912 else
3913 color_space = COLOR_SPACE_YCBCR601;
3914 }
3915
3916 }
3917 break;
3918 case PIXEL_ENCODING_RGB:
3919 color_space = COLOR_SPACE_SRGB;
3920 break;
3921
3922 default:
3923 WARN_ON(1);
3924 break;
3925 }
3926
3927 return color_space;
3928}
3929
ea117312
TA
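/*
 * Step down from the current color depth until the resulting TMDS
 * clock fits within the sink's max_tmds_clock. Deep color scales the
 * clock by bpc/24: e.g. a 297000 kHz 4K mode at 12 bpc needs
 * 297000 * 36 / 24 = 445500 kHz, so a 300000 kHz sink falls back to
 * 8 bpc.
 */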
3930static bool adjust_colour_depth_from_display_info(
3931 struct dc_crtc_timing *timing_out,
3932 const struct drm_display_info *info)
400443e8 3933{
ea117312 3934 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 3935 int normalized_clk;
400443e8 3936 do {
380604e2 3937 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
3938 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3939 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3940 normalized_clk /= 2;
3941 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
ea117312
TA
3942 switch (depth) {
3943 case COLOR_DEPTH_888:
3944 break;
400443e8
ML
3945 case COLOR_DEPTH_101010:
3946 normalized_clk = (normalized_clk * 30) / 24;
3947 break;
3948 case COLOR_DEPTH_121212:
3949 normalized_clk = (normalized_clk * 36) / 24;
3950 break;
3951 case COLOR_DEPTH_161616:
3952 normalized_clk = (normalized_clk * 48) / 24;
3953 break;
3954 default:
ea117312
TA
3955 /* The above depths are the only ones valid for HDMI. */
3956 return false;
400443e8 3957 }
ea117312
TA
3958 if (normalized_clk <= info->max_tmds_clock) {
3959 timing_out->display_color_depth = depth;
3960 return true;
3961 }
3962 } while (--depth > COLOR_DEPTH_666);
3963 return false;
400443e8 3964}
e7b07cee 3965
42ba01fc
NK
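/*
 * Convert a drm_display_mode into DC CRTC timing: choose the pixel
 * encoding (forcing YCbCr 4:2:0 where the mode or connector requires
 * it), derive the color depth, and copy the h/v timing, VIC and sync
 * polarities; HDMI sinks then get the TMDS clock/depth adjustment
 * above applied.
 */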
3966static void fill_stream_properties_from_drm_display_mode(
3967 struct dc_stream_state *stream,
3968 const struct drm_display_mode *mode_in,
3969 const struct drm_connector *connector,
3970 const struct drm_connector_state *connector_state,
3971 const struct dc_stream_state *old_stream)
e7b07cee
HW
3972{
3973 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 3974 const struct drm_display_info *info = &connector->display_info;
d4252eee 3975 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
3976 struct hdmi_vendor_infoframe hv_frame;
3977 struct hdmi_avi_infoframe avi_frame;
e7b07cee 3978
acf83f86
WL
3979 memset(&hv_frame, 0, sizeof(hv_frame));
3980 memset(&avi_frame, 0, sizeof(avi_frame));
3981
e7b07cee
HW
3982 timing_out->h_border_left = 0;
3983 timing_out->h_border_right = 0;
3984 timing_out->v_border_top = 0;
3985 timing_out->v_border_bottom = 0;
3986 /* TODO: un-hardcode */
fe61a2f1 3987 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 3988 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 3989 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
3990 else if (drm_mode_is_420_also(info, mode_in)
3991 && aconnector->force_yuv420_output)
3992 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 3993 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 3994 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
3995 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3996 else
3997 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3998
3999 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4000 timing_out->display_color_depth = convert_color_depth_from_display_info(
1bc22f20
SW
4001 connector, connector_state,
4002 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
e7b07cee
HW
4003 timing_out->scan_type = SCANNING_TYPE_NODATA;
4004 timing_out->hdmi_vic = 0;
b333730d
BL
4005
4006 if (old_stream) {
4007 timing_out->vic = old_stream->timing.vic;
4008 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4009 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4010 } else {
4011 timing_out->vic = drm_match_cea_mode(mode_in);
4012 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4013 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4014 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4015 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4016 }
e7b07cee 4017
1cb1d477
WL
4018 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4019 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4020 timing_out->vic = avi_frame.video_code;
4021 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4022 timing_out->hdmi_vic = hv_frame.vic;
4023 }
4024
e7b07cee
HW
4025 timing_out->h_addressable = mode_in->crtc_hdisplay;
4026 timing_out->h_total = mode_in->crtc_htotal;
4027 timing_out->h_sync_width =
4028 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4029 timing_out->h_front_porch =
4030 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4031 timing_out->v_total = mode_in->crtc_vtotal;
4032 timing_out->v_addressable = mode_in->crtc_vdisplay;
4033 timing_out->v_front_porch =
4034 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4035 timing_out->v_sync_width =
4036 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 4037 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 4038 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
4039
4040 stream->output_color_space = get_output_color_space(timing_out);
4041
e43a432c
AK
4042 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4043 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
4044 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4045 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4046 drm_mode_is_420_also(info, mode_in) &&
4047 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4048 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4049 adjust_colour_depth_from_display_info(timing_out, info);
4050 }
4051 }
e7b07cee
HW
4052}
4053
3ee6b26b
AD
4054static void fill_audio_info(struct audio_info *audio_info,
4055 const struct drm_connector *drm_connector,
4056 const struct dc_sink *dc_sink)
e7b07cee
HW
4057{
4058 int i = 0;
4059 int cea_revision = 0;
4060 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4061
4062 audio_info->manufacture_id = edid_caps->manufacturer_id;
4063 audio_info->product_id = edid_caps->product_id;
4064
4065 cea_revision = drm_connector->display_info.cea_rev;
4066
090afc1e 4067 strscpy(audio_info->display_name,
d2b2562c 4068 edid_caps->display_name,
090afc1e 4069 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 4070
b830ebc9 4071 if (cea_revision >= 3) {
e7b07cee
HW
4072 audio_info->mode_count = edid_caps->audio_mode_count;
4073
4074 for (i = 0; i < audio_info->mode_count; ++i) {
4075 audio_info->modes[i].format_code =
4076 (enum audio_format_code)
4077 (edid_caps->audio_modes[i].format_code);
4078 audio_info->modes[i].channel_count =
4079 edid_caps->audio_modes[i].channel_count;
4080 audio_info->modes[i].sample_rates.all =
4081 edid_caps->audio_modes[i].sample_rate;
4082 audio_info->modes[i].sample_size =
4083 edid_caps->audio_modes[i].sample_size;
4084 }
4085 }
4086
4087 audio_info->flags.all = edid_caps->speaker_flags;
4088
4089 /* TODO: We only check for progressive mode; check for interlace mode too */
b830ebc9 4090 if (drm_connector->latency_present[0]) {
e7b07cee
HW
4091 audio_info->video_latency = drm_connector->video_latency[0];
4092 audio_info->audio_latency = drm_connector->audio_latency[0];
4093 }
4094
4095 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4096
4097}
4098
3ee6b26b
AD
4099static void
4100copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4101 struct drm_display_mode *dst_mode)
e7b07cee
HW
4102{
4103 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4104 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4105 dst_mode->crtc_clock = src_mode->crtc_clock;
4106 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4107 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 4108 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
4109 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4110 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4111 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4112 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4113 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4114 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4115 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4116 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4117}
4118
3ee6b26b
AD
4119static void
4120decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4121 const struct drm_display_mode *native_mode,
4122 bool scale_enabled)
e7b07cee
HW
4123{
4124 if (scale_enabled) {
4125 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4126 } else if (native_mode->clock == drm_mode->clock &&
4127 native_mode->htotal == drm_mode->htotal &&
4128 native_mode->vtotal == drm_mode->vtotal) {
4129 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4130 } else {
4131 /* no scaling nor amdgpu inserted, no need to patch */
4132 }
4133}
4134
aed15309
ML
4135static struct dc_sink *
4136create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 4137{
2e0ac3d6 4138 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 4139 struct dc_sink *sink = NULL;
2e0ac3d6
HW
4140 sink_init_data.link = aconnector->dc_link;
4141 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4142
4143 sink = dc_sink_create(&sink_init_data);
423788c7 4144 if (!sink) {
2e0ac3d6 4145 DRM_ERROR("Failed to create sink!\n");
aed15309 4146 return NULL;
423788c7 4147 }
2e0ac3d6 4148 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 4149
aed15309 4150 return sink;
2e0ac3d6
HW
4151}
4152
fa2123db
ML
4153static void set_multisync_trigger_params(
4154 struct dc_stream_state *stream)
4155{
4156 if (stream->triggered_crtc_reset.enabled) {
4157 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4158 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4159 }
4160}
4161
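/*
 * Among the streams with a triggered CRTC reset, elect the one with
 * the highest refresh rate, pix_clk_100hz * 100 / (h_total * v_total)
 * Hz, as the multisync master; every participating stream then resets
 * off that master.
 */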
4162static void set_master_stream(struct dc_stream_state *stream_set[],
4163 int stream_count)
4164{
4165 int j, highest_rfr = 0, master_stream = 0;
4166
4167 for (j = 0; j < stream_count; j++) {
4168 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4169 int refresh_rate = 0;
4170
380604e2 4171 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
4172 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4173 if (refresh_rate > highest_rfr) {
4174 highest_rfr = refresh_rate;
4175 master_stream = j;
4176 }
4177 }
4178 }
4179 for (j = 0; j < stream_count; j++) {
03736f4c 4180 if (stream_set[j])
fa2123db
ML
4181 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4182 }
4183}
4184
4185static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4186{
4187 int i = 0;
4188
4189 if (context->stream_count < 2)
4190 return;
4191 for (i = 0; i < context->stream_count ; i++) {
4192 if (!context->streams[i])
4193 continue;
1f6010a9
DF
4194 /*
4195 * TODO: add a function to read AMD VSDB bits and set
fa2123db 4196 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 4197 * For now it's set to false
fa2123db
ML
4198 */
4199 set_multisync_trigger_params(context->streams[i]);
4200 }
4201 set_master_stream(context->streams, context->stream_count);
4202}
4203
3ee6b26b
AD
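/*
 * Build a dc_stream_state for this connector and mode: pick the sink
 * (a fake one for headless setups), patch the CRTC timing against the
 * preferred mode, evaluate DSC against the DPCD caps and available
 * link bandwidth on DP, and fill in audio, PSR and infopacket state.
 */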
4204static struct dc_stream_state *
4205create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4206 const struct drm_display_mode *drm_mode,
b333730d
BL
4207 const struct dm_connector_state *dm_state,
4208 const struct dc_stream_state *old_stream)
e7b07cee
HW
4209{
4210 struct drm_display_mode *preferred_mode = NULL;
391ef035 4211 struct drm_connector *drm_connector;
42ba01fc
NK
4212 const struct drm_connector_state *con_state =
4213 dm_state ? &dm_state->base : NULL;
0971c40e 4214 struct dc_stream_state *stream = NULL;
e7b07cee
HW
4215 struct drm_display_mode mode = *drm_mode;
4216 bool native_mode_found = false;
b333730d
BL
4217 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4218 int mode_refresh;
58124bf8 4219 int preferred_refresh = 0;
defeb878 4220#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 4221 struct dsc_dec_dpcd_caps dsc_caps;
df2f1015 4222#endif
df2f1015 4223 uint32_t link_bandwidth_kbps;
b333730d 4224
aed15309 4225 struct dc_sink *sink = NULL;
b830ebc9 4226 if (aconnector == NULL) {
e7b07cee 4227 DRM_ERROR("aconnector is NULL!\n");
64245fa7 4228 return stream;
e7b07cee
HW
4229 }
4230
e7b07cee 4231 drm_connector = &aconnector->base;
2e0ac3d6 4232
f4ac176e 4233 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
4234 sink = create_fake_sink(aconnector);
4235 if (!sink)
4236 return stream;
aed15309
ML
4237 } else {
4238 sink = aconnector->dc_sink;
dcd5fb82 4239 dc_sink_retain(sink);
f4ac176e 4240 }
2e0ac3d6 4241
aed15309 4242 stream = dc_create_stream_for_sink(sink);
4562236b 4243
b830ebc9 4244 if (stream == NULL) {
e7b07cee 4245 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 4246 goto finish;
e7b07cee
HW
4247 }
4248
ceb3dbb4
JL
4249 stream->dm_stream_context = aconnector;
4250
4a36fcba
WL
4251 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4252 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4253
e7b07cee
HW
4254 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4255 /* Search for preferred mode */
4256 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4257 native_mode_found = true;
4258 break;
4259 }
4260 }
4261 if (!native_mode_found)
4262 preferred_mode = list_first_entry_or_null(
4263 &aconnector->base.modes,
4264 struct drm_display_mode,
4265 head);
4266
b333730d
BL
4267 mode_refresh = drm_mode_vrefresh(&mode);
4268
b830ebc9 4269 if (preferred_mode == NULL) {
1f6010a9
DF
4270 /*
4271 * This may not be an error; the use case is when we have no
e7b07cee
HW
4272 * usermode calls to reset and set mode upon hotplug. In this
4273 * case, we call set mode ourselves to restore the previous mode
4274 * and the mode list may not be filled in yet.
4275 */
f1ad2f5e 4276 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee
HW
4277 } else {
4278 decide_crtc_timing_for_drm_display_mode(
4279 &mode, preferred_mode,
f4791779 4280 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 4281 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
4282 }
4283
f783577c
JFZ
4284 if (!dm_state)
4285 drm_mode_set_crtcinfo(&mode, 0);
4286
b333730d
BL
4287 /*
4288 * If scaling is enabled and refresh rate didn't change
4289 * we copy the vic and polarities of the old timings
4290 */
4291 if (!scale || mode_refresh != preferred_refresh)
4292 fill_stream_properties_from_drm_display_mode(stream,
42ba01fc 4293 &mode, &aconnector->base, con_state, NULL);
b333730d
BL
4294 else
4295 fill_stream_properties_from_drm_display_mode(stream,
42ba01fc 4296 &mode, &aconnector->base, con_state, old_stream);
b333730d 4297
df2f1015
DF
4298 stream->timing.flags.DSC = 0;
4299
4300 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 4301#if defined(CONFIG_DRM_AMD_DC_DCN)
2af0f378
NC
4302 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4303 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
df2f1015
DF
4304 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4305 &dsc_caps);
defeb878 4306#endif
df2f1015
DF
4307 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4308 dc_link_get_link_cap(aconnector->dc_link));
4309
defeb878 4310#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 4311 if (dsc_caps.is_dsc_supported)
0417df16 4312 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 4313 &dsc_caps,
0417df16 4314 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
df2f1015
DF
4315 link_bandwidth_kbps,
4316 &stream->timing,
4317 &stream->timing.dsc_cfg))
4318 stream->timing.flags.DSC = 1;
39a4eb85 4319#endif
df2f1015 4320 }
39a4eb85 4321
e7b07cee
HW
4322 update_stream_scaling_settings(&mode, dm_state, stream);
4323
4324 fill_audio_info(
4325 &stream->audio_info,
4326 drm_connector,
aed15309 4327 sink);
e7b07cee 4328
ceb3dbb4 4329 update_stream_signal(stream, sink);
9182b4cb 4330
d832fc3b
WL
4331 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4332 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
8c322309
RL
4333 if (stream->link->psr_feature_enabled) {
4334 struct dc *core_dc = stream->link->ctx->dc;
d832fc3b 4335
8c322309
RL
4336 if (dc_is_dmcu_initialized(core_dc)) {
4337 struct dmcu *dmcu = core_dc->res_pool->dmcu;
4338
4339 stream->psr_version = dmcu->dmcu_version.psr_version;
c38cc677
MT
4340
4341 /*
4342 * Decide whether the stream supports VSC SDP colorimetry
4343 * before building the VSC infopacket.
4344 */
4345 stream->use_vsc_sdp_for_colorimetry = false;
4346 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4347 stream->use_vsc_sdp_for_colorimetry =
4348 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4349 } else {
4350 if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4351 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4352 stream->use_vsc_sdp_for_colorimetry = true;
4353 }
4354 }
4355 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309
RL
4356 }
4357 }
aed15309 4358finish:
dcd5fb82 4359 dc_sink_release(sink);
9e3efe3e 4360
e7b07cee
HW
4361 return stream;
4362}
4363
7578ecda 4364static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
4365{
4366 drm_crtc_cleanup(crtc);
4367 kfree(crtc);
4368}
4369
4370static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 4371 struct drm_crtc_state *state)
e7b07cee
HW
4372{
4373 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4374
4375 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4376 if (cur->stream)
4377 dc_stream_release(cur->stream);
4378
4379
4380 __drm_atomic_helper_crtc_destroy_state(state);
4381
4382
4383 kfree(state);
4384}
4385
4386static void dm_crtc_reset_state(struct drm_crtc *crtc)
4387{
4388 struct dm_crtc_state *state;
4389
4390 if (crtc->state)
4391 dm_crtc_destroy_state(crtc, crtc->state);
4392
4393 state = kzalloc(sizeof(*state), GFP_KERNEL);
4394 if (WARN_ON(!state))
4395 return;
4396
4397 crtc->state = &state->base;
4398 crtc->state->crtc = crtc;
4399
4400}
4401
4402static struct drm_crtc_state *
4403dm_crtc_duplicate_state(struct drm_crtc *crtc)
4404{
4405 struct dm_crtc_state *state, *cur;
4406
4407 cur = to_dm_crtc_state(crtc->state);
4408
4409 if (WARN_ON(!crtc->state))
4410 return NULL;
4411
2004f45e 4412 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
4413 if (!state)
4414 return NULL;
e7b07cee
HW
4415
4416 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4417
4418 if (cur->stream) {
4419 state->stream = cur->stream;
4420 dc_stream_retain(state->stream);
4421 }
4422
d6ef9b41
NK
4423 state->active_planes = cur->active_planes;
4424 state->interrupts_enabled = cur->interrupts_enabled;
180db303 4425 state->vrr_params = cur->vrr_params;
98e6436d 4426 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 4427 state->abm_level = cur->abm_level;
bb47de73
NK
4428 state->vrr_supported = cur->vrr_supported;
4429 state->freesync_config = cur->freesync_config;
14b25846 4430 state->crc_src = cur->crc_src;
cf020d49
NK
4431 state->cm_has_degamma = cur->cm_has_degamma;
4432 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
98e6436d 4433
e7b07cee
HW
4434 /* TODO: Duplicate dc_stream after the stream object is flattened */
4435
4436 return &state->base;
4437}
4438
d2574c33
MK
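/*
 * VUPDATE interrupts are only used on pre-DCN (DCE) hardware; for
 * families newer than AMDGPU_FAMILY_AI the request succeeds without
 * touching the interrupt.
 */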
4439static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4440{
4441 enum dc_irq_source irq_source;
4442 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4443 struct amdgpu_device *adev = crtc->dev->dev_private;
4444 int rc;
4445
3a2ce8d6
LL
4446 /* Do not set vupdate for DCN hardware */
4447 if (adev->family > AMDGPU_FAMILY_AI)
4448 return 0;
4449
d2574c33
MK
4450 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4451
4452 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4453
4454 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4455 acrtc->crtc_id, enable ? "en" : "dis", rc);
4456 return rc;
4457}
589d2739
HW
4458
4459static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4460{
4461 enum dc_irq_source irq_source;
4462 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4463 struct amdgpu_device *adev = crtc->dev->dev_private;
d2574c33
MK
4464 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4465 int rc = 0;
4466
4467 if (enable) {
4468 /* vblank irq on -> Only need vupdate irq in vrr mode */
4469 if (amdgpu_dm_vrr_active(acrtc_state))
4470 rc = dm_set_vupdate_irq(crtc, true);
4471 } else {
4472 /* vblank irq off -> vupdate irq off */
4473 rc = dm_set_vupdate_irq(crtc, false);
4474 }
4475
4476 if (rc)
4477 return rc;
589d2739
HW
4478
4479 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
a0e30392 4480 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
589d2739
HW
4481}
4482
4483static int dm_enable_vblank(struct drm_crtc *crtc)
4484{
4485 return dm_set_vblank(crtc, true);
4486}
4487
4488static void dm_disable_vblank(struct drm_crtc *crtc)
4489{
4490 dm_set_vblank(crtc, false);
4491}
4492
e7b07cee
HW
4493/* Implemented only the options currently available for the driver */
4494static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4495 .reset = dm_crtc_reset_state,
4496 .destroy = amdgpu_dm_crtc_destroy,
4497 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4498 .set_config = drm_atomic_helper_set_config,
4499 .page_flip = drm_atomic_helper_page_flip,
4500 .atomic_duplicate_state = dm_crtc_duplicate_state,
4501 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 4502 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 4503 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 4504 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 4505 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
4506 .enable_vblank = dm_enable_vblank,
4507 .disable_vblank = dm_disable_vblank,
e3eff4b5 4508 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
e7b07cee
HW
4509};
4510
4511static enum drm_connector_status
4512amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4513{
4514 bool connected;
c84dec2f 4515 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 4516
1f6010a9
DF
4517 /*
4518 * Notes:
e7b07cee
HW
4519 * 1. This interface is NOT called in context of HPD irq.
4520 * 2. This interface *is called* in context of user-mode ioctl. Which
1f6010a9
DF
4521 * makes it a bad place for *any* MST-related activity.
4522 */
e7b07cee 4523
8580d60b
HW
4524 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4525 !aconnector->fake_enable)
e7b07cee
HW
4526 connected = (aconnector->dc_sink != NULL);
4527 else
4528 connected = (aconnector->base.force == DRM_FORCE_ON);
4529
4530 return (connected ? connector_status_connected :
4531 connector_status_disconnected);
4532}
4533
3ee6b26b
AD
4534int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4535 struct drm_connector_state *connector_state,
4536 struct drm_property *property,
4537 uint64_t val)
e7b07cee
HW
4538{
4539 struct drm_device *dev = connector->dev;
4540 struct amdgpu_device *adev = dev->dev_private;
4541 struct dm_connector_state *dm_old_state =
4542 to_dm_connector_state(connector->state);
4543 struct dm_connector_state *dm_new_state =
4544 to_dm_connector_state(connector_state);
4545
4546 int ret = -EINVAL;
4547
4548 if (property == dev->mode_config.scaling_mode_property) {
4549 enum amdgpu_rmx_type rmx_type;
4550
4551 switch (val) {
4552 case DRM_MODE_SCALE_CENTER:
4553 rmx_type = RMX_CENTER;
4554 break;
4555 case DRM_MODE_SCALE_ASPECT:
4556 rmx_type = RMX_ASPECT;
4557 break;
4558 case DRM_MODE_SCALE_FULLSCREEN:
4559 rmx_type = RMX_FULL;
4560 break;
4561 case DRM_MODE_SCALE_NONE:
4562 default:
4563 rmx_type = RMX_OFF;
4564 break;
4565 }
4566
4567 if (dm_old_state->scaling == rmx_type)
4568 return 0;
4569
4570 dm_new_state->scaling = rmx_type;
4571 ret = 0;
4572 } else if (property == adev->mode_info.underscan_hborder_property) {
4573 dm_new_state->underscan_hborder = val;
4574 ret = 0;
4575 } else if (property == adev->mode_info.underscan_vborder_property) {
4576 dm_new_state->underscan_vborder = val;
4577 ret = 0;
4578 } else if (property == adev->mode_info.underscan_property) {
4579 dm_new_state->underscan_enable = val;
4580 ret = 0;
c1ee92f9
DF
4581 } else if (property == adev->mode_info.abm_level_property) {
4582 dm_new_state->abm_level = val;
4583 ret = 0;
e7b07cee
HW
4584 }
4585
4586 return ret;
4587}
4588
3ee6b26b
AD
4589int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4590 const struct drm_connector_state *state,
4591 struct drm_property *property,
4592 uint64_t *val)
e7b07cee
HW
4593{
4594 struct drm_device *dev = connector->dev;
4595 struct amdgpu_device *adev = dev->dev_private;
4596 struct dm_connector_state *dm_state =
4597 to_dm_connector_state(state);
4598 int ret = -EINVAL;
4599
4600 if (property == dev->mode_config.scaling_mode_property) {
4601 switch (dm_state->scaling) {
4602 case RMX_CENTER:
4603 *val = DRM_MODE_SCALE_CENTER;
4604 break;
4605 case RMX_ASPECT:
4606 *val = DRM_MODE_SCALE_ASPECT;
4607 break;
4608 case RMX_FULL:
4609 *val = DRM_MODE_SCALE_FULLSCREEN;
4610 break;
4611 case RMX_OFF:
4612 default:
4613 *val = DRM_MODE_SCALE_NONE;
4614 break;
4615 }
4616 ret = 0;
4617 } else if (property == adev->mode_info.underscan_hborder_property) {
4618 *val = dm_state->underscan_hborder;
4619 ret = 0;
4620 } else if (property == adev->mode_info.underscan_vborder_property) {
4621 *val = dm_state->underscan_vborder;
4622 ret = 0;
4623 } else if (property == adev->mode_info.underscan_property) {
4624 *val = dm_state->underscan_enable;
4625 ret = 0;
c1ee92f9
DF
4626 } else if (property == adev->mode_info.abm_level_property) {
4627 *val = dm_state->abm_level;
4628 ret = 0;
e7b07cee 4629 }
c1ee92f9 4630
e7b07cee
HW
4631 return ret;
4632}
4633
526c654a
ED
4634static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4635{
4636 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4637
4638 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4639}
4640
7578ecda 4641static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 4642{
c84dec2f 4643 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
4644 const struct dc_link *link = aconnector->dc_link;
4645 struct amdgpu_device *adev = connector->dev->dev_private;
4646 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 4647
e7b07cee
HW
4648#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4649 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4650
89fc8d4e 4651 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
4652 link->type != dc_connection_none &&
4653 dm->backlight_dev) {
4654 backlight_device_unregister(dm->backlight_dev);
4655 dm->backlight_dev = NULL;
e7b07cee
HW
4656 }
4657#endif
dcd5fb82
MF
4658
4659 if (aconnector->dc_em_sink)
4660 dc_sink_release(aconnector->dc_em_sink);
4661 aconnector->dc_em_sink = NULL;
4662 if (aconnector->dc_sink)
4663 dc_sink_release(aconnector->dc_sink);
4664 aconnector->dc_sink = NULL;
4665
e86e8947 4666 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
4667 drm_connector_unregister(connector);
4668 drm_connector_cleanup(connector);
526c654a
ED
4669 if (aconnector->i2c) {
4670 i2c_del_adapter(&aconnector->i2c->base);
4671 kfree(aconnector->i2c);
4672 }
9f656935 4673 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 4674
e7b07cee
HW
4675 kfree(connector);
4676}
4677
4678void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4679{
4680 struct dm_connector_state *state =
4681 to_dm_connector_state(connector->state);
4682
df099b9b
LSL
4683 if (connector->state)
4684 __drm_atomic_helper_connector_destroy_state(connector->state);
4685
e7b07cee
HW
4686 kfree(state);
4687
4688 state = kzalloc(sizeof(*state), GFP_KERNEL);
4689
4690 if (state) {
4691 state->scaling = RMX_OFF;
4692 state->underscan_enable = false;
4693 state->underscan_hborder = 0;
4694 state->underscan_vborder = 0;
01933ba4 4695 state->base.max_requested_bpc = 8;
3261e013
ML
4696 state->vcpi_slots = 0;
4697 state->pbn = 0;
c3e50f89
NK
4698 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4699 state->abm_level = amdgpu_dm_abm_level;
4700
df099b9b 4701 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
4702 }
4703}
4704
3ee6b26b
AD
4705struct drm_connector_state *
4706amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
4707{
4708 struct dm_connector_state *state =
4709 to_dm_connector_state(connector->state);
4710
4711 struct dm_connector_state *new_state =
4712 kmemdup(state, sizeof(*state), GFP_KERNEL);
4713
98e6436d
AK
4714 if (!new_state)
4715 return NULL;
e7b07cee 4716
98e6436d
AK
4717 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4718
4719 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 4720 new_state->abm_level = state->abm_level;
922454c2
NK
4721 new_state->scaling = state->scaling;
4722 new_state->underscan_enable = state->underscan_enable;
4723 new_state->underscan_hborder = state->underscan_hborder;
4724 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
4725 new_state->vcpi_slots = state->vcpi_slots;
4726 new_state->pbn = state->pbn;
98e6436d 4727 return &new_state->base;
e7b07cee
HW
4728}
4729
14f04fa4
AD
4730static int
4731amdgpu_dm_connector_late_register(struct drm_connector *connector)
4732{
4733 struct amdgpu_dm_connector *amdgpu_dm_connector =
4734 to_amdgpu_dm_connector(connector);
bdb9fbc6 4735 int r;
14f04fa4 4736
bdb9fbc6
AD
4737 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4738 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4739 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4740 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4741 if (r)
4742 return r;
4743 }
4744
4745#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
4746 connector_debugfs_init(amdgpu_dm_connector);
4747#endif
4748
4749 return 0;
4750}
4751
e7b07cee
HW
4752static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4753 .reset = amdgpu_dm_connector_funcs_reset,
4754 .detect = amdgpu_dm_connector_detect,
4755 .fill_modes = drm_helper_probe_single_connector_modes,
4756 .destroy = amdgpu_dm_connector_destroy,
4757 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4758 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4759 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 4760 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 4761 .late_register = amdgpu_dm_connector_late_register,
526c654a 4762 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
4763};
4764
e7b07cee
HW
4765static int get_modes(struct drm_connector *connector)
4766{
4767 return amdgpu_dm_connector_get_modes(connector);
4768}
4769
c84dec2f 4770static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
4771{
4772 struct dc_sink_init_data init_params = {
4773 .link = aconnector->dc_link,
4774 .sink_signal = SIGNAL_TYPE_VIRTUAL
4775 };
70e8ffc5 4776 struct edid *edid;
e7b07cee 4777
a89ff457 4778 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
4779 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4780 aconnector->base.name);
4781
4782 aconnector->base.force = DRM_FORCE_OFF;
4783 aconnector->base.override_edid = false;
4784 return;
4785 }
4786
70e8ffc5
HW
4787 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4788
e7b07cee
HW
4789 aconnector->edid = edid;
4790
4791 aconnector->dc_em_sink = dc_link_add_remote_sink(
4792 aconnector->dc_link,
4793 (uint8_t *)edid,
4794 (edid->extensions + 1) * EDID_LENGTH,
4795 &init_params);
4796
dcd5fb82 4797 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
4798 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4799 aconnector->dc_link->local_sink :
4800 aconnector->dc_em_sink;
dcd5fb82
MF
4801 dc_sink_retain(aconnector->dc_sink);
4802 }
e7b07cee
HW
4803}
4804
c84dec2f 4805static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
4806{
4807 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4808
1f6010a9
DF
4809 /*
4810 * In case of a headless boot with force on for a DP managed connector,
e7b07cee
HW
4811 * those settings have to be != 0 to get an initial modeset.
4812 */
4813 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4814 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4815 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4816 }
4817
4818
4819 aconnector->base.override_edid = true;
4820 create_eml_sink(aconnector);
4821}
4822
ba9ca088 4823enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 4824 struct drm_display_mode *mode)
e7b07cee
HW
4825{
4826 int result = MODE_ERROR;
4827 struct dc_sink *dc_sink;
4828 struct amdgpu_device *adev = connector->dev->dev_private;
4829 /* TODO: Unhardcode stream count */
0971c40e 4830 struct dc_stream_state *stream;
c84dec2f 4831 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
a39438f0 4832 enum dc_status dc_result = DC_OK;
e7b07cee
HW
4833
4834 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4835 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4836 return result;
4837
1f6010a9
DF
4838 /*
4839 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
4840 * EDID mgmt
4841 */
4842 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4843 !aconnector->dc_em_sink)
4844 handle_edid_mgmt(aconnector);
4845
c84dec2f 4846 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 4847
b830ebc9 4848 if (dc_sink == NULL) {
e7b07cee
HW
4849 DRM_ERROR("dc_sink is NULL!\n");
4850 goto fail;
4851 }
4852
b333730d 4853 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
b830ebc9 4854 if (stream == NULL) {
e7b07cee
HW
4855 DRM_ERROR("Failed to create stream for sink!\n");
4856 goto fail;
4857 }
4858
a39438f0
HW
4859 dc_result = dc_validate_stream(adev->dm.dc, stream);
4860
4861 if (dc_result == DC_OK)
e7b07cee 4862 result = MODE_OK;
a39438f0 4863 else
9f921b14 4864 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
a39438f0 4865 mode->hdisplay,
26e99ba6 4866 mode->vdisplay,
9f921b14
HW
4867 mode->clock,
4868 dc_result);
e7b07cee
HW
4869
4870 dc_stream_release(stream);
4871
4872fail:
4873 /* TODO: error handling */
4874 return result;
4875}
4876
88694af9
NK
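/*
 * Pack the connector's HDR output metadata into a DC infopacket: a
 * 26-byte Dynamic Range and Mastering infoframe plus 4-byte header,
 * wrapped either as an HDMI infoframe or as a DP/eDP SDP depending on
 * the connector type.
 */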
4877static int fill_hdr_info_packet(const struct drm_connector_state *state,
4878 struct dc_info_packet *out)
4879{
4880 struct hdmi_drm_infoframe frame;
4881 unsigned char buf[30]; /* 26 + 4 */
4882 ssize_t len;
4883 int ret, i;
4884
4885 memset(out, 0, sizeof(*out));
4886
4887 if (!state->hdr_output_metadata)
4888 return 0;
4889
4890 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4891 if (ret)
4892 return ret;
4893
4894 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4895 if (len < 0)
4896 return (int)len;
4897
4898 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4899 if (len != 30)
4900 return -EINVAL;
4901
4902 /* Prepare the infopacket for DC. */
4903 switch (state->connector->connector_type) {
4904 case DRM_MODE_CONNECTOR_HDMIA:
4905 out->hb0 = 0x87; /* type */
4906 out->hb1 = 0x01; /* version */
4907 out->hb2 = 0x1A; /* length */
4908 out->sb[0] = buf[3]; /* checksum */
4909 i = 1;
4910 break;
4911
4912 case DRM_MODE_CONNECTOR_DisplayPort:
4913 case DRM_MODE_CONNECTOR_eDP:
4914 out->hb0 = 0x00; /* sdp id, zero */
4915 out->hb1 = 0x87; /* type */
4916 out->hb2 = 0x1D; /* payload len - 1 */
4917 out->hb3 = (0x13 << 2); /* sdp version */
4918 out->sb[0] = 0x01; /* version */
4919 out->sb[1] = 0x1A; /* length */
4920 i = 2;
4921 break;
4922
4923 default:
4924 return -EINVAL;
4925 }
4926
4927 memcpy(&out->sb[i], &buf[4], 26);
4928 out->valid = true;
4929
4930 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4931 sizeof(out->sb), false);
4932
4933 return 0;
4934}
4935
4936static bool
4937is_hdr_metadata_different(const struct drm_connector_state *old_state,
4938 const struct drm_connector_state *new_state)
4939{
4940 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4941 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4942
4943 if (old_blob != new_blob) {
4944 if (old_blob && new_blob &&
4945 old_blob->length == new_blob->length)
4946 return memcmp(old_blob->data, new_blob->data,
4947 old_blob->length);
4948
4949 return true;
4950 }
4951
4952 return false;
4953}
4954
4955static int
4956amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 4957 struct drm_atomic_state *state)
88694af9 4958{
51e857af
SP
4959 struct drm_connector_state *new_con_state =
4960 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
4961 struct drm_connector_state *old_con_state =
4962 drm_atomic_get_old_connector_state(state, conn);
4963 struct drm_crtc *crtc = new_con_state->crtc;
4964 struct drm_crtc_state *new_crtc_state;
4965 int ret;
4966
4967 if (!crtc)
4968 return 0;
4969
4970 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4971 struct dc_info_packet hdr_infopacket;
4972
4973 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4974 if (ret)
4975 return ret;
4976
4977 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4978 if (IS_ERR(new_crtc_state))
4979 return PTR_ERR(new_crtc_state);
4980
4981 /*
4982 * DC considers the stream backends changed if the
4983 * static metadata changes. Forcing the modeset also
4984 * gives a simple way for userspace to switch from
b232d4ed
NK
4985 * 8bpc to 10bpc when setting the metadata to enter
4986 * or exit HDR.
4987 *
4988 * Changing the static metadata after it's been
4989 * set is permissible, however. So only force a
4990 * modeset if we're entering or exiting HDR.
88694af9 4991 */
b232d4ed
NK
4992 new_crtc_state->mode_changed =
4993 !old_con_state->hdr_output_metadata ||
4994 !new_con_state->hdr_output_metadata;
88694af9
NK
4995 }
4996
4997 return 0;
4998}
4999
e7b07cee
HW
5000static const struct drm_connector_helper_funcs
5001amdgpu_dm_connector_helper_funcs = {
5002 /*
1f6010a9 5003 * If hotplugging a second, bigger display in FB console mode, bigger
b830ebc9 5004 * resolution modes will be filtered by drm_mode_validate_size(), and
1f6010a9 5005 * those modes are missing after the user starts lightdm. So we need to
b830ebc9
HW
5006 * renew the mode list in the get_modes callback, not just return the mode count.
5007 */
e7b07cee
HW
5008 .get_modes = get_modes,
5009 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 5010 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
5011};
5012
5013static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5014{
5015}
5016
bc92c065
NK
5017static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5018{
5019 struct drm_device *dev = new_crtc_state->crtc->dev;
5020 struct drm_plane *plane;
5021
5022 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5023 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5024 return true;
5025 }
5026
5027 return false;
5028}
5029
d6ef9b41 5030static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
5031{
5032 struct drm_atomic_state *state = new_crtc_state->state;
5033 struct drm_plane *plane;
5034 int num_active = 0;
5035
5036 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5037 struct drm_plane_state *new_plane_state;
5038
5039 /* Cursor planes are "fake". */
5040 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5041 continue;
5042
5043 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5044
5045 if (!new_plane_state) {
5046 /*
 5048 * The plane is enabled on the CRTC and hasn't changed
5048 * state. This means that it previously passed
5049 * validation and is therefore enabled.
5050 */
5051 num_active += 1;
5052 continue;
5053 }
5054
5055 /* We need a framebuffer to be considered enabled. */
5056 num_active += (new_plane_state->fb != NULL);
5057 }
5058
d6ef9b41
NK
5059 return num_active;
5060}
5061
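/*
 * A worked example of the counting above: if a commit only moves the
 * cursor, the primary plane is not part of the atomic state, so
 * drm_atomic_get_new_plane_state() returns NULL for it and the
 * !new_plane_state branch still counts it as active, because it
 * previously passed validation with a framebuffer attached.
 */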
5062/*
5063 * Sets whether interrupts should be enabled on a specific CRTC.
5064 * We require that the stream be enabled and that there exist active
5065 * DC planes on the stream.
5066 */
5067static void
5068dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5069 struct drm_crtc_state *new_crtc_state)
5070{
5071 struct dm_crtc_state *dm_new_crtc_state =
5072 to_dm_crtc_state(new_crtc_state);
5073
5074 dm_new_crtc_state->active_planes = 0;
5075 dm_new_crtc_state->interrupts_enabled = false;
5076
5077 if (!dm_new_crtc_state->stream)
5078 return;
5079
5080 dm_new_crtc_state->active_planes =
5081 count_crtc_active_planes(new_crtc_state);
5082
5083 dm_new_crtc_state->interrupts_enabled =
5084 dm_new_crtc_state->active_planes > 0;
c14a005c
NK
5085}
5086
3ee6b26b
AD
5087static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5088 struct drm_crtc_state *state)
e7b07cee
HW
5089{
5090 struct amdgpu_device *adev = crtc->dev->dev_private;
5091 struct dc *dc = adev->dm.dc;
5092 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5093 int ret = -EINVAL;
5094
d6ef9b41
NK
5095 /*
5096 * Update interrupt state for the CRTC. This needs to happen whenever
5097 * the CRTC has changed or whenever any of its planes have changed.
5098 * Atomic check satisfies both of these requirements since the CRTC
5099 * is added to the state by DRM during drm_atomic_helper_check_planes.
5100 */
5101 dm_update_crtc_interrupt_state(crtc, state);
5102
9b690ef3
BL
5103 if (unlikely(!dm_crtc_state->stream &&
5104 modeset_required(state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
5105 WARN_ON(1);
5106 return ret;
5107 }
5108
1f6010a9 5109 /* In some use cases, like reset, no stream is attached */
e7b07cee
HW
5110 if (!dm_crtc_state->stream)
5111 return 0;
5112
bc92c065
NK
5113 /*
 5114 * We require at least one hardware plane to be enabled in order
 5115 * to use the stream with a cursor plane enabled.
5116 */
c14a005c 5117 if (state->enable && state->active &&
bc92c065 5118 does_crtc_have_active_cursor(state) &&
d6ef9b41 5119 dm_crtc_state->active_planes == 0)
c14a005c
NK
5120 return -EINVAL;
5121
62c933f9 5122 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
5123 return 0;
5124
5125 return ret;
5126}
5127
3ee6b26b
AD
5128static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5129 const struct drm_display_mode *mode,
5130 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
5131{
5132 return true;
5133}
5134
5135static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5136 .disable = dm_crtc_helper_disable,
5137 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
5138 .mode_fixup = dm_crtc_helper_mode_fixup,
5139 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
5140};
5141
5142static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5143{
5144
5145}
5146
3261e013
ML
 5147static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5148{
5149 switch (display_color_depth) {
5150 case COLOR_DEPTH_666:
5151 return 6;
5152 case COLOR_DEPTH_888:
5153 return 8;
5154 case COLOR_DEPTH_101010:
5155 return 10;
5156 case COLOR_DEPTH_121212:
5157 return 12;
5158 case COLOR_DEPTH_141414:
5159 return 14;
5160 case COLOR_DEPTH_161616:
5161 return 16;
5162 default:
5163 break;
5164 }
5165 return 0;
5166}
5167
3ee6b26b
AD
5168static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5169 struct drm_crtc_state *crtc_state,
5170 struct drm_connector_state *conn_state)
e7b07cee 5171{
3261e013
ML
5172 struct drm_atomic_state *state = crtc_state->state;
5173 struct drm_connector *connector = conn_state->connector;
5174 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5175 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5176 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5177 struct drm_dp_mst_topology_mgr *mst_mgr;
5178 struct drm_dp_mst_port *mst_port;
5179 enum dc_color_depth color_depth;
5180 int clock, bpp = 0;
1bc22f20 5181 bool is_y420 = false;
3261e013
ML
5182
5183 if (!aconnector->port || !aconnector->dc_sink)
5184 return 0;
5185
5186 mst_port = aconnector->port;
5187 mst_mgr = &aconnector->mst_port->mst_mgr;
5188
5189 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5190 return 0;
5191
5192 if (!state->duplicated) {
1bc22f20
SW
5193 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5194 aconnector->force_yuv420_output;
5195 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5196 is_y420);
3261e013
ML
5197 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5198 clock = adjusted_mode->clock;
dc48529f 5199 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
5200 }
5201 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5202 mst_mgr,
5203 mst_port,
1c6c1cb5
ML
5204 dm_new_connector_state->pbn,
5205 0);
3261e013
ML
5206 if (dm_new_connector_state->vcpi_slots < 0) {
5207 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5208 return dm_new_connector_state->vcpi_slots;
5209 }
e7b07cee
HW
5210 return 0;
5211}
5212
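/*
 * A worked example of the PBN math above, mirroring what
 * drm_dp_calc_pbn_mode() computes (including its 1.006 bandwidth
 * margin and final round-up; treat the numbers as approximate):
 *
 *   1920x1080@60, 8 bpc RGB: clock = 148500 kHz, bpp = 8 * 3 = 24
 *   PBN = ceil(148500 * 24 * 64 * 1006 / (54 * 8 * 1000 * 1000)) = 532
 *
 * The PBN value is then converted into 64-slot MST time slices by
 * drm_dp_atomic_find_vcpi_slots() above.
 */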
5213const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5214 .disable = dm_encoder_helper_disable,
5215 .atomic_check = dm_encoder_helper_atomic_check
5216};
5217
d9fe1a4c 5218#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
5219static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5220 struct dc_state *dc_state)
5221{
5222 struct dc_stream_state *stream = NULL;
5223 struct drm_connector *connector;
5224 struct drm_connector_state *new_con_state, *old_con_state;
5225 struct amdgpu_dm_connector *aconnector;
5226 struct dm_connector_state *dm_conn_state;
5227 int i, j, clock, bpp;
5228 int vcpi, pbn_div, pbn = 0;
5229
5230 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5231
5232 aconnector = to_amdgpu_dm_connector(connector);
5233
5234 if (!aconnector->port)
5235 continue;
5236
5237 if (!new_con_state || !new_con_state->crtc)
5238 continue;
5239
5240 dm_conn_state = to_dm_connector_state(new_con_state);
5241
5242 for (j = 0; j < dc_state->stream_count; j++) {
5243 stream = dc_state->streams[j];
5244 if (!stream)
5245 continue;
5246
5247 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5248 break;
5249
5250 stream = NULL;
5251 }
5252
5253 if (!stream)
5254 continue;
5255
5256 if (stream->timing.flags.DSC != 1) {
5257 drm_dp_mst_atomic_enable_dsc(state,
5258 aconnector->port,
5259 dm_conn_state->pbn,
5260 0,
5261 false);
5262 continue;
5263 }
5264
5265 pbn_div = dm_mst_get_pbn_divider(stream->link);
5266 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5267 clock = stream->timing.pix_clk_100hz / 10;
5268 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5269 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5270 aconnector->port,
5271 pbn, pbn_div,
5272 true);
5273 if (vcpi < 0)
5274 return vcpi;
5275
5276 dm_conn_state->pbn = pbn;
5277 dm_conn_state->vcpi_slots = vcpi;
5278 }
5279 return 0;
5280}
d9fe1a4c 5281#endif
29b9ba74 5282
e7b07cee
HW
5283static void dm_drm_plane_reset(struct drm_plane *plane)
5284{
5285 struct dm_plane_state *amdgpu_state = NULL;
5286
5287 if (plane->state)
5288 plane->funcs->atomic_destroy_state(plane, plane->state);
5289
5290 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 5291 WARN_ON(amdgpu_state == NULL);
1f6010a9 5292
7ddaef96
NK
5293 if (amdgpu_state)
5294 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
5295}
5296
5297static struct drm_plane_state *
5298dm_drm_plane_duplicate_state(struct drm_plane *plane)
5299{
5300 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5301
5302 old_dm_plane_state = to_dm_plane_state(plane->state);
5303 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5304 if (!dm_plane_state)
5305 return NULL;
5306
5307 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5308
3be5262e
HW
5309 if (old_dm_plane_state->dc_state) {
5310 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5311 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
5312 }
5313
5314 return &dm_plane_state->base;
5315}
5316
5317void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 5318 struct drm_plane_state *state)
e7b07cee
HW
5319{
5320 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5321
3be5262e
HW
5322 if (dm_plane_state->dc_state)
5323 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 5324
0627bbd3 5325 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
5326}
5327
5328static const struct drm_plane_funcs dm_plane_funcs = {
5329 .update_plane = drm_atomic_helper_update_plane,
5330 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 5331 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
5332 .reset = dm_drm_plane_reset,
5333 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5334 .atomic_destroy_state = dm_drm_plane_destroy_state,
5335};
5336
3ee6b26b
AD
5337static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5338 struct drm_plane_state *new_state)
e7b07cee
HW
5339{
5340 struct amdgpu_framebuffer *afb;
5341 struct drm_gem_object *obj;
5d43be0c 5342 struct amdgpu_device *adev;
e7b07cee 5343 struct amdgpu_bo *rbo;
e7b07cee 5344 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
5345 struct list_head list;
5346 struct ttm_validate_buffer tv;
5347 struct ww_acquire_ctx ticket;
e0634e8d 5348 uint64_t tiling_flags;
5d43be0c
CK
5349 uint32_t domain;
5350 int r;
e7b07cee
HW
5351
5352 dm_plane_state_old = to_dm_plane_state(plane->state);
5353 dm_plane_state_new = to_dm_plane_state(new_state);
5354
5355 if (!new_state->fb) {
f1ad2f5e 5356 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
5357 return 0;
5358 }
5359
5360 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 5361 obj = new_state->fb->obj[0];
e7b07cee 5362 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 5363 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
5364 INIT_LIST_HEAD(&list);
5365
5366 tv.bo = &rbo->tbo;
5367 tv.num_shared = 1;
5368 list_add(&tv.head, &list);
5369
9165fb87 5370 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
5371 if (r) {
5372 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 5373 return r;
0f257b09 5374 }
e7b07cee 5375
5d43be0c 5376 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 5377 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
5378 else
5379 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 5380
7b7c6c81 5381 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 5382 if (unlikely(r != 0)) {
30b7c614
HW
5383 if (r != -ERESTARTSYS)
5384 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 5385 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
5386 return r;
5387 }
5388
bb812f1e
JZ
5389 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5390 if (unlikely(r != 0)) {
5391 amdgpu_bo_unpin(rbo);
0f257b09 5392 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5393 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
5394 return r;
5395 }
7df7e505
NK
5396
5397 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5398
0f257b09 5399 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5400
7b7c6c81 5401 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
5402
5403 amdgpu_bo_ref(rbo);
5404
3be5262e
HW
5405 if (dm_plane_state_new->dc_state &&
5406 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5407 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
e7b07cee 5408
320932bf 5409 fill_plane_buffer_attributes(
695af5f9
NK
5410 adev, afb, plane_state->format, plane_state->rotation,
5411 tiling_flags, &plane_state->tiling_info,
320932bf 5412 &plane_state->plane_size, &plane_state->dcc,
695af5f9 5413 &plane_state->address);
e7b07cee
HW
5414 }
5415
e7b07cee
HW
5416 return 0;
5417}
5418
3ee6b26b
AD
5419static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5420 struct drm_plane_state *old_state)
e7b07cee
HW
5421{
5422 struct amdgpu_bo *rbo;
e7b07cee
HW
5423 int r;
5424
5425 if (!old_state->fb)
5426 return;
5427
e68d14dd 5428 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
5429 r = amdgpu_bo_reserve(rbo, false);
5430 if (unlikely(r)) {
5431 DRM_ERROR("failed to reserve rbo before unpin\n");
5432 return;
b830ebc9
HW
5433 }
5434
5435 amdgpu_bo_unpin(rbo);
5436 amdgpu_bo_unreserve(rbo);
5437 amdgpu_bo_unref(&rbo);
e7b07cee
HW
5438}
5439
7578ecda
AD
5440static int dm_plane_atomic_check(struct drm_plane *plane,
5441 struct drm_plane_state *state)
cbd19488
AG
5442{
5443 struct amdgpu_device *adev = plane->dev->dev_private;
5444 struct dc *dc = adev->dm.dc;
78171832 5445 struct dm_plane_state *dm_plane_state;
695af5f9
NK
5446 struct dc_scaling_info scaling_info;
5447 int ret;
78171832
NK
5448
5449 dm_plane_state = to_dm_plane_state(state);
cbd19488 5450
3be5262e 5451 if (!dm_plane_state->dc_state)
9a3329b1 5452 return 0;
cbd19488 5453
695af5f9
NK
5454 ret = fill_dc_scaling_info(state, &scaling_info);
5455 if (ret)
5456 return ret;
a05bcff1 5457
62c933f9 5458 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
5459 return 0;
5460
5461 return -EINVAL;
5462}
5463
674e78ac
NK
5464static int dm_plane_atomic_async_check(struct drm_plane *plane,
5465 struct drm_plane_state *new_plane_state)
5466{
5467 /* Only support async updates on cursor planes. */
5468 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5469 return -EINVAL;
5470
5471 return 0;
5472}
5473
5474static void dm_plane_atomic_async_update(struct drm_plane *plane,
5475 struct drm_plane_state *new_state)
5476{
5477 struct drm_plane_state *old_state =
5478 drm_atomic_get_old_plane_state(new_state->state, plane);
5479
332af874 5480 swap(plane->state->fb, new_state->fb);
674e78ac
NK
5481
5482 plane->state->src_x = new_state->src_x;
5483 plane->state->src_y = new_state->src_y;
5484 plane->state->src_w = new_state->src_w;
5485 plane->state->src_h = new_state->src_h;
5486 plane->state->crtc_x = new_state->crtc_x;
5487 plane->state->crtc_y = new_state->crtc_y;
5488 plane->state->crtc_w = new_state->crtc_w;
5489 plane->state->crtc_h = new_state->crtc_h;
5490
5491 handle_cursor_update(plane, old_state);
5492}
5493
e7b07cee
HW
5494static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5495 .prepare_fb = dm_plane_helper_prepare_fb,
5496 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 5497 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
5498 .atomic_async_check = dm_plane_atomic_async_check,
5499 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
5500};
5501
5502/*
5503 * TODO: these are currently initialized to rgb formats only.
5504 * For future use cases we should either initialize them dynamically based on
 5505 * plane capabilities, or initialize this array to all formats, so the internal
1f6010a9 5506 * DRM check will succeed, and let DC implement the proper check
e7b07cee 5507 */
d90371b0 5508static const uint32_t rgb_formats[] = {
e7b07cee
HW
5509 DRM_FORMAT_XRGB8888,
5510 DRM_FORMAT_ARGB8888,
5511 DRM_FORMAT_RGBA8888,
5512 DRM_FORMAT_XRGB2101010,
5513 DRM_FORMAT_XBGR2101010,
5514 DRM_FORMAT_ARGB2101010,
5515 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
5516 DRM_FORMAT_XBGR8888,
5517 DRM_FORMAT_ABGR8888,
46dd9ff7 5518 DRM_FORMAT_RGB565,
e7b07cee
HW
5519};
5520
0d579c7e
NK
5521static const uint32_t overlay_formats[] = {
5522 DRM_FORMAT_XRGB8888,
5523 DRM_FORMAT_ARGB8888,
5524 DRM_FORMAT_RGBA8888,
5525 DRM_FORMAT_XBGR8888,
5526 DRM_FORMAT_ABGR8888,
7267a1a9 5527 DRM_FORMAT_RGB565
e7b07cee
HW
5528};
5529
5530static const u32 cursor_formats[] = {
5531 DRM_FORMAT_ARGB8888
5532};
5533
37c6a93b
NK
5534static int get_plane_formats(const struct drm_plane *plane,
5535 const struct dc_plane_cap *plane_cap,
5536 uint32_t *formats, int max_formats)
e7b07cee 5537{
37c6a93b
NK
5538 int i, num_formats = 0;
5539
5540 /*
5541 * TODO: Query support for each group of formats directly from
5542 * DC plane caps. This will require adding more formats to the
5543 * caps list.
5544 */
e7b07cee 5545
f180b4bc 5546 switch (plane->type) {
e7b07cee 5547 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
5548 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5549 if (num_formats >= max_formats)
5550 break;
5551
5552 formats[num_formats++] = rgb_formats[i];
5553 }
5554
ea36ad34 5555 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 5556 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
5557 if (plane_cap && plane_cap->pixel_format_support.p010)
5558 formats[num_formats++] = DRM_FORMAT_P010;
e7b07cee 5559 break;
37c6a93b 5560
e7b07cee 5561 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
5562 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5563 if (num_formats >= max_formats)
5564 break;
5565
5566 formats[num_formats++] = overlay_formats[i];
5567 }
e7b07cee 5568 break;
37c6a93b 5569
e7b07cee 5570 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
5571 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5572 if (num_formats >= max_formats)
5573 break;
5574
5575 formats[num_formats++] = cursor_formats[i];
5576 }
e7b07cee
HW
5577 break;
5578 }
5579
37c6a93b
NK
5580 return num_formats;
5581}
5582
5583static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5584 struct drm_plane *plane,
5585 unsigned long possible_crtcs,
5586 const struct dc_plane_cap *plane_cap)
5587{
5588 uint32_t formats[32];
5589 int num_formats;
5590 int res = -EPERM;
5591
5592 num_formats = get_plane_formats(plane, plane_cap, formats,
5593 ARRAY_SIZE(formats));
5594
5595 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5596 &dm_plane_funcs, formats, num_formats,
5597 NULL, plane->type, NULL);
5598 if (res)
5599 return res;
5600
cc1fec57
NK
5601 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5602 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
5603 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5604 BIT(DRM_MODE_BLEND_PREMULTI);
5605
5606 drm_plane_create_alpha_property(plane);
5607 drm_plane_create_blend_mode_property(plane, blend_caps);
5608 }
5609
fc8e5230 5610 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
5611 plane_cap &&
5612 (plane_cap->pixel_format_support.nv12 ||
5613 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
5614 /* This only affects YUV formats. */
5615 drm_plane_create_color_properties(
5616 plane,
5617 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
5618 BIT(DRM_COLOR_YCBCR_BT709) |
5619 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
5620 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5621 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5622 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5623 }
5624
f180b4bc 5625 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 5626
96719c54 5627 /* Create (reset) the plane state */
f180b4bc
HW
5628 if (plane->funcs->reset)
5629 plane->funcs->reset(plane);
96719c54 5630
37c6a93b 5631 return 0;
e7b07cee
HW
5632}
5633
7578ecda
AD
5634static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5635 struct drm_plane *plane,
5636 uint32_t crtc_index)
e7b07cee
HW
5637{
5638 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 5639 struct drm_plane *cursor_plane;
e7b07cee
HW
5640
5641 int res = -ENOMEM;
5642
5643 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5644 if (!cursor_plane)
5645 goto fail;
5646
f180b4bc 5647 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 5648 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
5649
5650 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5651 if (!acrtc)
5652 goto fail;
5653
5654 res = drm_crtc_init_with_planes(
5655 dm->ddev,
5656 &acrtc->base,
5657 plane,
f180b4bc 5658 cursor_plane,
e7b07cee
HW
5659 &amdgpu_dm_crtc_funcs, NULL);
5660
5661 if (res)
5662 goto fail;
5663
5664 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5665
96719c54
HW
5666 /* Create (reset) the plane state */
5667 if (acrtc->base.funcs->reset)
5668 acrtc->base.funcs->reset(&acrtc->base);
5669
e7b07cee
HW
5670 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5671 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5672
5673 acrtc->crtc_id = crtc_index;
5674 acrtc->base.enabled = false;
c37e2d29 5675 acrtc->otg_inst = -1;
e7b07cee
HW
5676
5677 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
5678 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5679 true, MAX_COLOR_LUT_ENTRIES);
086247a4 5680 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e7b07cee
HW
5681
5682 return 0;
5683
5684fail:
b830ebc9
HW
5685 kfree(acrtc);
5686 kfree(cursor_plane);
e7b07cee
HW
5687 return res;
5688}
5689
5690
5691static int to_drm_connector_type(enum signal_type st)
5692{
5693 switch (st) {
5694 case SIGNAL_TYPE_HDMI_TYPE_A:
5695 return DRM_MODE_CONNECTOR_HDMIA;
5696 case SIGNAL_TYPE_EDP:
5697 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
5698 case SIGNAL_TYPE_LVDS:
5699 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
5700 case SIGNAL_TYPE_RGB:
5701 return DRM_MODE_CONNECTOR_VGA;
5702 case SIGNAL_TYPE_DISPLAY_PORT:
5703 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5704 return DRM_MODE_CONNECTOR_DisplayPort;
5705 case SIGNAL_TYPE_DVI_DUAL_LINK:
5706 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5707 return DRM_MODE_CONNECTOR_DVID;
5708 case SIGNAL_TYPE_VIRTUAL:
5709 return DRM_MODE_CONNECTOR_VIRTUAL;
5710
5711 default:
5712 return DRM_MODE_CONNECTOR_Unknown;
5713 }
5714}
5715
2b4c1c05
DV
5716static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5717{
62afb4ad
JRS
5718 struct drm_encoder *encoder;
5719
5720 /* There is only one encoder per connector */
5721 drm_connector_for_each_possible_encoder(connector, encoder)
5722 return encoder;
5723
5724 return NULL;
2b4c1c05
DV
5725}
5726
e7b07cee
HW
5727static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5728{
e7b07cee
HW
5729 struct drm_encoder *encoder;
5730 struct amdgpu_encoder *amdgpu_encoder;
5731
2b4c1c05 5732 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
5733
5734 if (encoder == NULL)
5735 return;
5736
5737 amdgpu_encoder = to_amdgpu_encoder(encoder);
5738
5739 amdgpu_encoder->native_mode.clock = 0;
5740
5741 if (!list_empty(&connector->probed_modes)) {
5742 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 5743
e7b07cee 5744 list_for_each_entry(preferred_mode,
b830ebc9
HW
5745 &connector->probed_modes,
5746 head) {
5747 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5748 amdgpu_encoder->native_mode = *preferred_mode;
5749
e7b07cee
HW
5750 break;
5751 }
5752
5753 }
5754}
5755
3ee6b26b
AD
5756static struct drm_display_mode *
5757amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5758 char *name,
5759 int hdisplay, int vdisplay)
e7b07cee
HW
5760{
5761 struct drm_device *dev = encoder->dev;
5762 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5763 struct drm_display_mode *mode = NULL;
5764 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5765
5766 mode = drm_mode_duplicate(dev, native_mode);
5767
b830ebc9 5768 if (mode == NULL)
e7b07cee
HW
5769 return NULL;
5770
5771 mode->hdisplay = hdisplay;
5772 mode->vdisplay = vdisplay;
5773 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 5774 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
5775
5776 return mode;
5777
5778}
5779
5780static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 5781 struct drm_connector *connector)
e7b07cee
HW
5782{
5783 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5784 struct drm_display_mode *mode = NULL;
5785 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
5786 struct amdgpu_dm_connector *amdgpu_dm_connector =
5787 to_amdgpu_dm_connector(connector);
e7b07cee
HW
5788 int i;
5789 int n;
5790 struct mode_size {
5791 char name[DRM_DISPLAY_MODE_LEN];
5792 int w;
5793 int h;
b830ebc9 5794 } common_modes[] = {
e7b07cee
HW
5795 { "640x480", 640, 480},
5796 { "800x600", 800, 600},
5797 { "1024x768", 1024, 768},
5798 { "1280x720", 1280, 720},
5799 { "1280x800", 1280, 800},
5800 {"1280x1024", 1280, 1024},
5801 { "1440x900", 1440, 900},
5802 {"1680x1050", 1680, 1050},
5803 {"1600x1200", 1600, 1200},
5804 {"1920x1080", 1920, 1080},
5805 {"1920x1200", 1920, 1200}
5806 };
5807
b830ebc9 5808 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
5809
5810 for (i = 0; i < n; i++) {
5811 struct drm_display_mode *curmode = NULL;
5812 bool mode_existed = false;
5813
5814 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
5815 common_modes[i].h > native_mode->vdisplay ||
5816 (common_modes[i].w == native_mode->hdisplay &&
5817 common_modes[i].h == native_mode->vdisplay))
5818 continue;
e7b07cee
HW
5819
5820 list_for_each_entry(curmode, &connector->probed_modes, head) {
5821 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 5822 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
5823 mode_existed = true;
5824 break;
5825 }
5826 }
5827
5828 if (mode_existed)
5829 continue;
5830
5831 mode = amdgpu_dm_create_common_mode(encoder,
5832 common_modes[i].name, common_modes[i].w,
5833 common_modes[i].h);
5834 drm_mode_probed_add(connector, mode);
c84dec2f 5835 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
5836 }
5837}
5838
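/*
 * An example of the filtering above: for a 1920x1080 native panel,
 * 1920x1200 and 1600x1200 are skipped (taller than native), 1920x1080
 * is skipped (equal to native), and smaller entries such as 1680x1050
 * are added unless an identical mode was already probed from the EDID.
 */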
3ee6b26b
AD
5839static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5840 struct edid *edid)
e7b07cee 5841{
c84dec2f
HW
5842 struct amdgpu_dm_connector *amdgpu_dm_connector =
5843 to_amdgpu_dm_connector(connector);
e7b07cee
HW
5844
5845 if (edid) {
5846 /* empty probed_modes */
5847 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 5848 amdgpu_dm_connector->num_modes =
e7b07cee
HW
5849 drm_add_edid_modes(connector, edid);
5850
f1e5e913
YMM
 5851 /* Sort the probed modes before calling
 5852 * amdgpu_dm_get_native_mode(), since an EDID can have
 5853 * more than one preferred mode. Modes that appear
 5854 * later in the probed mode list could have a higher
 5855 * preferred resolution. For example, 3840x2160 may be
 5856 * the preferred timing in the base EDID while 4096x2160
 5857 * is the preferred resolution in a later DID extension block.
5858 */
5859 drm_mode_sort(&connector->probed_modes);
e7b07cee 5860 amdgpu_dm_get_native_mode(connector);
a8d8d3dc 5861 } else {
c84dec2f 5862 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 5863 }
e7b07cee
HW
5864}
5865
7578ecda 5866static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 5867{
c84dec2f
HW
5868 struct amdgpu_dm_connector *amdgpu_dm_connector =
5869 to_amdgpu_dm_connector(connector);
e7b07cee 5870 struct drm_encoder *encoder;
c84dec2f 5871 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 5872
2b4c1c05 5873 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 5874
85ee15d6 5875 if (!edid || !drm_edid_is_valid(edid)) {
1b369d3c
ML
5876 amdgpu_dm_connector->num_modes =
5877 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
5878 } else {
5879 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5880 amdgpu_dm_connector_add_common_modes(encoder, connector);
5881 }
3e332d3a 5882 amdgpu_dm_fbc_init(connector);
5099114b 5883
c84dec2f 5884 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
5885}
5886
3ee6b26b
AD
5887void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5888 struct amdgpu_dm_connector *aconnector,
5889 int connector_type,
5890 struct dc_link *link,
5891 int link_index)
e7b07cee
HW
5892{
5893 struct amdgpu_device *adev = dm->ddev->dev_private;
5894
f04bee34
NK
5895 /*
5896 * Some of the properties below require access to state, like bpc.
5897 * Allocate some default initial connector state with our reset helper.
5898 */
5899 if (aconnector->base.funcs->reset)
5900 aconnector->base.funcs->reset(&aconnector->base);
5901
e7b07cee
HW
5902 aconnector->connector_id = link_index;
5903 aconnector->dc_link = link;
5904 aconnector->base.interlace_allowed = false;
5905 aconnector->base.doublescan_allowed = false;
5906 aconnector->base.stereo_allowed = false;
5907 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5908 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 5909 aconnector->audio_inst = -1;
e7b07cee
HW
5910 mutex_init(&aconnector->hpd_lock);
5911
1f6010a9
DF
5912 /*
 5913 * Configure HPD hot-plug support. The connector->polled default value is 0,
b830ebc9
HW
 5914 * which means HPD hot plug is not supported
5915 */
e7b07cee
HW
5916 switch (connector_type) {
5917 case DRM_MODE_CONNECTOR_HDMIA:
5918 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 5919 aconnector->base.ycbcr_420_allowed =
9ea59d5a 5920 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
5921 break;
5922 case DRM_MODE_CONNECTOR_DisplayPort:
5923 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 5924 aconnector->base.ycbcr_420_allowed =
9ea59d5a 5925 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
5926 break;
5927 case DRM_MODE_CONNECTOR_DVID:
5928 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5929 break;
5930 default:
5931 break;
5932 }
5933
5934 drm_object_attach_property(&aconnector->base.base,
5935 dm->ddev->mode_config.scaling_mode_property,
5936 DRM_MODE_SCALE_NONE);
5937
5938 drm_object_attach_property(&aconnector->base.base,
5939 adev->mode_info.underscan_property,
5940 UNDERSCAN_OFF);
5941 drm_object_attach_property(&aconnector->base.base,
5942 adev->mode_info.underscan_hborder_property,
5943 0);
5944 drm_object_attach_property(&aconnector->base.base,
5945 adev->mode_info.underscan_vborder_property,
5946 0);
1825fd34 5947
b754c07a
JFZ
5948 if (!aconnector->mst_port)
5949 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 5950
4a8ca46b
RL
 5951 /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
5952 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5953 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 5954
c1ee92f9
DF
5955 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5956 dc_is_dmcu_initialized(adev->dm.dc)) {
5957 drm_object_attach_property(&aconnector->base.base,
5958 adev->mode_info.abm_level_property, 0);
5959 }
bb47de73
NK
5960
5961 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
5962 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5963 connector_type == DRM_MODE_CONNECTOR_eDP) {
88694af9
NK
5964 drm_object_attach_property(
5965 &aconnector->base.base,
5966 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5967
b754c07a
JFZ
5968 if (!aconnector->mst_port)
5969 drm_connector_attach_vrr_capable_property(&aconnector->base);
5970
0c8620d6 5971#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 5972 if (adev->dm.hdcp_workqueue)
53e108aa 5973 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 5974#endif
bb47de73 5975 }
e7b07cee
HW
5976}
5977
7578ecda
AD
5978static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5979 struct i2c_msg *msgs, int num)
e7b07cee
HW
5980{
5981 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5982 struct ddc_service *ddc_service = i2c->ddc_service;
5983 struct i2c_command cmd;
5984 int i;
5985 int result = -EIO;
5986
b830ebc9 5987 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
5988
5989 if (!cmd.payloads)
5990 return result;
5991
5992 cmd.number_of_payloads = num;
5993 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5994 cmd.speed = 100;
5995
5996 for (i = 0; i < num; i++) {
5997 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5998 cmd.payloads[i].address = msgs[i].addr;
5999 cmd.payloads[i].length = msgs[i].len;
6000 cmd.payloads[i].data = msgs[i].buf;
6001 }
6002
c85e6e54
DF
6003 if (dc_submit_i2c(
6004 ddc_service->ctx->dc,
6005 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
6006 &cmd))
6007 result = num;
6008
6009 kfree(cmd.payloads);
6010 return result;
6011}
6012
7578ecda 6013static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
6014{
6015 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6016}
6017
6018static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6019 .master_xfer = amdgpu_dm_i2c_xfer,
6020 .functionality = amdgpu_dm_i2c_func,
6021};
6022
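/*
 * An illustrative (hypothetical) user of this adapter, showing how a
 * standard two-message register read maps onto the DC i2c_payloads
 * built in amdgpu_dm_i2c_xfer() above; 0x50 is the usual DDC/EDID
 * address. A sketch only, not part of the driver:
 */
static int example_ddc_read_byte(struct i2c_adapter *adap, u8 offset, u8 *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
	};

	/* Each message becomes one i2c_payload; write = !(flags & I2C_M_RD). */
	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
}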
3ee6b26b
AD
6023static struct amdgpu_i2c_adapter *
6024create_i2c(struct ddc_service *ddc_service,
6025 int link_index,
6026 int *res)
e7b07cee
HW
6027{
6028 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6029 struct amdgpu_i2c_adapter *i2c;
6030
b830ebc9 6031 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
6032 if (!i2c)
6033 return NULL;
e7b07cee
HW
6034 i2c->base.owner = THIS_MODULE;
6035 i2c->base.class = I2C_CLASS_DDC;
6036 i2c->base.dev.parent = &adev->pdev->dev;
6037 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 6038 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
6039 i2c_set_adapdata(&i2c->base, i2c);
6040 i2c->ddc_service = ddc_service;
c85e6e54 6041 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
6042
6043 return i2c;
6044}
6045
89fc8d4e 6046
1f6010a9
DF
6047/*
6048 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
6049 * dc_link which will be represented by this aconnector.
6050 */
7578ecda
AD
6051static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6052 struct amdgpu_dm_connector *aconnector,
6053 uint32_t link_index,
6054 struct amdgpu_encoder *aencoder)
e7b07cee
HW
6055{
6056 int res = 0;
6057 int connector_type;
6058 struct dc *dc = dm->dc;
6059 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6060 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
6061
6062 link->priv = aconnector;
e7b07cee 6063
f1ad2f5e 6064 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
6065
6066 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
6067 if (!i2c) {
6068 DRM_ERROR("Failed to create i2c adapter data\n");
6069 return -ENOMEM;
6070 }
6071
e7b07cee
HW
6072 aconnector->i2c = i2c;
6073 res = i2c_add_adapter(&i2c->base);
6074
6075 if (res) {
6076 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6077 goto out_free;
6078 }
6079
6080 connector_type = to_drm_connector_type(link->connector_signal);
6081
17165de2 6082 res = drm_connector_init_with_ddc(
e7b07cee
HW
6083 dm->ddev,
6084 &aconnector->base,
6085 &amdgpu_dm_connector_funcs,
17165de2
AP
6086 connector_type,
6087 &i2c->base);
e7b07cee
HW
6088
6089 if (res) {
6090 DRM_ERROR("connector_init failed\n");
6091 aconnector->connector_id = -1;
6092 goto out_free;
6093 }
6094
6095 drm_connector_helper_add(
6096 &aconnector->base,
6097 &amdgpu_dm_connector_helper_funcs);
6098
6099 amdgpu_dm_connector_init_helper(
6100 dm,
6101 aconnector,
6102 connector_type,
6103 link,
6104 link_index);
6105
cde4c44d 6106 drm_connector_attach_encoder(
e7b07cee
HW
6107 &aconnector->base, &aencoder->base);
6108
e7b07cee
HW
6109 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6110 || connector_type == DRM_MODE_CONNECTOR_eDP)
9f656935 6111 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 6112
e7b07cee
HW
6113out_free:
6114 if (res) {
6115 kfree(i2c);
6116 aconnector->i2c = NULL;
6117 }
6118 return res;
6119}
6120
6121int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6122{
6123 switch (adev->mode_info.num_crtc) {
6124 case 1:
6125 return 0x1;
6126 case 2:
6127 return 0x3;
6128 case 3:
6129 return 0x7;
6130 case 4:
6131 return 0xf;
6132 case 5:
6133 return 0x1f;
6134 case 6:
6135 default:
6136 return 0x3f;
6137 }
6138}
6139
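/*
 * The switch above is equivalent to setting the lowest num_crtc bits,
 * clamped at the 6 CRTCs handled here. A minimal sketch of that
 * equivalence (hypothetical helper, not used by the driver):
 */
static inline u32 example_encoder_crtc_mask(int num_crtc)
{
	return GENMASK(min(num_crtc, 6) - 1, 0);
}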
7578ecda
AD
6140static int amdgpu_dm_encoder_init(struct drm_device *dev,
6141 struct amdgpu_encoder *aencoder,
6142 uint32_t link_index)
e7b07cee
HW
6143{
6144 struct amdgpu_device *adev = dev->dev_private;
6145
6146 int res = drm_encoder_init(dev,
6147 &aencoder->base,
6148 &amdgpu_dm_encoder_funcs,
6149 DRM_MODE_ENCODER_TMDS,
6150 NULL);
6151
6152 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6153
6154 if (!res)
6155 aencoder->encoder_id = link_index;
6156 else
6157 aencoder->encoder_id = -1;
6158
6159 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6160
6161 return res;
6162}
6163
3ee6b26b
AD
6164static void manage_dm_interrupts(struct amdgpu_device *adev,
6165 struct amdgpu_crtc *acrtc,
6166 bool enable)
e7b07cee
HW
6167{
6168 /*
 6169 * this is not a correct translation, but it will work as long as the
 6170 * VBLANK constant is the same as PFLIP
6171 */
6172 int irq_type =
734dd01d 6173 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
6174 adev,
6175 acrtc->crtc_id);
6176
6177 if (enable) {
6178 drm_crtc_vblank_on(&acrtc->base);
6179 amdgpu_irq_get(
6180 adev,
6181 &adev->pageflip_irq,
6182 irq_type);
6183 } else {
6184
6185 amdgpu_irq_put(
6186 adev,
6187 &adev->pageflip_irq,
6188 irq_type);
6189 drm_crtc_vblank_off(&acrtc->base);
6190 }
6191}
6192
3ee6b26b
AD
6193static bool
6194is_scaling_state_different(const struct dm_connector_state *dm_state,
6195 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
6196{
6197 if (dm_state->scaling != old_dm_state->scaling)
6198 return true;
6199 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6200 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6201 return true;
6202 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6203 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6204 return true;
b830ebc9
HW
6205 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6206 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6207 return true;
e7b07cee
HW
6208 return false;
6209}
6210
0c8620d6
BL
6211#ifdef CONFIG_DRM_AMD_DC_HDCP
6212static bool is_content_protection_different(struct drm_connector_state *state,
6213 const struct drm_connector_state *old_state,
6214 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6215{
6216 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6217
53e108aa
BL
6218 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6219 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6220 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6221 return true;
6222 }
6223
0c8620d6
BL
 6224 /* CP is being re-enabled, ignore this */
6225 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6226 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6227 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6228 return false;
6229 }
6230
6231 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6232 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6233 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6234 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6235
 6236 /* Check if something is connected/enabled; otherwise we would start HDCP
 6237 * when nothing is connected/enabled (hot-plug, headless S3, DPMS)
6238 */
6239 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6240 aconnector->dc_sink != NULL)
6241 return true;
6242
6243 if (old_state->content_protection == state->content_protection)
6244 return false;
6245
6246 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6247 return true;
6248
6249 return false;
6250}
6251
0c8620d6 6252#endif
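/*
 * A summary of the transitions handled above (derived from the checks;
 * UNDESIRED/DESIRED/ENABLED are the DRM content protection property
 * values):
 *
 *   hdcp_content_type changed, state != UNDESIRED -> DESIRED, restart
 *   ENABLED -> DESIRED (re-enable race)           -> keep ENABLED, no-op
 *   UNDESIRED -> ENABLED (S3 restore)             -> demoted to DESIRED
 *   DESIRED with sink present and DPMS on         -> start HDCP
 *   state unchanged                               -> no-op
 *   any state -> UNDESIRED                        -> disable HDCP
 */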
3ee6b26b
AD
6253static void remove_stream(struct amdgpu_device *adev,
6254 struct amdgpu_crtc *acrtc,
6255 struct dc_stream_state *stream)
e7b07cee
HW
6256{
6257 /* this is the update mode case */
e7b07cee
HW
6258
6259 acrtc->otg_inst = -1;
6260 acrtc->enabled = false;
6261}
6262
7578ecda
AD
6263static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6264 struct dc_cursor_position *position)
2a8f6ccb 6265{
f4c2cc43 6266 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
6267 int x, y;
6268 int xorigin = 0, yorigin = 0;
6269
e371e19c
NK
6270 position->enable = false;
6271 position->x = 0;
6272 position->y = 0;
6273
6274 if (!crtc || !plane->state->fb)
2a8f6ccb 6275 return 0;
2a8f6ccb
HW
6276
6277 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6278 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6279 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6280 __func__,
6281 plane->state->crtc_w,
6282 plane->state->crtc_h);
6283 return -EINVAL;
6284 }
6285
6286 x = plane->state->crtc_x;
6287 y = plane->state->crtc_y;
c14a005c 6288
e371e19c
NK
6289 if (x <= -amdgpu_crtc->max_cursor_width ||
6290 y <= -amdgpu_crtc->max_cursor_height)
6291 return 0;
6292
2a8f6ccb
HW
6293 if (x < 0) {
6294 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6295 x = 0;
6296 }
6297 if (y < 0) {
6298 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6299 y = 0;
6300 }
6301 position->enable = true;
033baeee 6302 position->translate_by_source = true;
2a8f6ccb
HW
6303 position->x = x;
6304 position->y = y;
6305 position->x_hotspot = xorigin;
6306 position->y_hotspot = yorigin;
6307
6308 return 0;
6309}
6310
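/*
 * A worked example of the clamping above: a 64x64 cursor dragged to
 * crtc_x = -10 yields x = 0 with x_hotspot = 10, so DC crops the
 * leftmost 10 columns instead of wrapping the cursor around the edge.
 */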
3ee6b26b
AD
6311static void handle_cursor_update(struct drm_plane *plane,
6312 struct drm_plane_state *old_plane_state)
e7b07cee 6313{
674e78ac 6314 struct amdgpu_device *adev = plane->dev->dev_private;
2a8f6ccb
HW
6315 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6316 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6317 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6318 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6319 uint64_t address = afb ? afb->address : 0;
6320 struct dc_cursor_position position;
6321 struct dc_cursor_attributes attributes;
6322 int ret;
6323
e7b07cee
HW
6324 if (!plane->state->fb && !old_plane_state->fb)
6325 return;
6326
f1ad2f5e 6327 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
c12a7ba5
HW
6328 __func__,
6329 amdgpu_crtc->crtc_id,
6330 plane->state->crtc_w,
6331 plane->state->crtc_h);
2a8f6ccb
HW
6332
6333 ret = get_cursor_position(plane, crtc, &position);
6334 if (ret)
6335 return;
6336
6337 if (!position.enable) {
6338 /* turn off cursor */
674e78ac
NK
6339 if (crtc_state && crtc_state->stream) {
6340 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
6341 dc_stream_set_cursor_position(crtc_state->stream,
6342 &position);
674e78ac
NK
6343 mutex_unlock(&adev->dm.dc_lock);
6344 }
2a8f6ccb 6345 return;
e7b07cee 6346 }
e7b07cee 6347
2a8f6ccb
HW
6348 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6349 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6350
c1cefe11 6351 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
6352 attributes.address.high_part = upper_32_bits(address);
6353 attributes.address.low_part = lower_32_bits(address);
6354 attributes.width = plane->state->crtc_w;
6355 attributes.height = plane->state->crtc_h;
6356 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6357 attributes.rotation_angle = 0;
6358 attributes.attribute_flags.value = 0;
6359
6360 attributes.pitch = attributes.width;
6361
886daac9 6362 if (crtc_state->stream) {
674e78ac 6363 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
6364 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6365 &attributes))
6366 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 6367
2a8f6ccb
HW
6368 if (!dc_stream_set_cursor_position(crtc_state->stream,
6369 &position))
6370 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 6371 mutex_unlock(&adev->dm.dc_lock);
886daac9 6372 }
2a8f6ccb 6373}
e7b07cee
HW
6374
6375static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6376{
6377
6378 assert_spin_locked(&acrtc->base.dev->event_lock);
6379 WARN_ON(acrtc->event);
6380
6381 acrtc->event = acrtc->base.state->event;
6382
6383 /* Set the flip status */
6384 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6385
6386 /* Mark this event as consumed */
6387 acrtc->base.state->event = NULL;
6388
6389 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6390 acrtc->crtc_id);
6391}
6392
bb47de73
NK
6393static void update_freesync_state_on_stream(
6394 struct amdgpu_display_manager *dm,
6395 struct dm_crtc_state *new_crtc_state,
180db303
NK
6396 struct dc_stream_state *new_stream,
6397 struct dc_plane_state *surface,
6398 u32 flip_timestamp_in_us)
bb47de73 6399{
09aef2c4 6400 struct mod_vrr_params vrr_params;
bb47de73 6401 struct dc_info_packet vrr_infopacket = {0};
09aef2c4
MK
6402 struct amdgpu_device *adev = dm->adev;
6403 unsigned long flags;
bb47de73
NK
6404
6405 if (!new_stream)
6406 return;
6407
6408 /*
6409 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6410 * For now it's sufficient to just guard against these conditions.
6411 */
6412
6413 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6414 return;
6415
09aef2c4
MK
6416 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6417 vrr_params = new_crtc_state->vrr_params;
6418
180db303
NK
6419 if (surface) {
6420 mod_freesync_handle_preflip(
6421 dm->freesync_module,
6422 surface,
6423 new_stream,
6424 flip_timestamp_in_us,
6425 &vrr_params);
09aef2c4
MK
6426
6427 if (adev->family < AMDGPU_FAMILY_AI &&
6428 amdgpu_dm_vrr_active(new_crtc_state)) {
6429 mod_freesync_handle_v_update(dm->freesync_module,
6430 new_stream, &vrr_params);
e63e2491
EB
6431
6432 /* Need to call this before the frame ends. */
6433 dc_stream_adjust_vmin_vmax(dm->dc,
6434 new_crtc_state->stream,
6435 &vrr_params.adjust);
09aef2c4 6436 }
180db303 6437 }
bb47de73
NK
6438
6439 mod_freesync_build_vrr_infopacket(
6440 dm->freesync_module,
6441 new_stream,
180db303 6442 &vrr_params,
ecd0136b
HT
6443 PACKET_TYPE_VRR,
6444 TRANSFER_FUNC_UNKNOWN,
bb47de73
NK
6445 &vrr_infopacket);
6446
8a48b44c 6447 new_crtc_state->freesync_timing_changed |=
180db303
NK
6448 (memcmp(&new_crtc_state->vrr_params.adjust,
6449 &vrr_params.adjust,
6450 sizeof(vrr_params.adjust)) != 0);
bb47de73 6451
8a48b44c 6452 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
6453 (memcmp(&new_crtc_state->vrr_infopacket,
6454 &vrr_infopacket,
6455 sizeof(vrr_infopacket)) != 0);
6456
180db303 6457 new_crtc_state->vrr_params = vrr_params;
bb47de73
NK
6458 new_crtc_state->vrr_infopacket = vrr_infopacket;
6459
180db303 6460 new_stream->adjust = new_crtc_state->vrr_params.adjust;
bb47de73
NK
6461 new_stream->vrr_infopacket = vrr_infopacket;
6462
6463 if (new_crtc_state->freesync_vrr_info_changed)
6464 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6465 new_crtc_state->base.crtc->base.id,
6466 (int)new_crtc_state->base.vrr_enabled,
180db303 6467 (int)vrr_params.state);
09aef2c4
MK
6468
6469 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
bb47de73
NK
6470}
6471
e854194c
MK
6472static void pre_update_freesync_state_on_stream(
6473 struct amdgpu_display_manager *dm,
6474 struct dm_crtc_state *new_crtc_state)
6475{
6476 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 6477 struct mod_vrr_params vrr_params;
e854194c 6478 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4
MK
6479 struct amdgpu_device *adev = dm->adev;
6480 unsigned long flags;
e854194c
MK
6481
6482 if (!new_stream)
6483 return;
6484
6485 /*
6486 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6487 * For now it's sufficient to just guard against these conditions.
6488 */
6489 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6490 return;
6491
09aef2c4
MK
6492 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6493 vrr_params = new_crtc_state->vrr_params;
6494
e854194c
MK
6495 if (new_crtc_state->vrr_supported &&
6496 config.min_refresh_in_uhz &&
6497 config.max_refresh_in_uhz) {
6498 config.state = new_crtc_state->base.vrr_enabled ?
6499 VRR_STATE_ACTIVE_VARIABLE :
6500 VRR_STATE_INACTIVE;
6501 } else {
6502 config.state = VRR_STATE_UNSUPPORTED;
6503 }
6504
6505 mod_freesync_build_vrr_params(dm->freesync_module,
6506 new_stream,
6507 &config, &vrr_params);
6508
6509 new_crtc_state->freesync_timing_changed |=
6510 (memcmp(&new_crtc_state->vrr_params.adjust,
6511 &vrr_params.adjust,
6512 sizeof(vrr_params.adjust)) != 0);
6513
6514 new_crtc_state->vrr_params = vrr_params;
09aef2c4 6515 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
e854194c
MK
6516}
6517
66b0c973
MK
6518static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6519 struct dm_crtc_state *new_state)
6520{
6521 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6522 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6523
6524 if (!old_vrr_active && new_vrr_active) {
6525 /* Transition VRR inactive -> active:
6526 * While VRR is active, we must not disable vblank irq, as a
6527 * reenable after disable would compute bogus vblank/pflip
 6528 * timestamps if it happened inside the display front porch.
d2574c33
MK
6529 *
6530 * We also need vupdate irq for the actual core vblank handling
6531 * at end of vblank.
66b0c973 6532 */
d2574c33 6533 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
6534 drm_crtc_vblank_get(new_state->base.crtc);
6535 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6536 __func__, new_state->base.crtc->base.id);
6537 } else if (old_vrr_active && !new_vrr_active) {
6538 /* Transition VRR active -> inactive:
6539 * Allow vblank irq disable again for fixed refresh rate.
6540 */
d2574c33 6541 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
6542 drm_crtc_vblank_put(new_state->base.crtc);
6543 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6544 __func__, new_state->base.crtc->base.id);
6545 }
6546}
6547
8ad27806
NK
6548static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6549{
6550 struct drm_plane *plane;
6551 struct drm_plane_state *old_plane_state, *new_plane_state;
6552 int i;
6553
6554 /*
6555 * TODO: Make this per-stream so we don't issue redundant updates for
6556 * commits with multiple streams.
6557 */
6558 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6559 new_plane_state, i)
6560 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6561 handle_cursor_update(plane, old_plane_state);
6562}
6563
3be5262e 6564static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 6565 struct dc_state *dc_state,
3ee6b26b
AD
6566 struct drm_device *dev,
6567 struct amdgpu_display_manager *dm,
6568 struct drm_crtc *pcrtc,
420cd472 6569 bool wait_for_vblank)
e7b07cee 6570{
570c91d5 6571 uint32_t i;
8a48b44c 6572 uint64_t timestamp_ns;
e7b07cee 6573 struct drm_plane *plane;
0bc9706d 6574 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 6575 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
6576 struct drm_crtc_state *new_pcrtc_state =
6577 drm_atomic_get_new_crtc_state(state, pcrtc);
6578 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
6579 struct dm_crtc_state *dm_old_crtc_state =
6580 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 6581 int planes_count = 0, vpos, hpos;
570c91d5 6582 long r;
e7b07cee 6583 unsigned long flags;
8a48b44c 6584 struct amdgpu_bo *abo;
09e5665a 6585 uint64_t tiling_flags;
fdd1fe57
MK
6586 uint32_t target_vblank, last_flip_vblank;
6587 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 6588 bool pflip_present = false;
bc7f670e
DF
6589 struct {
6590 struct dc_surface_update surface_updates[MAX_SURFACES];
6591 struct dc_plane_info plane_infos[MAX_SURFACES];
6592 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 6593 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 6594 struct dc_stream_update stream_update;
74aa7bd4 6595 } *bundle;
bc7f670e 6596
74aa7bd4 6597 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 6598
74aa7bd4
DF
6599 if (!bundle) {
6600 dm_error("Failed to allocate update bundle\n");
4b510503
NK
6601 goto cleanup;
6602 }
e7b07cee 6603
8ad27806
NK
6604 /*
6605 * Disable the cursor first if we're disabling all the planes.
6606 * It'll remain on the screen after the planes are re-enabled
6607 * if we don't.
6608 */
6609 if (acrtc_state->active_planes == 0)
6610 amdgpu_dm_commit_cursors(state);
6611
e7b07cee 6612 /* update planes when needed */
0bc9706d
LSL
6613 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6614 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 6615 struct drm_crtc_state *new_crtc_state;
0bc9706d 6616 struct drm_framebuffer *fb = new_plane_state->fb;
34bafd27 6617 bool plane_needs_flip;
c7af5f77 6618 struct dc_plane_state *dc_plane;
54d76575 6619 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 6620
80c218d5
NK
6621 /* Cursor plane is handled after stream updates */
6622 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 6623 continue;
e7b07cee 6624
f5ba60fe
DD
6625 if (!fb || !crtc || pcrtc != crtc)
6626 continue;
6627
6628 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6629 if (!new_crtc_state->active)
e7b07cee
HW
6630 continue;
6631
bc7f670e 6632 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 6633
74aa7bd4 6634 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 6635 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
6636 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6637 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
bc7f670e 6638 }
8a48b44c 6639
695af5f9
NK
6640 fill_dc_scaling_info(new_plane_state,
6641 &bundle->scaling_infos[planes_count]);
8a48b44c 6642
695af5f9
NK
6643 bundle->surface_updates[planes_count].scaling_info =
6644 &bundle->scaling_infos[planes_count];
8a48b44c 6645
f5031000 6646 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 6647
f5031000 6648 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 6649
f5031000
DF
6650 if (!plane_needs_flip) {
6651 planes_count += 1;
6652 continue;
6653 }
8a48b44c 6654
2fac0f53
CK
6655 abo = gem_to_amdgpu_bo(fb->obj[0]);
6656
f8308898
AG
6657 /*
 6658 * Wait for all fences on this FB. Do a limited wait to avoid
 6659 * deadlock during GPU reset, when this fence will not signal
 6660 * but we hold the reservation lock for the BO.
6661 */
52791eee 6662 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 6663 false,
f8308898
AG
6664 msecs_to_jiffies(5000));
6665 if (unlikely(r <= 0))
ed8a5fb2 6666 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 6667
f5031000
DF
6668 /*
 6669 * TODO: This might fail and is hence better not used; wait
 6670 * explicitly on the fences instead. In general this should
 6671 * only be called for blocking commits, as per the
 6672 * framework helpers
6673 */
f5031000 6674 r = amdgpu_bo_reserve(abo, true);
f8308898 6675 if (unlikely(r != 0))
f5031000 6676 DRM_ERROR("failed to reserve buffer before flip\n");
8a48b44c 6677
f5031000 6678 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
8a48b44c 6679
f5031000 6680 amdgpu_bo_unreserve(abo);

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state, tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
					acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}
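		/*
		 * Illustrative numbers: with wait_for_vblank == 1 and
		 * last_flip_vblank == 1000, target_vblank is 1001, so the
		 * loop above sleeps in ~1 ms steps until scanout has left
		 * the vblank period preceding frame 1001, and only then is
		 * the flip programmed.
		 */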

		if (acrtc_attach->base.state->event) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->psr_version &&
		    !acrtc_state->stream->link->psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}
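		/*
		 * In short: a non-fast update first kicks PSR out of its
		 * active state before the commit, then performs the one-time
		 * PSR link setup if the panel advertises a PSR version; only
		 * subsequent fast updates actually re-enter PSR.
		 */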

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}

static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}

/*
 * Enable interrupts on CRTCs that are newly active, have undergone
 * a modeset, or have active planes again.
 *
 * Done in two passes, based on the for_modeset flag:
 * Pass 1: For CRTCs going through modeset
 * Pass 2: For CRTCs going from 0 to n active planes
 *
 * Interrupts can only be enabled after the planes are programmed,
 * so this requires a two-pass approach since we don't want to
 * just defer the interrupts until after commit planes every time.
 */
static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
					     struct drm_atomic_state *state,
					     bool for_modeset)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;
#ifdef CONFIG_DEBUG_FS
	enum amdgpu_dm_pipe_crc_source source;
#endif

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);
		struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(old_crtc_state);
		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
		bool run_pass;

		run_pass = (for_modeset && modeset) ||
			   (!for_modeset && !modeset &&
			    !dm_old_crtc_state->interrupts_enabled);

		if (!run_pass)
			continue;

		if (!dm_new_crtc_state->interrupts_enabled)
			continue;

		manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
		/* The stream has changed so CRC capture needs to be re-enabled. */
		source = dm_new_crtc_state->crc_src;
		if (amdgpu_dm_is_valid_crc_source(source)) {
			amdgpu_dm_crtc_configure_crc_source(
				crtc, dm_new_crtc_state,
				dm_new_crtc_state->crc_src);
		}
#endif
	}
}

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	/*
	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
	 * a modeset, being disabled, or have no active planes.
	 *
	 * It's done in atomic commit rather than commit tail for now since
	 * some of these interrupt handlers access the current CRTC state and
	 * potentially the stream pointer itself.
	 *
	 * Since the atomic state is swapped within atomic commit and not within
	 * commit tail, this would lead to the new state (that hasn't been
	 * committed yet) being accessed from within the handlers.
	 *
	 * TODO: Fix this so we can do this in commit tail and not have to block
	 * in atomic check.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (dm_old_crtc_state->interrupts_enabled &&
		    (!dm_new_crtc_state->interrupts_enabled ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
			manage_dm_interrupts(adev, acrtc, false);
	}
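	/*
	 * For example, a CRTC whose active plane count drops to zero has
	 * interrupts_enabled cleared in its new state, so its vblank/pflip
	 * interrupts are masked here before any handler can observe the
	 * swapped-in (not yet committed) state.
	 */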
	/*
	 * Add check here for SoCs that support hardware cursor plane, to
	 * unset legacy_cursor_update
	 */

	return drm_atomic_helper_commit(dev, state, nonblock);

	/* TODO: Handle EINTR, reenable IRQ */
}

/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery.
				 * In this case userspace tries to set a mode
				 * on a display which is in fact disconnected;
				 * dc_sink is NULL on the aconnector here.
				 * We expect a reset mode will come soon.
				 *
				 * This can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream) {
				if (dm_old_crtc_state->stream->link->psr_allow_active)
					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);

				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			}
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}
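	/*
	 * dc_commit_state() programs the global stream topology constructed
	 * in atomic check; the per-stream updates further below (and the
	 * per-CRTC plane commits) are then applied on top of it via
	 * dc_commit_updates_for_stream().
	 */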

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);

			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
													 : false);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* Update freesync active state. */
		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/* Enable interrupts for CRTCs going through a modeset. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, true);

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per CRTC */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Enable interrupts for CRTCs going from 0 to n active planes. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, false);

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * Send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}


static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto err;

	/* Attach crtc to drm_atomic_state */
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto err;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto err;


	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}

/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we cannot rely on a usermode call
	 * to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it, the
	 * extra locks we are locking here will get released too.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
				&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}
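	/*
	 * A negative ret means the wait was interrupted and is propagated;
	 * a timeout (ret == 0) was already logged above but is deliberately
	 * treated as non-fatal, hence the clamp to 0 below.
	 */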

	return ret < 0 ? ret : 0;
}

static void get_freesync_config_for_crtc(
		struct dm_crtc_state *new_crtc_state,
		struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz =
			aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
			aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}
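	/*
	 * Worked numbers: a panel reporting a 48-144 Hz FreeSync range
	 * yields min_refresh_in_uhz = 48,000,000 and max_refresh_in_uhz =
	 * 144,000,000, since the mod_freesync API takes micro-Hz
	 * (Hz * 10^6).
	 */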

	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
		struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_params, 0,
	       sizeof(new_crtc_state->vrr_params));
	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}

static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
	 * dc_validation_set; update changed items.
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_stream_for_sink(aconnector,
						    &new_crtc_state->mode,
						    dm_new_conn_state,
						    dm_old_crtc_state->stream);

		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}
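	/*
	 * Clearing mode_changed above lets an otherwise identical stream
	 * (for example, a DPMS off/on cycle with unchanged timing and
	 * scaling) take the fast-update path below instead of a full
	 * modeset.
	 */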

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when
		 * added MST connectors are not found in the existing
		 * crtc_state in chained mode.
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->enable &&
	      new_crtc_state->active))
		return 0;
	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}

static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on the
	 * stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* TODO: Remove this once we can handle fast format changes. */
		if (old_other_state->fb && new_other_state->fb &&
		    old_other_state->fb->format != new_other_state->fb->format)
			return true;
	}

	return false;
}

static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{

	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	bool needs_reset;
	int ret = 0;


	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* TODO: Implement atomic check for cursor plane */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			ret = -EINVAL;
			return ret;
		}


		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		ret = fill_dc_plane_attributes(
			new_plane_crtc->dev->dev_private,
			dm_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret)
			return ret;

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}


	return ret;
}

static int
dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
				    struct drm_atomic_state *state,
				    enum surface_update_type *out_type)
{
	struct dc *dc = dm->dc;
	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
	int i, j, num_plane, ret = 0;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
	struct drm_crtc *new_plane_crtc;
	struct drm_plane *plane;

	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
	struct dc_stream_status *status = NULL;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	struct surface_info_bundle {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		DRM_ERROR("Failed to allocate update bundle\n");
		/* Set type to FULL to avoid crashing in DC */
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {

		memset(bundle, 0, sizeof(struct surface_info_bundle));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
			uint64_t tiling_flags;

			new_plane_crtc = new_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			bundle->surface_updates[num_plane].surface =
				new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
				bundle->stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				bundle->surface_updates[num_plane].gamma =
					new_dm_plane_state->dc_state->gamma_correction;
				bundle->surface_updates[num_plane].in_transfer_func =
					new_dm_plane_state->dc_state->in_transfer_func;
				bundle->stream_update.gamut_remap =
					&new_dm_crtc_state->stream->gamut_remap_matrix;
				bundle->stream_update.output_csc_transform =
					&new_dm_crtc_state->stream->csc_color_matrix;
				bundle->stream_update.out_transfer_func =
					new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   scaling_info);
			if (ret)
				goto cleanup;

			bundle->surface_updates[num_plane].scaling_info = scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags);
				if (ret)
					goto cleanup;

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					plane_info,
					&flip_addr->address);
				if (ret)
					goto cleanup;

				bundle->surface_updates[num_plane].plane_info = plane_info;
				bundle->surface_updates[num_plane].flip_addr = flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		bundle->stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(
				dc, bundle->surface_updates, num_plane,
				&bundle->stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(bundle);

	*out_type = update_type;
	return ret;
}

static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: -Error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;

	int ret, i;

	/*
	 * This bool will be set to true for any modeset/reset
	 * or plane update which implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

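	/*
	 * DC requires the context to be rebuilt in a fixed order: modified
	 * planes are removed and CRTCs disabled first, then CRTCs are
	 * (re)enabled and new or modified planes are added back, so that
	 * resources freed by the old configuration are available to the
	 * new one.
	 */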
	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-changes validation was removed because a new stream
	 * cannot be committed into the context without causing a full reset.
	 * Need to decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine whether the
	 * global lock must be taken. It is left in to check that we haven't
	 * broken any corner cases:
	 * lock_and_validation_needed true == UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed false == UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 */
		struct dm_atomic_state *new_dm_state, *old_dm_state;

		new_dm_state = dm_atomic_get_new_state(state);
		old_dm_state = dm_atomic_get_old_state(state);

		if (new_dm_state && old_dm_state) {
			if (new_dm_state->context)
				dc_release_state(new_dm_state->context);

			new_dm_state->context = old_dm_state->context;

			if (old_dm_state->context)
				dc_retain_state(old_dm_state->context);
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* ret must indicate success (0) at this point */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}

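/*
 * FreeSync over DP relies on driving the panel with a variable vertical
 * total while the sink ignores the MSA timing parameters. The
 * DP_MSA_TIMING_PAR_IGNORED bit in the DPCD DP_DOWN_STREAM_PORT_COUNT
 * register advertises that capability.
 */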
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
		dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

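/*
 * Parse the monitor range limits out of the EDID detailed timing blocks and
 * update the connector's freesync state accordingly. Called with a NULL EDID
 * to clear the cached range when the sink goes away.
 */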
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-NULL, restrict freesync to DP and eDP sinks.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

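		/*
		 * Treat the panel as freesync capable only if it advertises
		 * a refresh range wider than 10 Hz.
		 */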
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

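/*
 * Query the sink's PSR capability over the DPCD. DP_PSR_SUPPORT reports
 * which PSR version, if any, the eDP panel implements; non-eDP and
 * disconnected links are skipped.
 */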
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->psr_feature_enabled = dpcd_data[0] ? true : false;
		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	struct dc *dc = NULL;
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;
	dc = link->ctx->dc;

	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;

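	/*
	 * A psr_version of 0 means the DMCU firmware does not implement PSR,
	 * in which case the link is left unconfigured and false is returned.
	 */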
	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating the
	 * interrupt to enter PSR; initialize with a fail-safe of 2 static
	 * frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least 30 ms
	 * of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
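	/*
	 * Worked example with a hypothetical 1080p60 timing: a 148.5 MHz
	 * pixel clock (pix_clk_100hz = 1485000) with h_total = 2200 and
	 * v_total = 1125 gives vsync_rate_hz = 60, frame_time_microsec =
	 * 16666, and num_frames_static = 30000 / 16666 + 1 = 2 frames,
	 * i.e. roughly 33 ms of static screen time before PSR entry.
	 */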
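	/*
	 * Cursor, overlay, and surface updates all mark the screen as active
	 * again; PSR entry is only signalled after num_frames_static
	 * consecutive frames with none of these triggers firing.
	 */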
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}