drm/amd/display: 3.2.108
[linux-2.6-block.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
#if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

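		/*
		 * Pack the values back into reg-format: the low 16 bits carry
		 * the vertical component, the high 16 bits the horizontal one
		 * (and likewise vblank start/end for *vbl).
		 */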
		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
			VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
			VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

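	/*
	 * IRQ sources are laid out per OTG instance, so subtracting the base
	 * pageflip source recovers the OTG index of the CRTC that flipped.
	 */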
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

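	/*
	 * Worst-case buffer: the largest mode at 4 bytes per pixel (a 32bpp
	 * scanout format is assumed here).
	 */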
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

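/*
 * Audio component glue: the functions below hook DM into the generic DRM
 * audio component framework (<drm/drm_audio_component.h>) so the HDA codec
 * driver can fetch ELDs and receive audio hotplug notifications.
 */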
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

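	/*
	 * The shifts below mirror the MMHUB register granularity: system
	 * aperture bounds are kept in 256KB units (>> 18), the AGP window in
	 * 16MB units (>> 24), and GART page table addresses in 4KB pages
	 * (>> 12); pa_config then converts them back to byte addresses.
	 */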
	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (by adding 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	struct dc_phy_addr_space_config pa_config;
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
#if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
#endif
		break;
	default:
		break;
	}

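	/*
	 * amdgpu_dc_feature_mask and amdgpu_dc_debug_mask below come from the
	 * amdgpu module parameters (dcfeaturemask/dcdebugmask), letting users
	 * opt in or out of individual DC features at load time.
	 */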
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type == CHIP_RENOIR) {
		mmhub_read_system_context(adev, &pa_config);

		/* Call the DC init_memory func */
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
	case CHIP_DIMGREY_CAVEFISH:
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
	case CHIP_VANGOGH:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

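	/*
	 * A single DMCU image carries two ucode blobs: the ERAM code and the
	 * interrupt vector table. Register both with the PSP loader, using
	 * the sizes from the firmware header to split them.
	 */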
	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
#if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
#endif
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
#endif

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Read the firmware version up front so the log below reports it. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

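	/*
	 * The inst_const region excludes the PSP signature wrapper
	 * (PSP_HEADER_BYTES before and PSP_FOOTER_BYTES after the code), so
	 * only the raw DMCUB code is placed in the instruction window.
	 */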
	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

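	/* Build a linear (identity) backlight LUT spanning 0..0xFFFF. */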
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
1682
b8592b48
LL
1683/**
1684 * dm_hw_fini() - Teardown DC device
28d687ea 1685 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1686 *
1687 * Teardown components within &struct amdgpu_display_manager that require
1688 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1689 * were loaded. Also flush IRQ workqueues and disable them.
1690 */
4562236b
HW
1691static int dm_hw_fini(void *handle)
1692{
1693 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1694
1695 amdgpu_dm_hpd_fini(adev);
1696
1697 amdgpu_dm_irq_fini(adev);
21de3396 1698 amdgpu_dm_fini(adev);
4562236b
HW
1699 return 0;
1700}
1701
cdaae837
BL
1702
1703static int dm_enable_vblank(struct drm_crtc *crtc);
1704static void dm_disable_vblank(struct drm_crtc *crtc);
1705
1706static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1707 struct dc_state *state, bool enable)
1708{
1709 enum dc_irq_source irq_source;
1710 struct amdgpu_crtc *acrtc;
1711 int rc = -EBUSY;
1712 int i = 0;
1713
1714 for (i = 0; i < state->stream_count; i++) {
1715 acrtc = get_crtc_by_otg_inst(
1716 adev, state->stream_status[i].primary_otg_inst);
1717
1718 if (acrtc && state->stream_status[i].plane_count != 0) {
1719 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1720 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1721			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1722				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1723 if (rc)
1724 DRM_WARN("Failed to %s pflip interrupts\n",
1725 enable ? "enable" : "disable");
1726
1727 if (enable) {
1728 rc = dm_enable_vblank(&acrtc->base);
1729 if (rc)
1730 DRM_WARN("Failed to enable vblank interrupts\n");
1731 } else {
1732 dm_disable_vblank(&acrtc->base);
1733 }
1734
1735 }
1736 }
1737
1738}
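
/*
 * Pairing note (descriptive only): dm_suspend() uses this helper with
 * enable == false before committing zero streams for GPU reset, and
 * dm_resume() re-enables the same interrupt sources against the cached
 * dc_state once the streams have been committed again.
 */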
1739
dfd84d90 1740static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
1741{
1742 struct dc_state *context = NULL;
1743 enum dc_status res = DC_ERROR_UNEXPECTED;
1744 int i;
1745 struct dc_stream_state *del_streams[MAX_PIPES];
1746 int del_streams_count = 0;
1747
1748 memset(del_streams, 0, sizeof(del_streams));
1749
1750 context = dc_create_state(dc);
1751 if (context == NULL)
1752 goto context_alloc_fail;
1753
1754 dc_resource_state_copy_construct_current(dc, context);
1755
1756 /* First remove from context all streams */
1757 for (i = 0; i < context->stream_count; i++) {
1758 struct dc_stream_state *stream = context->streams[i];
1759
1760 del_streams[del_streams_count++] = stream;
1761 }
1762
1763 /* Remove all planes for removed streams and then remove the streams */
1764 for (i = 0; i < del_streams_count; i++) {
1765 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1766 res = DC_FAIL_DETACH_SURFACES;
1767 goto fail;
1768 }
1769
1770 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1771 if (res != DC_OK)
1772 goto fail;
1773 }
1774
1775
1776 res = dc_validate_global_state(dc, context, false);
1777
1778 if (res != DC_OK) {
1779 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1780 goto fail;
1781 }
1782
1783 res = dc_commit_state(dc, context);
1784
1785fail:
1786 dc_release_state(context);
1787
1788context_alloc_fail:
1789 return res;
1790}
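
/*
 * Usage note (descriptive only): during GPU reset, dm_suspend() caches
 * the current dc_state and then commits this empty context so that all
 * planes and streams are detached before the hardware goes down; the
 * cached state is re-committed in dm_resume().
 */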
1791
4562236b
HW
1792static int dm_suspend(void *handle)
1793{
1794 struct amdgpu_device *adev = handle;
1795 struct amdgpu_display_manager *dm = &adev->dm;
1796 int ret = 0;
4562236b 1797
53b3f8f4 1798 if (amdgpu_in_reset(adev)) {
cdaae837
BL
1799 mutex_lock(&dm->dc_lock);
1800 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1801
1802 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1803
1804 amdgpu_dm_commit_zero_streams(dm->dc);
1805
1806 amdgpu_dm_irq_suspend(adev);
1807
1808 return ret;
1809 }
4562236b 1810
d2f0b53b 1811 WARN_ON(adev->dm.cached_state);
4a580877 1812 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 1813
4a580877 1814 s3_handle_mst(adev_to_drm(adev), true);
4562236b 1815
4562236b
HW
1816 amdgpu_dm_irq_suspend(adev);
1817
a3621485 1818
32f5062d 1819 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 1820
1c2075d4 1821 return 0;
4562236b
HW
1822}
1823
1daf8c63
AD
1824static struct amdgpu_dm_connector *
1825amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1826 struct drm_crtc *crtc)
4562236b
HW
1827{
1828 uint32_t i;
c2cea706 1829 struct drm_connector_state *new_con_state;
4562236b
HW
1830 struct drm_connector *connector;
1831 struct drm_crtc *crtc_from_state;
1832
c2cea706
LSL
1833 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1834 crtc_from_state = new_con_state->crtc;
4562236b
HW
1835
1836 if (crtc_from_state == crtc)
c84dec2f 1837 return to_amdgpu_dm_connector(connector);
4562236b
HW
1838 }
1839
1840 return NULL;
1841}
1842
fbbdadf2
BL
1843static void emulated_link_detect(struct dc_link *link)
1844{
1845 struct dc_sink_init_data sink_init_data = { 0 };
1846 struct display_sink_capability sink_caps = { 0 };
1847 enum dc_edid_status edid_status;
1848 struct dc_context *dc_ctx = link->ctx;
1849 struct dc_sink *sink = NULL;
1850 struct dc_sink *prev_sink = NULL;
1851
1852 link->type = dc_connection_none;
1853 prev_sink = link->local_sink;
1854
1855 if (prev_sink != NULL)
1856 dc_sink_retain(prev_sink);
1857
1858 switch (link->connector_signal) {
1859 case SIGNAL_TYPE_HDMI_TYPE_A: {
1860 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1861 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1862 break;
1863 }
1864
1865 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1866 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1867 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1868 break;
1869 }
1870
1871 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1872 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1873 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1874 break;
1875 }
1876
1877 case SIGNAL_TYPE_LVDS: {
1878 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1879 sink_caps.signal = SIGNAL_TYPE_LVDS;
1880 break;
1881 }
1882
1883 case SIGNAL_TYPE_EDP: {
1884 sink_caps.transaction_type =
1885 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1886 sink_caps.signal = SIGNAL_TYPE_EDP;
1887 break;
1888 }
1889
1890 case SIGNAL_TYPE_DISPLAY_PORT: {
1891 sink_caps.transaction_type =
1892 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1893 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1894 break;
1895 }
1896
1897 default:
1898 DC_ERROR("Invalid connector type! signal:%d\n",
1899 link->connector_signal);
1900 return;
1901 }
1902
1903 sink_init_data.link = link;
1904 sink_init_data.sink_signal = sink_caps.signal;
1905
1906 sink = dc_sink_create(&sink_init_data);
1907 if (!sink) {
1908 DC_ERROR("Failed to create sink!\n");
1909 return;
1910 }
1911
dcd5fb82 1912 /* dc_sink_create returns a new reference */
fbbdadf2
BL
1913 link->local_sink = sink;
1914
1915 edid_status = dm_helpers_read_local_edid(
1916 link->ctx,
1917 link,
1918 sink);
1919
1920 if (edid_status != EDID_OK)
1921 DC_ERROR("Failed to read EDID");
1922
1923}
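
/*
 * Usage note (descriptive only): this path is taken when the connector
 * is forced (aconnector->base.force) but no physical sink is detected.
 * It fabricates a local sink matching the connector signal so a forced
 * mode can still be programmed; DisplayPort is mapped to
 * SIGNAL_TYPE_VIRTUAL, presumably because no real DP sink is present.
 */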
1924
cdaae837
BL
1925static void dm_gpureset_commit_state(struct dc_state *dc_state,
1926 struct amdgpu_display_manager *dm)
1927{
1928 struct {
1929 struct dc_surface_update surface_updates[MAX_SURFACES];
1930 struct dc_plane_info plane_infos[MAX_SURFACES];
1931 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1932 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1933 struct dc_stream_update stream_update;
1934	} *bundle;
1935 int k, m;
1936
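	/*
	 * Descriptive note: the update bundle is heap-allocated rather than
	 * kept on the stack because, sized for MAX_SURFACES planes, it is
	 * presumably too large for the kernel stack.
	 */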
1937 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1938
1939 if (!bundle) {
1940 dm_error("Failed to allocate update bundle\n");
1941 goto cleanup;
1942 }
1943
1944 for (k = 0; k < dc_state->stream_count; k++) {
1945 bundle->stream_update.stream = dc_state->streams[k];
1946
1947 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1948 bundle->surface_updates[m].surface =
1949 dc_state->stream_status->plane_states[m];
1950 bundle->surface_updates[m].surface->force_full_update =
1951 true;
1952 }
1953 dc_commit_updates_for_stream(
1954 dm->dc, bundle->surface_updates,
1955 dc_state->stream_status->plane_count,
1956 dc_state->streams[k], &bundle->stream_update, dc_state);
1957 }
1958
1959cleanup:
1960 kfree(bundle);
1961
1962 return;
1963}
1964
4562236b
HW
1965static int dm_resume(void *handle)
1966{
1967 struct amdgpu_device *adev = handle;
4a580877 1968 struct drm_device *ddev = adev_to_drm(adev);
4562236b 1969 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 1970 struct amdgpu_dm_connector *aconnector;
4562236b 1971 struct drm_connector *connector;
f8d2d39e 1972 struct drm_connector_list_iter iter;
4562236b 1973 struct drm_crtc *crtc;
c2cea706 1974 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
1975 struct dm_crtc_state *dm_new_crtc_state;
1976 struct drm_plane *plane;
1977 struct drm_plane_state *new_plane_state;
1978 struct dm_plane_state *dm_new_plane_state;
113b7a01 1979 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 1980 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
1981 struct dc_state *dc_state;
1982 int i, r, j;
4562236b 1983
53b3f8f4 1984 if (amdgpu_in_reset(adev)) {
cdaae837
BL
1985 dc_state = dm->cached_dc_state;
1986
1987 r = dm_dmub_hw_init(adev);
1988 if (r)
1989 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1990
1991 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1992 dc_resume(dm->dc);
1993
1994 amdgpu_dm_irq_resume_early(adev);
1995
1996 for (i = 0; i < dc_state->stream_count; i++) {
1997 dc_state->streams[i]->mode_changed = true;
1998 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1999 dc_state->stream_status->plane_states[j]->update_flags.raw
2000 = 0xffffffff;
2001 }
2002 }
2003
2004 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2005
cdaae837
BL
2006 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2007
2008 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2009
2010 dc_release_state(dm->cached_dc_state);
2011 dm->cached_dc_state = NULL;
2012
2013 amdgpu_dm_irq_resume_late(adev);
2014
2015 mutex_unlock(&dm->dc_lock);
2016
2017 return 0;
2018 }
113b7a01
LL
2019 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2020 dc_release_state(dm_state->context);
2021 dm_state->context = dc_create_state(dm->dc);
2022 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2023 dc_resource_state_construct(dm->dc, dm_state->context);
2024
8c7aea40
NK
2025 /* Before powering on DC we need to re-initialize DMUB. */
2026 r = dm_dmub_hw_init(adev);
2027 if (r)
2028 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2029
a80aa93d
ML
2030 /* power on hardware */
2031 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2032
4562236b
HW
2033 /* program HPD filter */
2034 dc_resume(dm->dc);
2035
4562236b
HW
2036 /*
2037 * early enable HPD Rx IRQ, should be done before set mode as short
2038 * pulse interrupts are used for MST
2039 */
2040 amdgpu_dm_irq_resume_early(adev);
2041
d20ebea8 2042	/* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
2043 s3_handle_mst(ddev, false);
2044
4562236b 2045	/* Do detection */
f8d2d39e
LP
2046 drm_connector_list_iter_begin(ddev, &iter);
2047 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2048 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2049
2050		/*
2051		 * This is the case of traversing through already-created
2052		 * MST connectors; they should be skipped.
2053		 */
2054 if (aconnector->mst_port)
2055 continue;
2056
03ea364c 2057 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2058 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2059 DRM_ERROR("KMS: Failed to detect connector\n");
2060
2061 if (aconnector->base.force && new_connection_type == dc_connection_none)
2062 emulated_link_detect(aconnector->dc_link);
2063 else
2064 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2065
2066 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2067 aconnector->fake_enable = false;
2068
dcd5fb82
MF
2069 if (aconnector->dc_sink)
2070 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2071 aconnector->dc_sink = NULL;
2072 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2073 mutex_unlock(&aconnector->hpd_lock);
4562236b 2074 }
f8d2d39e 2075 drm_connector_list_iter_end(&iter);
4562236b 2076
1f6010a9 2077 /* Force mode set in atomic commit */
a80aa93d 2078 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2079 new_crtc_state->active_changed = true;
4f346e65 2080
fcb4019e
LSL
2081 /*
2082 * atomic_check is expected to create the dc states. We need to release
2083 * them here, since they were duplicated as part of the suspend
2084 * procedure.
2085 */
a80aa93d 2086 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2087 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2088 if (dm_new_crtc_state->stream) {
2089 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2090 dc_stream_release(dm_new_crtc_state->stream);
2091 dm_new_crtc_state->stream = NULL;
2092 }
2093 }
2094
a80aa93d 2095 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2096 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2097 if (dm_new_plane_state->dc_state) {
2098 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2099 dc_plane_state_release(dm_new_plane_state->dc_state);
2100 dm_new_plane_state->dc_state = NULL;
2101 }
2102 }
2103
2d1af6a1 2104 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2105
a80aa93d 2106 dm->cached_state = NULL;
0a214e2f 2107
9faa4237 2108 amdgpu_dm_irq_resume_late(adev);
4562236b 2109
9340dfd3
HW
2110 amdgpu_dm_smu_write_watermarks_table(adev);
2111
2d1af6a1 2112 return 0;
4562236b
HW
2113}
2114
b8592b48
LL
2115/**
2116 * DOC: DM Lifecycle
2117 *
2118 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2119 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2120 * the base driver's device list to be initialized and torn down accordingly.
2121 *
2122 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2123 */
2124
4562236b
HW
2125static const struct amd_ip_funcs amdgpu_dm_funcs = {
2126 .name = "dm",
2127 .early_init = dm_early_init,
7abcf6b5 2128 .late_init = dm_late_init,
4562236b
HW
2129 .sw_init = dm_sw_init,
2130 .sw_fini = dm_sw_fini,
2131 .hw_init = dm_hw_init,
2132 .hw_fini = dm_hw_fini,
2133 .suspend = dm_suspend,
2134 .resume = dm_resume,
2135 .is_idle = dm_is_idle,
2136 .wait_for_idle = dm_wait_for_idle,
2137 .check_soft_reset = dm_check_soft_reset,
2138 .soft_reset = dm_soft_reset,
2139 .set_clockgating_state = dm_set_clockgating_state,
2140 .set_powergating_state = dm_set_powergating_state,
2141};
2142
2143const struct amdgpu_ip_block_version dm_ip_block =
2144{
2145 .type = AMD_IP_BLOCK_TYPE_DCE,
2146 .major = 1,
2147 .minor = 0,
2148 .rev = 0,
2149 .funcs = &amdgpu_dm_funcs,
2150};
2151
ca3268c4 2152
b8592b48
LL
2153/**
2154 * DOC: atomic
2155 *
2156 * *WIP*
2157 */
0a323b84 2158
b3663f70 2159static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2160 .fb_create = amdgpu_display_user_framebuffer_create,
366c1baa 2161 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2162 .atomic_check = amdgpu_dm_atomic_check,
da5c47f6 2163 .atomic_commit = amdgpu_dm_atomic_commit,
54f5499a
AG
2164};
2165
2166static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2167 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2168};
2169
94562810
RS
2170static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2171{
2172 u32 max_cll, min_cll, max, min, q, r;
2173 struct amdgpu_dm_backlight_caps *caps;
2174 struct amdgpu_display_manager *dm;
2175 struct drm_connector *conn_base;
2176 struct amdgpu_device *adev;
ec11fe37 2177 struct dc_link *link = NULL;
94562810
RS
2178 static const u8 pre_computed_values[] = {
2179 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2180 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2181
2182 if (!aconnector || !aconnector->dc_link)
2183 return;
2184
ec11fe37 2185 link = aconnector->dc_link;
2186 if (link->connector_signal != SIGNAL_TYPE_EDP)
2187 return;
2188
94562810 2189 conn_base = &aconnector->base;
1348969a 2190 adev = drm_to_adev(conn_base->dev);
94562810
RS
2191 dm = &adev->dm;
2192 caps = &dm->backlight_caps;
2193 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2194 caps->aux_support = false;
2195 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2196 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2197
2198 if (caps->ext_caps->bits.oled == 1 ||
2199 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2200 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2201 caps->aux_support = true;
2202
2203	/* From the specification (CTA-861-G), the maximum
2204	 * luminance is calculated as:
2205	 * Luminance = 50*2**(CV/32)
2206	 * where CV is a one-byte value.
2207	 * Evaluating this expression directly would need floating-point
2208	 * precision; to avoid that complexity, we take advantage of the fact
2209	 * that CV is divided by a constant. From Euclid's division algorithm,
2210	 * CV can be written as CV = 32*q + r. Substituting CV into the
2211	 * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only
2212	 * need to pre-compute 50*(2**(r/32)) for r in 0..31. The values
2213	 * were pre-computed with the following Ruby line:
2214	 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2215	 * and the results can be verified against
2216	 * pre_computed_values.
2217	 */
2218 q = max_cll >> 5;
2219 r = max_cll % 32;
2220 max = (1 << q) * pre_computed_values[r];
2221
2222 // min luminance: maxLum * (CV/255)^2 / 100
2223 q = DIV_ROUND_CLOSEST(min_cll, 255);
2224 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2225
2226 caps->aux_max_input_signal = max;
2227 caps->aux_min_input_signal = min;
2228}
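
/*
 * Worked example for the conversion above (values are an illustration):
 * max_cll = 70 gives q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
 *   max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 nits,
 * which matches 50 * 2^(70/32) ~= 227.8 from the CTA-861-G formula.
 */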
2229
97e51c16
HW
2230void amdgpu_dm_update_connector_after_detect(
2231 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2232{
2233 struct drm_connector *connector = &aconnector->base;
2234 struct drm_device *dev = connector->dev;
b73a22d3 2235 struct dc_sink *sink;
4562236b
HW
2236
2237 /* MST handled by drm_mst framework */
2238 if (aconnector->mst_mgr.mst_state == true)
2239 return;
2240
4562236b 2241 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2242 if (sink)
2243 dc_sink_retain(sink);
4562236b 2244
1f6010a9
DF
2245 /*
2246	 * An EDID-managed connector gets its first update only in the mode_valid hook;
4562236b 2247	 * the connector sink is then set to either a fake or a physical sink, depending on link status.
1f6010a9 2248	 * Skip if this was already done during boot.
4562236b
HW
2249 */
2250 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2251 && aconnector->dc_em_sink) {
2252
1f6010a9
DF
2253 /*
2254		 * For headless S3 resume, use the em_sink to fake a stream,
2255		 * because connector->sink is set to NULL on resume.
4562236b
HW
2256 */
2257 mutex_lock(&dev->mode_config.mutex);
2258
2259 if (sink) {
922aa1e1 2260 if (aconnector->dc_sink) {
98e6436d 2261 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2262 /*
2263				 * The retain and release below bump up the sink's
2264				 * refcount because the link no longer points
2265				 * to it after disconnect; otherwise, on the next crtc-to-connector
922aa1e1
AG
2266				 * reshuffle by UMD we would get an unwanted dc_sink release.
2267 */
dcd5fb82 2268 dc_sink_release(aconnector->dc_sink);
922aa1e1 2269 }
4562236b 2270 aconnector->dc_sink = sink;
dcd5fb82 2271 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2272 amdgpu_dm_update_freesync_caps(connector,
2273 aconnector->edid);
4562236b 2274 } else {
98e6436d 2275 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2276 if (!aconnector->dc_sink) {
4562236b 2277 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2278 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2279 }
4562236b
HW
2280 }
2281
2282 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2283
2284 if (sink)
2285 dc_sink_release(sink);
4562236b
HW
2286 return;
2287 }
2288
2289 /*
2290 * TODO: temporary guard to look for proper fix
2291 * if this sink is MST sink, we should not do anything
2292 */
dcd5fb82
MF
2293 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2294 dc_sink_release(sink);
4562236b 2295 return;
dcd5fb82 2296 }
4562236b
HW
2297
2298 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2299 /*
2300 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2301 * Do nothing!!
2302 */
f1ad2f5e 2303 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2304 aconnector->connector_id);
dcd5fb82
MF
2305 if (sink)
2306 dc_sink_release(sink);
4562236b
HW
2307 return;
2308 }
2309
f1ad2f5e 2310 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2311 aconnector->connector_id, aconnector->dc_sink, sink);
2312
2313 mutex_lock(&dev->mode_config.mutex);
2314
1f6010a9
DF
2315 /*
2316 * 1. Update status of the drm connector
2317 * 2. Send an event and let userspace tell us what to do
2318 */
4562236b 2319 if (sink) {
1f6010a9
DF
2320 /*
2321 * TODO: check if we still need the S3 mode update workaround.
2322 * If yes, put it here.
2323 */
4562236b 2324 if (aconnector->dc_sink)
98e6436d 2325 amdgpu_dm_update_freesync_caps(connector, NULL);
4562236b
HW
2326
2327 aconnector->dc_sink = sink;
dcd5fb82 2328 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2329 if (sink->dc_edid.length == 0) {
4562236b 2330 aconnector->edid = NULL;
e6142dd5
AP
2331 if (aconnector->dc_link->aux_mode) {
2332 drm_dp_cec_unset_edid(
2333 &aconnector->dm_dp_aux.aux);
2334 }
900b3cb1 2335 } else {
4562236b 2336 aconnector->edid =
e6142dd5 2337 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2338
c555f023 2339 drm_connector_update_edid_property(connector,
e6142dd5 2340 aconnector->edid);
b24bdc37 2341 drm_add_edid_modes(connector, aconnector->edid);
e6142dd5
AP
2342
2343 if (aconnector->dc_link->aux_mode)
2344 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2345 aconnector->edid);
4562236b 2346 }
e6142dd5 2347
98e6436d 2348 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2349 update_connector_ext_caps(aconnector);
4562236b 2350 } else {
e86e8947 2351 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2352 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2353 drm_connector_update_edid_property(connector, NULL);
4562236b 2354 aconnector->num_modes = 0;
dcd5fb82 2355 dc_sink_release(aconnector->dc_sink);
4562236b 2356 aconnector->dc_sink = NULL;
5326c452 2357 aconnector->edid = NULL;
0c8620d6
BL
2358#ifdef CONFIG_DRM_AMD_DC_HDCP
2359 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2360 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2361 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2362#endif
4562236b
HW
2363 }
2364
2365 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 2366
0f877894
OV
2367 update_subconnector_property(aconnector);
2368
dcd5fb82
MF
2369 if (sink)
2370 dc_sink_release(sink);
4562236b
HW
2371}
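
/*
 * Refcounting summary for the function above (descriptive only): the
 * local 'sink' reference taken at entry is dropped with dc_sink_release()
 * on every exit path, and every assignment to aconnector->dc_sink takes
 * its own dc_sink_retain(), so each stored pointer owns exactly one
 * reference.
 */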
2372
2373static void handle_hpd_irq(void *param)
2374{
c84dec2f 2375 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2376 struct drm_connector *connector = &aconnector->base;
2377 struct drm_device *dev = connector->dev;
fbbdadf2 2378 enum dc_connection_type new_connection_type = dc_connection_none;
0c8620d6 2379#ifdef CONFIG_DRM_AMD_DC_HDCP
1348969a 2380 struct amdgpu_device *adev = drm_to_adev(dev);
0c8620d6 2381#endif
4562236b 2382
1f6010a9
DF
2383 /*
2384	 * In case of failure, or for MST, there is no need to update the connector
2385	 * status or notify the OS, since (in the MST case) MST does this in its own context.
4562236b
HW
2386 */
2387 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2388
0c8620d6 2389#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 2390 if (adev->dm.hdcp_workqueue)
96a3b32e 2391 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
0c8620d6 2392#endif
2e0ac3d6
HW
2393 if (aconnector->fake_enable)
2394 aconnector->fake_enable = false;
2395
fbbdadf2
BL
2396 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2397 DRM_ERROR("KMS: Failed to detect connector\n");
2398
2399 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2400 emulated_link_detect(aconnector->dc_link);
2401
2402
2403 drm_modeset_lock_all(dev);
2404 dm_restore_drm_connector_state(dev, connector);
2405 drm_modeset_unlock_all(dev);
2406
2407 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2408 drm_kms_helper_hotplug_event(dev);
2409
2410 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
4562236b
HW
2411 amdgpu_dm_update_connector_after_detect(aconnector);
2412
2413
2414 drm_modeset_lock_all(dev);
2415 dm_restore_drm_connector_state(dev, connector);
2416 drm_modeset_unlock_all(dev);
2417
2418 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2419 drm_kms_helper_hotplug_event(dev);
2420 }
2421 mutex_unlock(&aconnector->hpd_lock);
2422
2423}
2424
c84dec2f 2425static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2426{
2427 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2428 uint8_t dret;
2429 bool new_irq_handled = false;
2430 int dpcd_addr;
2431 int dpcd_bytes_to_read;
2432
2433 const int max_process_count = 30;
2434 int process_count = 0;
2435
2436 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2437
2438 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2439 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2440 /* DPCD 0x200 - 0x201 for downstream IRQ */
2441 dpcd_addr = DP_SINK_COUNT;
2442 } else {
2443 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2444 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2445 dpcd_addr = DP_SINK_COUNT_ESI;
2446 }
2447
2448 dret = drm_dp_dpcd_read(
2449 &aconnector->dm_dp_aux.aux,
2450 dpcd_addr,
2451 esi,
2452 dpcd_bytes_to_read);
2453
2454 while (dret == dpcd_bytes_to_read &&
2455 process_count < max_process_count) {
2456 uint8_t retry;
2457 dret = 0;
2458
2459 process_count++;
2460
f1ad2f5e 2461 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
2462 /* handle HPD short pulse irq */
2463 if (aconnector->mst_mgr.mst_state)
2464 drm_dp_mst_hpd_irq(
2465 &aconnector->mst_mgr,
2466 esi,
2467 &new_irq_handled);
4562236b
HW
2468
2469 if (new_irq_handled) {
2470 /* ACK at DPCD to notify down stream */
2471 const int ack_dpcd_bytes_to_write =
2472 dpcd_bytes_to_read - 1;
2473
2474 for (retry = 0; retry < 3; retry++) {
2475 uint8_t wret;
2476
2477 wret = drm_dp_dpcd_write(
2478 &aconnector->dm_dp_aux.aux,
2479 dpcd_addr + 1,
2480 &esi[1],
2481 ack_dpcd_bytes_to_write);
2482 if (wret == ack_dpcd_bytes_to_write)
2483 break;
2484 }
2485
1f6010a9 2486 /* check if there is new irq to be handled */
4562236b
HW
2487 dret = drm_dp_dpcd_read(
2488 &aconnector->dm_dp_aux.aux,
2489 dpcd_addr,
2490 esi,
2491 dpcd_bytes_to_read);
2492
2493 new_irq_handled = false;
d4a6e8a9 2494 } else {
4562236b 2495 break;
d4a6e8a9 2496 }
4562236b
HW
2497 }
2498
2499 if (process_count == max_process_count)
f1ad2f5e 2500 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
2501}
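
/*
 * DPCD layout handled above (summary of the function's own comments):
 *   DPCD rev <  1.2: read 2 bytes at DP_SINK_COUNT (0x200 - 0x201)
 *   DPCD rev >= 1.2: read 4 bytes at DP_SINK_COUNT_ESI (0x2002 - 0x2005)
 * When the MST manager handles an IRQ, the ESI bytes starting at
 * dpcd_addr + 1 are written back as the ACK, then the window is re-read
 * until no new IRQ is reported or max_process_count is reached.
 */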
2502
2503static void handle_hpd_rx_irq(void *param)
2504{
c84dec2f 2505 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2506 struct drm_connector *connector = &aconnector->base;
2507 struct drm_device *dev = connector->dev;
53cbf65c 2508 struct dc_link *dc_link = aconnector->dc_link;
4562236b 2509 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
fbbdadf2 2510 enum dc_connection_type new_connection_type = dc_connection_none;
2a0f9270
BL
2511#ifdef CONFIG_DRM_AMD_DC_HDCP
2512 union hpd_irq_data hpd_irq_data;
1348969a 2513 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270
BL
2514
2515 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2516#endif
4562236b 2517
1f6010a9
DF
2518 /*
2519	 * TODO: Temporarily take a mutex so the hpd interrupt does not have a gpio
4562236b
HW
2520	 * conflict; once the i2c helper is implemented, this mutex should be
2521	 * retired.
2522 */
53cbf65c 2523 if (dc_link->type != dc_connection_mst_branch)
4562236b
HW
2524 mutex_lock(&aconnector->hpd_lock);
2525
2a0f9270
BL
2526
2527#ifdef CONFIG_DRM_AMD_DC_HDCP
2528 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2529#else
4e18814e 2530 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2a0f9270 2531#endif
4562236b
HW
2532 !is_mst_root_connector) {
2533 /* Downstream Port status changed. */
fbbdadf2
BL
2534 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2535 DRM_ERROR("KMS: Failed to detect connector\n");
2536
2537 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2538 emulated_link_detect(dc_link);
2539
2540 if (aconnector->fake_enable)
2541 aconnector->fake_enable = false;
2542
2543 amdgpu_dm_update_connector_after_detect(aconnector);
2544
2545
2546 drm_modeset_lock_all(dev);
2547 dm_restore_drm_connector_state(dev, connector);
2548 drm_modeset_unlock_all(dev);
2549
2550 drm_kms_helper_hotplug_event(dev);
2551 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
2552
2553 if (aconnector->fake_enable)
2554 aconnector->fake_enable = false;
2555
4562236b
HW
2556 amdgpu_dm_update_connector_after_detect(aconnector);
2557
2558
2559 drm_modeset_lock_all(dev);
2560 dm_restore_drm_connector_state(dev, connector);
2561 drm_modeset_unlock_all(dev);
2562
2563 drm_kms_helper_hotplug_event(dev);
2564 }
2565 }
2a0f9270 2566#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
2567 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2568 if (adev->dm.hdcp_workqueue)
2569 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2570 }
2a0f9270 2571#endif
4562236b 2572 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
53cbf65c 2573 (dc_link->type == dc_connection_mst_branch))
4562236b
HW
2574 dm_handle_hpd_rx_irq(aconnector);
2575
e86e8947
HV
2576 if (dc_link->type != dc_connection_mst_branch) {
2577 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
4562236b 2578 mutex_unlock(&aconnector->hpd_lock);
e86e8947 2579 }
4562236b
HW
2580}
2581
2582static void register_hpd_handlers(struct amdgpu_device *adev)
2583{
4a580877 2584 struct drm_device *dev = adev_to_drm(adev);
4562236b 2585 struct drm_connector *connector;
c84dec2f 2586 struct amdgpu_dm_connector *aconnector;
4562236b
HW
2587 const struct dc_link *dc_link;
2588 struct dc_interrupt_params int_params = {0};
2589
2590 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2591 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2592
2593 list_for_each_entry(connector,
2594 &dev->mode_config.connector_list, head) {
2595
c84dec2f 2596 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2597 dc_link = aconnector->dc_link;
2598
2599 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2600 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2601 int_params.irq_source = dc_link->irq_source_hpd;
2602
2603 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2604 handle_hpd_irq,
2605 (void *) aconnector);
2606 }
2607
2608 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2609
2610 /* Also register for DP short pulse (hpd_rx). */
2611 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2612 int_params.irq_source = dc_link->irq_source_hpd_rx;
2613
2614 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2615 handle_hpd_rx_irq,
2616 (void *) aconnector);
2617 }
2618 }
2619}
2620
55e56389
MR
2621#if defined(CONFIG_DRM_AMD_DC_SI)
2622/* Register IRQ sources and initialize IRQ callbacks */
2623static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2624{
2625 struct dc *dc = adev->dm.dc;
2626 struct common_irq_params *c_irq_params;
2627 struct dc_interrupt_params int_params = {0};
2628 int r;
2629 int i;
2630 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2631
2632 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2633 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2634
2635 /*
2636 * Actions of amdgpu_irq_add_id():
2637 * 1. Register a set() function with base driver.
2638 * Base driver will call set() function to enable/disable an
2639 * interrupt in DC hardware.
2640 * 2. Register amdgpu_dm_irq_handler().
2641 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2642 * coming from DC hardware.
2643 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2644 * for acknowledging and handling. */
2645
2646 /* Use VBLANK interrupt */
2647 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2648 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2649 if (r) {
2650 DRM_ERROR("Failed to add crtc irq id!\n");
2651 return r;
2652 }
2653
2654 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2655 int_params.irq_source =
2656 dc_interrupt_to_irq_source(dc, i+1 , 0);
2657
2658 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2659
2660 c_irq_params->adev = adev;
2661 c_irq_params->irq_src = int_params.irq_source;
2662
2663 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2664 dm_crtc_high_irq, c_irq_params);
2665 }
2666
2667 /* Use GRPH_PFLIP interrupt */
2668 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2669 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2670 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2671 if (r) {
2672 DRM_ERROR("Failed to add page flip irq id!\n");
2673 return r;
2674 }
2675
2676 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2677 int_params.irq_source =
2678 dc_interrupt_to_irq_source(dc, i, 0);
2679
2680 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2681
2682 c_irq_params->adev = adev;
2683 c_irq_params->irq_src = int_params.irq_source;
2684
2685 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2686 dm_pflip_high_irq, c_irq_params);
2687
2688 }
2689
2690 /* HPD */
2691 r = amdgpu_irq_add_id(adev, client_id,
2692 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2693 if (r) {
2694 DRM_ERROR("Failed to add hpd irq id!\n");
2695 return r;
2696 }
2697
2698 register_hpd_handlers(adev);
2699
2700 return 0;
2701}
2702#endif
2703
4562236b
HW
2704/* Register IRQ sources and initialize IRQ callbacks */
2705static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2706{
2707 struct dc *dc = adev->dm.dc;
2708 struct common_irq_params *c_irq_params;
2709 struct dc_interrupt_params int_params = {0};
2710 int r;
2711 int i;
1ffdeca6 2712 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 2713
84374725 2714 if (adev->asic_type >= CHIP_VEGA10)
3760f76c 2715 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
2716
2717 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2718 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2719
1f6010a9
DF
2720 /*
2721 * Actions of amdgpu_irq_add_id():
4562236b
HW
2722 * 1. Register a set() function with base driver.
2723 * Base driver will call set() function to enable/disable an
2724 * interrupt in DC hardware.
2725 * 2. Register amdgpu_dm_irq_handler().
2726 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2727 * coming from DC hardware.
2728 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2729 * for acknowledging and handling. */
2730
b57de80a 2731 /* Use VBLANK interrupt */
e9029155 2732 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 2733 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
2734 if (r) {
2735 DRM_ERROR("Failed to add crtc irq id!\n");
2736 return r;
2737 }
2738
2739 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2740 int_params.irq_source =
3d761e79 2741 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 2742
b57de80a 2743 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
2744
2745 c_irq_params->adev = adev;
2746 c_irq_params->irq_src = int_params.irq_source;
2747
2748 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2749 dm_crtc_high_irq, c_irq_params);
2750 }
2751
d2574c33
MK
2752 /* Use VUPDATE interrupt */
2753 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2754 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2755 if (r) {
2756 DRM_ERROR("Failed to add vupdate irq id!\n");
2757 return r;
2758 }
2759
2760 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2761 int_params.irq_source =
2762 dc_interrupt_to_irq_source(dc, i, 0);
2763
2764 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2765
2766 c_irq_params->adev = adev;
2767 c_irq_params->irq_src = int_params.irq_source;
2768
2769 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2770 dm_vupdate_high_irq, c_irq_params);
2771 }
2772
3d761e79 2773 /* Use GRPH_PFLIP interrupt */
4562236b
HW
2774 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2775 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 2776 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
2777 if (r) {
2778 DRM_ERROR("Failed to add page flip irq id!\n");
2779 return r;
2780 }
2781
2782 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2783 int_params.irq_source =
2784 dc_interrupt_to_irq_source(dc, i, 0);
2785
2786 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2787
2788 c_irq_params->adev = adev;
2789 c_irq_params->irq_src = int_params.irq_source;
2790
2791 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2792 dm_pflip_high_irq, c_irq_params);
2793
2794 }
2795
2796 /* HPD */
2c8ad2d5
AD
2797 r = amdgpu_irq_add_id(adev, client_id,
2798 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
2799 if (r) {
2800 DRM_ERROR("Failed to add hpd irq id!\n");
2801 return r;
2802 }
2803
2804 register_hpd_handlers(adev);
2805
2806 return 0;
2807}
2808
b86a1aa3 2809#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
2810/* Register IRQ sources and initialize IRQ callbacks */
2811static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2812{
2813 struct dc *dc = adev->dm.dc;
2814 struct common_irq_params *c_irq_params;
2815 struct dc_interrupt_params int_params = {0};
2816 int r;
2817 int i;
2818
2819 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2820 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2821
1f6010a9
DF
2822 /*
2823 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
2824 * 1. Register a set() function with base driver.
2825 * Base driver will call set() function to enable/disable an
2826 * interrupt in DC hardware.
2827 * 2. Register amdgpu_dm_irq_handler().
2828 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2829 * coming from DC hardware.
2830 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2831 * for acknowledging and handling.
1f6010a9 2832 */
ff5ef992
AD
2833
2834 /* Use VSTARTUP interrupt */
2835 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2836 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2837 i++) {
3760f76c 2838 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
2839
2840 if (r) {
2841 DRM_ERROR("Failed to add crtc irq id!\n");
2842 return r;
2843 }
2844
2845 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2846 int_params.irq_source =
2847 dc_interrupt_to_irq_source(dc, i, 0);
2848
2849 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2850
2851 c_irq_params->adev = adev;
2852 c_irq_params->irq_src = int_params.irq_source;
2853
2346ef47
NK
2854 amdgpu_dm_irq_register_interrupt(
2855 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2856 }
2857
2858 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2859 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2860 * to trigger at end of each vblank, regardless of state of the lock,
2861 * matching DCE behaviour.
2862 */
2863 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2864 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2865 i++) {
2866 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2867
2868 if (r) {
2869 DRM_ERROR("Failed to add vupdate irq id!\n");
2870 return r;
2871 }
2872
2873 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2874 int_params.irq_source =
2875 dc_interrupt_to_irq_source(dc, i, 0);
2876
2877 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2878
2879 c_irq_params->adev = adev;
2880 c_irq_params->irq_src = int_params.irq_source;
2881
ff5ef992 2882 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 2883 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
2884 }
2885
ff5ef992
AD
2886 /* Use GRPH_PFLIP interrupt */
2887 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2888 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2889 i++) {
3760f76c 2890 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
2891 if (r) {
2892 DRM_ERROR("Failed to add page flip irq id!\n");
2893 return r;
2894 }
2895
2896 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2897 int_params.irq_source =
2898 dc_interrupt_to_irq_source(dc, i, 0);
2899
2900 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2901
2902 c_irq_params->adev = adev;
2903 c_irq_params->irq_src = int_params.irq_source;
2904
2905 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2906 dm_pflip_high_irq, c_irq_params);
2907
2908 }
2909
2910 /* HPD */
3760f76c 2911 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
ff5ef992
AD
2912 &adev->hpd_irq);
2913 if (r) {
2914 DRM_ERROR("Failed to add hpd irq id!\n");
2915 return r;
2916 }
2917
2918 register_hpd_handlers(adev);
2919
2920 return 0;
2921}
2922#endif
2923
eb3dc897
NK
2924/*
2925 * Acquires the lock for the atomic state object and returns
2926 * the new atomic state.
2927 *
2928 * This should only be called during atomic check.
2929 */
2930static int dm_atomic_get_state(struct drm_atomic_state *state,
2931 struct dm_atomic_state **dm_state)
2932{
2933 struct drm_device *dev = state->dev;
1348969a 2934 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
2935 struct amdgpu_display_manager *dm = &adev->dm;
2936 struct drm_private_state *priv_state;
eb3dc897
NK
2937
2938 if (*dm_state)
2939 return 0;
2940
eb3dc897
NK
2941 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2942 if (IS_ERR(priv_state))
2943 return PTR_ERR(priv_state);
2944
2945 *dm_state = to_dm_atomic_state(priv_state);
2946
2947 return 0;
2948}
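
/*
 * Minimal usage sketch (illustrative only; example_check_dm_state is a
 * hypothetical caller, not part of the driver): atomic-check code
 * fetches the DM private state lazily, taking the private-obj lock only
 * on first use.
 */
static inline int example_check_dm_state(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = NULL;
	int ret;

	ret = dm_atomic_get_state(state, &dm_state);
	if (ret)
		return ret;

	/* dm_state->context is now valid for this commit. */
	return dm_state->context ? 0 : -EINVAL;
}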
2949
dfd84d90 2950static struct dm_atomic_state *
eb3dc897
NK
2951dm_atomic_get_new_state(struct drm_atomic_state *state)
2952{
2953 struct drm_device *dev = state->dev;
1348969a 2954 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
2955 struct amdgpu_display_manager *dm = &adev->dm;
2956 struct drm_private_obj *obj;
2957 struct drm_private_state *new_obj_state;
2958 int i;
2959
2960 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2961 if (obj->funcs == dm->atomic_obj.funcs)
2962 return to_dm_atomic_state(new_obj_state);
2963 }
2964
2965 return NULL;
2966}
2967
eb3dc897
NK
2968static struct drm_private_state *
2969dm_atomic_duplicate_state(struct drm_private_obj *obj)
2970{
2971 struct dm_atomic_state *old_state, *new_state;
2972
2973 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2974 if (!new_state)
2975 return NULL;
2976
2977 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2978
813d20dc
AW
2979 old_state = to_dm_atomic_state(obj->state);
2980
2981 if (old_state && old_state->context)
2982 new_state->context = dc_copy_state(old_state->context);
2983
eb3dc897
NK
2984 if (!new_state->context) {
2985 kfree(new_state);
2986 return NULL;
2987 }
2988
eb3dc897
NK
2989 return &new_state->base;
2990}
2991
2992static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2993 struct drm_private_state *state)
2994{
2995 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2996
2997 if (dm_state && dm_state->context)
2998 dc_release_state(dm_state->context);
2999
3000 kfree(dm_state);
3001}
3002
3003static struct drm_private_state_funcs dm_atomic_state_funcs = {
3004 .atomic_duplicate_state = dm_atomic_duplicate_state,
3005 .atomic_destroy_state = dm_atomic_destroy_state,
3006};
3007
4562236b
HW
3008static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3009{
eb3dc897 3010 struct dm_atomic_state *state;
4562236b
HW
3011 int r;
3012
3013 adev->mode_info.mode_config_initialized = true;
3014
4a580877
LT
3015 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3016 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3017
4a580877
LT
3018 adev_to_drm(adev)->mode_config.max_width = 16384;
3019 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3020
4a580877
LT
3021 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3022 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3023 /* indicates support for immediate flip */
4a580877 3024 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3025
4a580877 3026 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3027
eb3dc897
NK
3028 state = kzalloc(sizeof(*state), GFP_KERNEL);
3029 if (!state)
3030 return -ENOMEM;
3031
813d20dc 3032 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3033 if (!state->context) {
3034 kfree(state);
3035 return -ENOMEM;
3036 }
3037
3038 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3039
4a580877 3040 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3041 &adev->dm.atomic_obj,
eb3dc897
NK
3042 &state->base,
3043 &dm_atomic_state_funcs);
3044
3dc9b1ce 3045 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3046 if (r) {
3047 dc_release_state(state->context);
3048 kfree(state);
4562236b 3049 return r;
b67a468a 3050 }
4562236b 3051
6ce8f316 3052 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3053 if (r) {
3054 dc_release_state(state->context);
3055 kfree(state);
6ce8f316 3056 return r;
b67a468a 3057 }
6ce8f316 3058
4562236b
HW
3059 return 0;
3060}
3061
206bbafe
DF
3062#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3063#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3064#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3065
4562236b
HW
3066#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3067 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3068
206bbafe
DF
3069static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3070{
3071#if defined(CONFIG_ACPI)
3072 struct amdgpu_dm_backlight_caps caps;
3073
58965855
FS
3074 memset(&caps, 0, sizeof(caps));
3075
206bbafe
DF
3076 if (dm->backlight_caps.caps_valid)
3077 return;
3078
3079 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3080 if (caps.caps_valid) {
94562810
RS
3081 dm->backlight_caps.caps_valid = true;
3082 if (caps.aux_support)
3083 return;
206bbafe
DF
3084 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3085 dm->backlight_caps.max_input_signal = caps.max_input_signal;
206bbafe
DF
3086 } else {
3087 dm->backlight_caps.min_input_signal =
3088 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3089 dm->backlight_caps.max_input_signal =
3090 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3091 }
3092#else
94562810
RS
3093 if (dm->backlight_caps.aux_support)
3094 return;
3095
8bcbc9ef
DF
3096 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3097 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3098#endif
3099}
3100
94562810
RS
3101static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3102{
3103 bool rc;
3104
3105 if (!link)
3106 return 1;
3107
3108 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3109 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3110
3111 return rc ? 0 : 1;
3112}
3113
69d9f427
AM
3114static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3115 unsigned *min, unsigned *max)
94562810 3116{
94562810 3117 if (!caps)
69d9f427 3118 return 0;
94562810 3119
69d9f427
AM
3120 if (caps->aux_support) {
3121 // Firmware limits are in nits, DC API wants millinits.
3122 *max = 1000 * caps->aux_max_input_signal;
3123 *min = 1000 * caps->aux_min_input_signal;
94562810 3124 } else {
69d9f427
AM
3125 // Firmware limits are 8-bit, PWM control is 16-bit.
3126 *max = 0x101 * caps->max_input_signal;
3127 *min = 0x101 * caps->min_input_signal;
94562810 3128 }
69d9f427
AM
3129 return 1;
3130}
94562810 3131
69d9f427
AM
3132static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3133 uint32_t brightness)
3134{
3135 unsigned min, max;
94562810 3136
69d9f427
AM
3137 if (!get_brightness_range(caps, &min, &max))
3138 return brightness;
3139
3140 // Rescale 0..255 to min..max
3141 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3142 AMDGPU_MAX_BL_LEVEL);
3143}
3144
3145static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3146 uint32_t brightness)
3147{
3148 unsigned min, max;
3149
3150 if (!get_brightness_range(caps, &min, &max))
3151 return brightness;
3152
3153 if (brightness < min)
3154 return 0;
3155 // Rescale min..max to 0..255
3156 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3157 max - min);
94562810
RS
3158}
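
/*
 * Worked example for the two conversions above (PWM path; values assume
 * the AMDGPU_DM_DEFAULT_{MIN,MAX}_BACKLIGHT defaults of 12 and 255):
 *   min = 0x101 * 12 = 3084, max = 0x101 * 255 = 65535.
 * A user brightness of 128 maps to
 *   3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432,
 * and convert_brightness_to_user(34432) maps back to 128.
 */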
3159
4562236b
HW
3160static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3161{
3162 struct amdgpu_display_manager *dm = bl_get_data(bd);
206bbafe 3163 struct amdgpu_dm_backlight_caps caps;
94562810
RS
3164 struct dc_link *link = NULL;
3165 u32 brightness;
3166 bool rc;
4562236b 3167
206bbafe
DF
3168 amdgpu_dm_update_backlight_caps(dm);
3169 caps = dm->backlight_caps;
94562810
RS
3170
3171 link = (struct dc_link *)dm->backlight_link;
3172
69d9f427 3173 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
94562810
RS
3174 // Change brightness based on AUX property
3175 if (caps.aux_support)
3176 return set_backlight_via_aux(link, brightness);
3177
3178 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3179
3180 return rc ? 0 : 1;
4562236b
HW
3181}
3182
3183static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3184{
620a0d27
DF
3185 struct amdgpu_display_manager *dm = bl_get_data(bd);
3186 int ret = dc_link_get_backlight_level(dm->backlight_link);
3187
3188 if (ret == DC_ERROR_UNEXPECTED)
3189 return bd->props.brightness;
69d9f427 3190 return convert_brightness_to_user(&dm->backlight_caps, ret);
4562236b
HW
3191}
3192
3193static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 3194 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
3195 .get_brightness = amdgpu_dm_backlight_get_brightness,
3196 .update_status = amdgpu_dm_backlight_update_status,
3197};
3198
7578ecda
AD
3199static void
3200amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
3201{
3202 char bl_name[16];
3203 struct backlight_properties props = { 0 };
3204
206bbafe
DF
3205 amdgpu_dm_update_backlight_caps(dm);
3206
4562236b 3207 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 3208 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
3209 props.type = BACKLIGHT_RAW;
3210
3211 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4a580877 3212 adev_to_drm(dm->adev)->primary->index);
4562236b
HW
3213
3214 dm->backlight_dev = backlight_device_register(bl_name,
4a580877
LT
3215 adev_to_drm(dm->adev)->dev,
3216 dm,
3217 &amdgpu_dm_backlight_ops,
3218 &props);
4562236b 3219
74baea42 3220 if (IS_ERR(dm->backlight_dev))
4562236b
HW
3221 DRM_ERROR("DM: Backlight registration failed!\n");
3222 else
f1ad2f5e 3223 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
3224}
3225
3226#endif
3227
df534fff 3228static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 3229 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
3230 enum drm_plane_type plane_type,
3231 const struct dc_plane_cap *plane_cap)
df534fff 3232{
f180b4bc 3233 struct drm_plane *plane;
df534fff
S
3234 unsigned long possible_crtcs;
3235 int ret = 0;
3236
f180b4bc 3237 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
3238 if (!plane) {
3239 DRM_ERROR("KMS: Failed to allocate plane\n");
3240 return -ENOMEM;
3241 }
b2fddb13 3242 plane->type = plane_type;
df534fff
S
3243
3244 /*
b2fddb13
NK
3245 * HACK: IGT tests expect that the primary plane for a CRTC
3246 * can only have one possible CRTC. Only expose support for
3247 * any CRTC if they're not going to be used as a primary plane
3248 * for a CRTC - like overlay or underlay planes.
df534fff
S
3249 */
3250 possible_crtcs = 1 << plane_id;
3251 if (plane_id >= dm->dc->caps.max_streams)
3252 possible_crtcs = 0xff;
3253
cc1fec57 3254 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
3255
3256 if (ret) {
3257 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 3258 kfree(plane);
df534fff
S
3259 return ret;
3260 }
3261
54087768
NK
3262 if (mode_info)
3263 mode_info->planes[plane_id] = plane;
3264
df534fff
S
3265 return ret;
3266}
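
/*
 * Worked example for the possible_crtcs logic above (values are an
 * illustration): with dc->caps.max_streams == 4, primary plane 0 gets
 * possible_crtcs = 1 << 0 = 0x1 (bound to CRTC 0 only), while an
 * overlay plane with plane_id == 4 falls outside the stream range and
 * gets possible_crtcs = 0xff (usable on any CRTC).
 */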
3267
89fc8d4e
HW
3268
3269static void register_backlight_device(struct amdgpu_display_manager *dm,
3270 struct dc_link *link)
3271{
3272#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3273 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3274
3275 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3276 link->type != dc_connection_none) {
1f6010a9
DF
3277 /*
3278		 * Even if registration fails, we should continue with
89fc8d4e
HW
3279		 * DM initialization, because not having backlight control
3280		 * is better than a black screen.
3281 */
3282 amdgpu_dm_register_backlight_device(dm);
3283
3284 if (dm->backlight_dev)
3285 dm->backlight_link = link;
3286 }
3287#endif
3288}
3289
3290
1f6010a9
DF
3291/*
3292 * In this architecture, the association
4562236b
HW
3293 * connector -> encoder -> crtc
3294 * is not really required. The crtc and connector will hold the
3295 * display_index as an abstraction to use with the DAL component.
3296 *
3297 * Returns 0 on success
3298 */
7578ecda 3299static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
3300{
3301 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 3302 int32_t i;
c84dec2f 3303 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 3304 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 3305 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 3306 uint32_t link_cnt;
cc1fec57 3307 int32_t primary_planes;
fbbdadf2 3308 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 3309 const struct dc_plane_cap *plane;
4562236b
HW
3310
3311 link_cnt = dm->dc->caps.max_links;
4562236b
HW
3312 if (amdgpu_dm_mode_config_init(dm->adev)) {
3313 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 3314 return -EINVAL;
4562236b
HW
3315 }
3316
b2fddb13
NK
3317 /* There is one primary plane per CRTC */
3318 primary_planes = dm->dc->caps.max_streams;
54087768 3319 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 3320
b2fddb13
NK
3321 /*
3322 * Initialize primary planes, implicit planes for legacy IOCTLS.
3323 * Order is reversed to match iteration order in atomic check.
3324 */
3325 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
3326 plane = &dm->dc->caps.planes[i];
3327
b2fddb13 3328 if (initialize_plane(dm, mode_info, i,
cc1fec57 3329 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 3330 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 3331 goto fail;
d4e13b0d 3332 }
df534fff 3333 }
92f3ac40 3334
0d579c7e
NK
3335 /*
3336 * Initialize overlay planes, index starting after primary planes.
3337 * These planes have a higher DRM index than the primary planes since
3338 * they should be considered as having a higher z-order.
3339 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
3340 *
3341 * Only support DCN for now, and only expose one so we don't encourage
3342 * userspace to use up all the pipes.
0d579c7e 3343 */
cc1fec57
NK
3344 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3345 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3346
3347 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3348 continue;
3349
3350 if (!plane->blends_with_above || !plane->blends_with_below)
3351 continue;
3352
ea36ad34 3353 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
3354 continue;
3355
54087768 3356 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 3357 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 3358 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 3359 goto fail;
d4e13b0d 3360 }
cc1fec57
NK
3361
3362 /* Only create one overlay plane. */
3363 break;
d4e13b0d 3364 }
4562236b 3365
d4e13b0d 3366 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 3367 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 3368 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 3369 goto fail;
4562236b 3370 }
4562236b 3371
ab2541b6 3372 dm->display_indexes_num = dm->dc->caps.max_streams;
4562236b
HW
3373
3374 /* loops over all connectors on the board */
3375 for (i = 0; i < link_cnt; i++) {
89fc8d4e 3376 struct dc_link *link = NULL;
4562236b
HW
3377
3378 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3379 DRM_ERROR(
3380 "KMS: Cannot support more than %d display indexes\n",
3381 AMDGPU_DM_MAX_DISPLAY_INDEX);
3382 continue;
3383 }
3384
3385 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3386 if (!aconnector)
cd8a2ae8 3387 goto fail;
4562236b
HW
3388
3389 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 3390 if (!aencoder)
cd8a2ae8 3391 goto fail;
4562236b
HW
3392
3393 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3394 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 3395 goto fail;
4562236b
HW
3396 }
3397
3398 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3399 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 3400 goto fail;
4562236b
HW
3401 }
3402
89fc8d4e
HW
3403 link = dc_get_link_at_index(dm->dc, i);
3404
fbbdadf2
BL
3405 if (!dc_link_detect_sink(link, &new_connection_type))
3406 DRM_ERROR("KMS: Failed to detect connector\n");
3407
3408 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3409 emulated_link_detect(link);
3410 amdgpu_dm_update_connector_after_detect(aconnector);
3411
3412 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 3413 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 3414 register_backlight_device(dm, link);
397a9bc5
RL
3415 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3416 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
3417 }
3418
3419
4562236b
HW
3420 }
3421
3422 /* Software is initialized. Now we can register interrupt handlers. */
3423 switch (adev->asic_type) {
55e56389
MR
3424#if defined(CONFIG_DRM_AMD_DC_SI)
3425 case CHIP_TAHITI:
3426 case CHIP_PITCAIRN:
3427 case CHIP_VERDE:
3428 case CHIP_OLAND:
3429 if (dce60_register_irq_handlers(dm->adev)) {
3430 DRM_ERROR("DM: Failed to initialize IRQ\n");
3431 goto fail;
3432 }
3433 break;
3434#endif
4562236b
HW
3435 case CHIP_BONAIRE:
3436 case CHIP_HAWAII:
cd4b356f
AD
3437 case CHIP_KAVERI:
3438 case CHIP_KABINI:
3439 case CHIP_MULLINS:
4562236b
HW
3440 case CHIP_TONGA:
3441 case CHIP_FIJI:
3442 case CHIP_CARRIZO:
3443 case CHIP_STONEY:
3444 case CHIP_POLARIS11:
3445 case CHIP_POLARIS10:
b264d345 3446 case CHIP_POLARIS12:
7737de91 3447 case CHIP_VEGAM:
2c8ad2d5 3448 case CHIP_VEGA10:
2325ff30 3449 case CHIP_VEGA12:
1fe6bf2f 3450 case CHIP_VEGA20:
4562236b
HW
3451 if (dce110_register_irq_handlers(dm->adev)) {
3452 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3453 goto fail;
4562236b
HW
3454 }
3455 break;
b86a1aa3 3456#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3457 case CHIP_RAVEN:
fbd2afe5 3458 case CHIP_NAVI12:
476e955d 3459 case CHIP_NAVI10:
fce651e3 3460 case CHIP_NAVI14:
30221ad8 3461 case CHIP_RENOIR:
79037324
BL
3462#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3463 case CHIP_SIENNA_CICHLID:
a6c5308f 3464 case CHIP_NAVY_FLOUNDER:
469989ca 3465#endif
2a411205
BL
3466#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
3467 case CHIP_DIMGREY_CAVEFISH:
3468#endif
469989ca
RL
3469#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
3470 case CHIP_VANGOGH:
79037324 3471#endif
ff5ef992
AD
3472 if (dcn10_register_irq_handlers(dm->adev)) {
3473 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3474 goto fail;
ff5ef992
AD
3475 }
3476 break;
3477#endif
4562236b 3478 default:
e63f8673 3479 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 3480 goto fail;
4562236b
HW
3481 }
3482
4562236b 3483 return 0;
cd8a2ae8 3484fail:
4562236b 3485 kfree(aencoder);
4562236b 3486 kfree(aconnector);
54087768 3487
59d0f396 3488 return -EINVAL;
4562236b
HW
3489}
3490
7578ecda 3491static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b
HW
3492{
3493 drm_mode_config_cleanup(dm->ddev);
eb3dc897 3494 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
3495 return;
3496}
3497
3498/******************************************************************************
3499 * amdgpu_display_funcs functions
3500 *****************************************************************************/
3501
1f6010a9 3502/*
4562236b
HW
3503 * dm_bandwidth_update - program display watermarks
3504 *
3505 * @adev: amdgpu_device pointer
3506 *
3507 * Calculate and program the display watermarks and line buffer allocation.
3508 */
3509static void dm_bandwidth_update(struct amdgpu_device *adev)
3510{
49c07a99 3511 /* TODO: implement later */
4562236b
HW
3512}
3513
39cc5be2 3514static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
3515 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3516 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
3517 .backlight_set_level = NULL, /* never called for DC */
3518 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
3519 .hpd_sense = NULL,/* called unconditionally */
3520 .hpd_set_polarity = NULL, /* called unconditionally */
3521 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3522 .page_flip_get_scanoutpos =
3523 dm_crtc_get_scanoutpos,/* called unconditionally */
3524 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3525 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3526};
3527
3528#if defined(CONFIG_DEBUG_KERNEL_DC)
3529
3ee6b26b
AD
3530static ssize_t s3_debug_store(struct device *device,
3531 struct device_attribute *attr,
3532 const char *buf,
3533 size_t count)
4562236b
HW
3534{
3535 int ret;
3536 int s3_state;
ef1de361 3537 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 3538 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
3539
3540 ret = kstrtoint(buf, 0, &s3_state);
3541
3542 if (ret == 0) {
3543 if (s3_state) {
3544 dm_resume(adev);
4a580877 3545 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
3546 } else
3547 dm_suspend(adev);
3548 }
3549
3550 return ret == 0 ? count : 0;
3551}
3552
3553DEVICE_ATTR_WO(s3_debug);
3554
3555#endif
3556
3557static int dm_early_init(void *handle)
3558{
3559 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3560
4562236b 3561 switch (adev->asic_type) {
55e56389
MR
3562#if defined(CONFIG_DRM_AMD_DC_SI)
3563 case CHIP_TAHITI:
3564 case CHIP_PITCAIRN:
3565 case CHIP_VERDE:
3566 adev->mode_info.num_crtc = 6;
3567 adev->mode_info.num_hpd = 6;
3568 adev->mode_info.num_dig = 6;
3569 break;
3570 case CHIP_OLAND:
3571 adev->mode_info.num_crtc = 2;
3572 adev->mode_info.num_hpd = 2;
3573 adev->mode_info.num_dig = 2;
3574 break;
3575#endif
4562236b
HW
3576 case CHIP_BONAIRE:
3577 case CHIP_HAWAII:
3578 adev->mode_info.num_crtc = 6;
3579 adev->mode_info.num_hpd = 6;
3580 adev->mode_info.num_dig = 6;
4562236b 3581 break;
cd4b356f
AD
3582 case CHIP_KAVERI:
3583 adev->mode_info.num_crtc = 4;
3584 adev->mode_info.num_hpd = 6;
3585 adev->mode_info.num_dig = 7;
cd4b356f
AD
3586 break;
3587 case CHIP_KABINI:
3588 case CHIP_MULLINS:
3589 adev->mode_info.num_crtc = 2;
3590 adev->mode_info.num_hpd = 6;
3591 adev->mode_info.num_dig = 6;
cd4b356f 3592 break;
4562236b
HW
3593 case CHIP_FIJI:
3594 case CHIP_TONGA:
3595 adev->mode_info.num_crtc = 6;
3596 adev->mode_info.num_hpd = 6;
3597 adev->mode_info.num_dig = 7;
4562236b
HW
3598 break;
3599 case CHIP_CARRIZO:
3600 adev->mode_info.num_crtc = 3;
3601 adev->mode_info.num_hpd = 6;
3602 adev->mode_info.num_dig = 9;
4562236b
HW
3603 break;
3604 case CHIP_STONEY:
3605 adev->mode_info.num_crtc = 2;
3606 adev->mode_info.num_hpd = 6;
3607 adev->mode_info.num_dig = 9;
4562236b
HW
3608 break;
3609 case CHIP_POLARIS11:
b264d345 3610 case CHIP_POLARIS12:
4562236b
HW
3611 adev->mode_info.num_crtc = 5;
3612 adev->mode_info.num_hpd = 5;
3613 adev->mode_info.num_dig = 5;
4562236b
HW
3614 break;
3615 case CHIP_POLARIS10:
7737de91 3616 case CHIP_VEGAM:
4562236b
HW
3617 adev->mode_info.num_crtc = 6;
3618 adev->mode_info.num_hpd = 6;
3619 adev->mode_info.num_dig = 6;
4562236b 3620 break;
2c8ad2d5 3621 case CHIP_VEGA10:
2325ff30 3622 case CHIP_VEGA12:
1fe6bf2f 3623 case CHIP_VEGA20:
2c8ad2d5
AD
3624 adev->mode_info.num_crtc = 6;
3625 adev->mode_info.num_hpd = 6;
3626 adev->mode_info.num_dig = 6;
3627 break;
b86a1aa3 3628#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3629 case CHIP_RAVEN:
3630 adev->mode_info.num_crtc = 4;
3631 adev->mode_info.num_hpd = 4;
3632 adev->mode_info.num_dig = 4;
ff5ef992 3633 break;
476e955d 3634#endif
476e955d 3635 case CHIP_NAVI10:
fbd2afe5 3636 case CHIP_NAVI12:
79037324
BL
3637#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3638 case CHIP_SIENNA_CICHLID:
a6c5308f 3639 case CHIP_NAVY_FLOUNDER:
79037324 3640#endif
476e955d
HW
3641 adev->mode_info.num_crtc = 6;
3642 adev->mode_info.num_hpd = 6;
3643 adev->mode_info.num_dig = 6;
3644 break;
469989ca
RL
3645#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
3646 case CHIP_VANGOGH:
3647 adev->mode_info.num_crtc = 4;
3648 adev->mode_info.num_hpd = 4;
3649 adev->mode_info.num_dig = 4;
3650 break;
3651#endif
fce651e3 3652 case CHIP_NAVI14:
2a411205
BL
3653#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
3654 case CHIP_DIMGREY_CAVEFISH:
3655#endif
fce651e3
BL
3656 adev->mode_info.num_crtc = 5;
3657 adev->mode_info.num_hpd = 5;
3658 adev->mode_info.num_dig = 5;
3659 break;
30221ad8
BL
3660 case CHIP_RENOIR:
3661 adev->mode_info.num_crtc = 4;
3662 adev->mode_info.num_hpd = 4;
3663 adev->mode_info.num_dig = 4;
3664 break;
4562236b 3665 default:
e63f8673 3666 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4562236b
HW
3667 return -EINVAL;
3668 }
3669
c8dd5715
MD
3670 amdgpu_dm_set_irq_funcs(adev);
3671
39cc5be2
AD
3672 if (adev->mode_info.funcs == NULL)
3673 adev->mode_info.funcs = &dm_display_funcs;
3674
1f6010a9
DF
3675 /*
3676 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 3677 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
3678 * amdgpu_device_init()
3679 */
4562236b
HW
3680#if defined(CONFIG_DEBUG_KERNEL_DC)
3681 device_create_file(
4a580877 3682 adev_to_drm(adev)->dev,
4562236b
HW
3683 &dev_attr_s3_debug);
3684#endif
3685
3686 return 0;
3687}
3688
9b690ef3 3689static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
3690 struct dc_stream_state *new_stream,
3691 struct dc_stream_state *old_stream)
9b690ef3 3692{
2afda735 3693 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3694}
3695
3696static bool modereset_required(struct drm_crtc_state *crtc_state)
3697{
2afda735 3698 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3699}
3700
7578ecda 3701static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
3702{
3703 drm_encoder_cleanup(encoder);
3704 kfree(encoder);
3705}
3706
3707static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3708 .destroy = amdgpu_dm_encoder_destroy,
3709};
3710
e7b07cee 3711
695af5f9
NK
3712static int fill_dc_scaling_info(const struct drm_plane_state *state,
3713 struct dc_scaling_info *scaling_info)
e7b07cee 3714{
6491f0c0 3715 int scale_w, scale_h;
e7b07cee 3716
695af5f9 3717 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 3718
695af5f9
NK
3719	/* Source is fixed 16.16, but we ignore the fractional bits for now... */
3720 scaling_info->src_rect.x = state->src_x >> 16;
3721 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 3722
695af5f9
NK
3723 scaling_info->src_rect.width = state->src_w >> 16;
3724 if (scaling_info->src_rect.width == 0)
3725 return -EINVAL;
3726
3727 scaling_info->src_rect.height = state->src_h >> 16;
3728 if (scaling_info->src_rect.height == 0)
3729 return -EINVAL;
3730
3731 scaling_info->dst_rect.x = state->crtc_x;
3732 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
3733
3734 if (state->crtc_w == 0)
695af5f9 3735 return -EINVAL;
e7b07cee 3736
695af5f9 3737 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
3738
3739 if (state->crtc_h == 0)
695af5f9 3740 return -EINVAL;
e7b07cee 3741
695af5f9 3742 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 3743
695af5f9
NK
3744 /* DRM doesn't specify clipping on destination output. */
3745 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 3746
6491f0c0
NK
3747 /* TODO: Validate scaling per-format with DC plane caps */
3748 scale_w = scaling_info->dst_rect.width * 1000 /
3749 scaling_info->src_rect.width;
e7b07cee 3750
6491f0c0
NK
3751 if (scale_w < 250 || scale_w > 16000)
3752 return -EINVAL;
3753
3754 scale_h = scaling_info->dst_rect.height * 1000 /
3755 scaling_info->src_rect.height;
3756
3757 if (scale_h < 250 || scale_h > 16000)
3758 return -EINVAL;
3759
695af5f9
NK
3760 /*
3761	 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
3762 * assume reasonable defaults based on the format.
3763 */
e7b07cee 3764
695af5f9 3765 return 0;
4562236b 3766}
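/*
 * Worked example for fill_dc_scaling_info() above (hypothetical numbers):
 * a 1920-wide source (src_w = 1920 << 16 in DRM's 16.16 fixed point, the
 * fractional bits being dropped by the >> 16) scaled to a 960-wide CRTC
 * rect gives scale_w = 960 * 1000 / 1920 = 500, i.e. 0.5x. The 250..16000
 * window therefore allows roughly 0.25x downscale to 16x upscale per axis.
 */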
695af5f9 3767
3ee6b26b 3768static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
5888f07a 3769 uint64_t *tiling_flags, bool *tmz_surface)
e7b07cee 3770{
707477b0
NK
3771 struct amdgpu_bo *rbo;
3772 int r;
3773
3774 if (!amdgpu_fb) {
3775 *tiling_flags = 0;
3776 *tmz_surface = false;
3777 return 0;
3778 }
3779
3780 rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3781 r = amdgpu_bo_reserve(rbo, false);
b830ebc9 3782
e7b07cee 3783 if (unlikely(r)) {
1f6010a9 3784 /* Don't show error message when returning -ERESTARTSYS */
9bbc3031
JZ
3785 if (r != -ERESTARTSYS)
3786 DRM_ERROR("Unable to reserve buffer: %d\n", r);
e7b07cee
HW
3787 return r;
3788 }
3789
e7b07cee
HW
3790 if (tiling_flags)
3791 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3792
5888f07a
HW
3793 if (tmz_surface)
3794 *tmz_surface = amdgpu_bo_encrypted(rbo);
3795
e7b07cee
HW
3796 amdgpu_bo_unreserve(rbo);
3797
3798 return r;
3799}
3800
7df7e505
NK
3801static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3802{
3803 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3804
3805 return offset ? (address + offset * 256) : 0;
3806}
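/*
 * Illustration (hypothetical numbers): DCC_OFFSET_256B stores the DCC
 * metadata offset in 256-byte units, so an encoded offset of 0x40 places
 * the metadata at address + 0x40 * 256 = address + 16 KiB.
 */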
3807
695af5f9
NK
3808static int
3809fill_plane_dcc_attributes(struct amdgpu_device *adev,
3810 const struct amdgpu_framebuffer *afb,
3811 const enum surface_pixel_format format,
3812 const enum dc_rotation_angle rotation,
12e2b2d4 3813 const struct plane_size *plane_size,
695af5f9
NK
3814 const union dc_tiling_info *tiling_info,
3815 const uint64_t info,
3816 struct dc_plane_dcc_param *dcc,
87b7ebc2
RS
3817 struct dc_plane_address *address,
3818 bool force_disable_dcc)
7df7e505
NK
3819{
3820 struct dc *dc = adev->dm.dc;
8daa1218
NC
3821 struct dc_dcc_surface_param input;
3822 struct dc_surface_dcc_cap output;
7df7e505
NK
3823 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3824 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3825 uint64_t dcc_address;
3826
8daa1218
NC
3827 memset(&input, 0, sizeof(input));
3828 memset(&output, 0, sizeof(output));
3829
87b7ebc2
RS
3830 if (force_disable_dcc)
3831 return 0;
3832
7df7e505 3833 if (!offset)
09e5665a
NK
3834 return 0;
3835
695af5f9 3836 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
09e5665a 3837 return 0;
7df7e505
NK
3838
3839 if (!dc->cap_funcs.get_dcc_compression_cap)
09e5665a 3840 return -EINVAL;
7df7e505 3841
695af5f9 3842 input.format = format;
12e2b2d4
DL
3843 input.surface_size.width = plane_size->surface_size.width;
3844 input.surface_size.height = plane_size->surface_size.height;
695af5f9 3845 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 3846
695af5f9 3847 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 3848 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 3849 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
3850 input.scan = SCAN_DIRECTION_VERTICAL;
3851
3852 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 3853 return -EINVAL;
7df7e505
NK
3854
3855 if (!output.capable)
09e5665a 3856 return -EINVAL;
7df7e505
NK
3857
3858 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
09e5665a 3859 return -EINVAL;
7df7e505 3860
09e5665a 3861 dcc->enable = 1;
12e2b2d4 3862 dcc->meta_pitch =
7df7e505 3863 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
12e2b2d4 3864 dcc->independent_64b_blks = i64b;
7df7e505
NK
3865
3866 dcc_address = get_dcc_address(afb->address, info);
09e5665a
NK
3867 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3868 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
7df7e505 3869
09e5665a
NK
3870 return 0;
3871}
3872
3873static int
320932bf 3874fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 3875 const struct amdgpu_framebuffer *afb,
695af5f9
NK
3876 const enum surface_pixel_format format,
3877 const enum dc_rotation_angle rotation,
3878 const uint64_t tiling_flags,
09e5665a 3879 union dc_tiling_info *tiling_info,
12e2b2d4 3880 struct plane_size *plane_size,
09e5665a 3881 struct dc_plane_dcc_param *dcc,
87b7ebc2 3882 struct dc_plane_address *address,
5888f07a 3883 bool tmz_surface,
87b7ebc2 3884 bool force_disable_dcc)
09e5665a 3885{
320932bf 3886 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
3887 int ret;
3888
3889 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 3890 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 3891 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
3892 memset(address, 0, sizeof(*address));
3893
5888f07a
HW
3894 address->tmz_surface = tmz_surface;
3895
695af5f9 3896 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
12e2b2d4
DL
3897 plane_size->surface_size.x = 0;
3898 plane_size->surface_size.y = 0;
3899 plane_size->surface_size.width = fb->width;
3900 plane_size->surface_size.height = fb->height;
3901 plane_size->surface_pitch =
320932bf
NK
3902 fb->pitches[0] / fb->format->cpp[0];
3903
e0634e8d
NK
3904 address->type = PLN_ADDR_TYPE_GRAPHICS;
3905 address->grph.addr.low_part = lower_32_bits(afb->address);
3906 address->grph.addr.high_part = upper_32_bits(afb->address);
1894478a 3907 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
1791e54f 3908 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 3909
12e2b2d4
DL
3910 plane_size->surface_size.x = 0;
3911 plane_size->surface_size.y = 0;
3912 plane_size->surface_size.width = fb->width;
3913 plane_size->surface_size.height = fb->height;
3914 plane_size->surface_pitch =
320932bf
NK
3915 fb->pitches[0] / fb->format->cpp[0];
3916
12e2b2d4
DL
3917 plane_size->chroma_size.x = 0;
3918 plane_size->chroma_size.y = 0;
320932bf 3919 /* TODO: set these based on surface format */
12e2b2d4
DL
3920 plane_size->chroma_size.width = fb->width / 2;
3921 plane_size->chroma_size.height = fb->height / 2;
320932bf 3922
12e2b2d4 3923 plane_size->chroma_pitch =
320932bf
NK
3924 fb->pitches[1] / fb->format->cpp[1];
3925
e0634e8d
NK
3926 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3927 address->video_progressive.luma_addr.low_part =
3928 lower_32_bits(afb->address);
3929 address->video_progressive.luma_addr.high_part =
3930 upper_32_bits(afb->address);
3931 address->video_progressive.chroma_addr.low_part =
3932 lower_32_bits(chroma_addr);
3933 address->video_progressive.chroma_addr.high_part =
3934 upper_32_bits(chroma_addr);
3935 }
09e5665a
NK
3936
3937 /* Fill GFX8 params */
3938 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3939 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3940
3941 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3942 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3943 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3944 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3945 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3946
3947 /* XXX fix me for VI */
3948 tiling_info->gfx8.num_banks = num_banks;
3949 tiling_info->gfx8.array_mode =
3950 DC_ARRAY_2D_TILED_THIN1;
3951 tiling_info->gfx8.tile_split = tile_split;
3952 tiling_info->gfx8.bank_width = bankw;
3953 tiling_info->gfx8.bank_height = bankh;
3954 tiling_info->gfx8.tile_aspect = mtaspect;
3955 tiling_info->gfx8.tile_mode =
3956 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3957 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3958 == DC_ARRAY_1D_TILED_THIN1) {
3959 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3960 }
3961
3962 tiling_info->gfx8.pipe_config =
3963 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3964
3965 if (adev->asic_type == CHIP_VEGA10 ||
3966 adev->asic_type == CHIP_VEGA12 ||
3967 adev->asic_type == CHIP_VEGA20 ||
476e955d 3968 adev->asic_type == CHIP_NAVI10 ||
fce651e3 3969 adev->asic_type == CHIP_NAVI14 ||
fbd2afe5 3970 adev->asic_type == CHIP_NAVI12 ||
79037324
BL
3971#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3972 adev->asic_type == CHIP_SIENNA_CICHLID ||
a6c5308f 3973 adev->asic_type == CHIP_NAVY_FLOUNDER ||
469989ca 3974#endif
2a411205
BL
3975#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
3976 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3977#endif
469989ca
RL
3978#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
3979 adev->asic_type == CHIP_VANGOGH ||
79037324 3980#endif
30221ad8 3981 adev->asic_type == CHIP_RENOIR ||
09e5665a
NK
3982 adev->asic_type == CHIP_RAVEN) {
3983 /* Fill GFX9 params */
3984 tiling_info->gfx9.num_pipes =
3985 adev->gfx.config.gb_addr_config_fields.num_pipes;
3986 tiling_info->gfx9.num_banks =
3987 adev->gfx.config.gb_addr_config_fields.num_banks;
3988 tiling_info->gfx9.pipe_interleave =
3989 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3990 tiling_info->gfx9.num_shader_engines =
3991 adev->gfx.config.gb_addr_config_fields.num_se;
3992 tiling_info->gfx9.max_compressed_frags =
3993 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3994 tiling_info->gfx9.num_rb_per_se =
3995 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3996 tiling_info->gfx9.swizzle =
3997 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3998 tiling_info->gfx9.shaderEnable = 1;
3999
79037324 4000#ifdef CONFIG_DRM_AMD_DC_DCN3_0
a6c5308f 4001 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
2a411205 4002 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4a3a1dc0
BN
4003 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4004 adev->asic_type == CHIP_VANGOGH)
79037324 4005 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
79037324 4006#endif
695af5f9
NK
4007 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4008 plane_size, tiling_info,
87b7ebc2
RS
4009 tiling_flags, dcc, address,
4010 force_disable_dcc);
09e5665a
NK
4011 if (ret)
4012 return ret;
4013 }
4014
4015 return 0;
7df7e505
NK
4016}
4017
d74004b6 4018static void
695af5f9 4019fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
4020 bool *per_pixel_alpha, bool *global_alpha,
4021 int *global_alpha_value)
4022{
4023 *per_pixel_alpha = false;
4024 *global_alpha = false;
4025 *global_alpha_value = 0xff;
4026
4027 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4028 return;
4029
4030 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4031 static const uint32_t alpha_formats[] = {
4032 DRM_FORMAT_ARGB8888,
4033 DRM_FORMAT_RGBA8888,
4034 DRM_FORMAT_ABGR8888,
4035 };
4036 uint32_t format = plane_state->fb->format->format;
4037 unsigned int i;
4038
4039 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4040 if (format == alpha_formats[i]) {
4041 *per_pixel_alpha = true;
4042 break;
4043 }
4044 }
4045 }
4046
4047 if (plane_state->alpha < 0xffff) {
4048 *global_alpha = true;
4049 *global_alpha_value = plane_state->alpha >> 8;
4050 }
4051}
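/*
 * Illustration (hypothetical numbers): DRM stores plane alpha as a 16-bit
 * value (0x0000..0xffff) while DC takes an 8-bit global alpha, hence the
 * >> 8 above; e.g. a 50% plane alpha of 0x8000 becomes 0x80.
 */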
4052
004fefa3
NK
4053static int
4054fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 4055 const enum surface_pixel_format format,
004fefa3
NK
4056 enum dc_color_space *color_space)
4057{
4058 bool full_range;
4059
4060 *color_space = COLOR_SPACE_SRGB;
4061
4062 /* DRM color properties only affect non-RGB formats. */
695af5f9 4063 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
4064 return 0;
4065
4066 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4067
4068 switch (plane_state->color_encoding) {
4069 case DRM_COLOR_YCBCR_BT601:
4070 if (full_range)
4071 *color_space = COLOR_SPACE_YCBCR601;
4072 else
4073 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4074 break;
4075
4076 case DRM_COLOR_YCBCR_BT709:
4077 if (full_range)
4078 *color_space = COLOR_SPACE_YCBCR709;
4079 else
4080 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4081 break;
4082
4083 case DRM_COLOR_YCBCR_BT2020:
4084 if (full_range)
4085 *color_space = COLOR_SPACE_2020_YCBCR;
4086 else
4087 return -EINVAL;
4088 break;
4089
4090 default:
4091 return -EINVAL;
4092 }
4093
4094 return 0;
4095}
4096
695af5f9
NK
4097static int
4098fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4099 const struct drm_plane_state *plane_state,
4100 const uint64_t tiling_flags,
4101 struct dc_plane_info *plane_info,
87b7ebc2 4102 struct dc_plane_address *address,
5888f07a 4103 bool tmz_surface,
87b7ebc2 4104 bool force_disable_dcc)
695af5f9
NK
4105{
4106 const struct drm_framebuffer *fb = plane_state->fb;
4107 const struct amdgpu_framebuffer *afb =
4108 to_amdgpu_framebuffer(plane_state->fb);
4109 struct drm_format_name_buf format_name;
4110 int ret;
4111
4112 memset(plane_info, 0, sizeof(*plane_info));
4113
4114 switch (fb->format->format) {
4115 case DRM_FORMAT_C8:
4116 plane_info->format =
4117 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4118 break;
4119 case DRM_FORMAT_RGB565:
4120 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4121 break;
4122 case DRM_FORMAT_XRGB8888:
4123 case DRM_FORMAT_ARGB8888:
4124 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4125 break;
4126 case DRM_FORMAT_XRGB2101010:
4127 case DRM_FORMAT_ARGB2101010:
4128 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4129 break;
4130 case DRM_FORMAT_XBGR2101010:
4131 case DRM_FORMAT_ABGR2101010:
4132 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4133 break;
4134 case DRM_FORMAT_XBGR8888:
4135 case DRM_FORMAT_ABGR8888:
4136 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4137 break;
4138 case DRM_FORMAT_NV21:
4139 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4140 break;
4141 case DRM_FORMAT_NV12:
4142 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4143 break;
cbec6477
SW
4144 case DRM_FORMAT_P010:
4145 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4146 break;
492548dc
SW
4147 case DRM_FORMAT_XRGB16161616F:
4148 case DRM_FORMAT_ARGB16161616F:
4149 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4150 break;
2a5195dc
MK
4151 case DRM_FORMAT_XBGR16161616F:
4152 case DRM_FORMAT_ABGR16161616F:
4153 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4154 break;
695af5f9
NK
4155 default:
4156 DRM_ERROR(
4157 "Unsupported screen format %s\n",
4158 drm_get_format_name(fb->format->format, &format_name));
4159 return -EINVAL;
4160 }
4161
4162 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4163 case DRM_MODE_ROTATE_0:
4164 plane_info->rotation = ROTATION_ANGLE_0;
4165 break;
4166 case DRM_MODE_ROTATE_90:
4167 plane_info->rotation = ROTATION_ANGLE_90;
4168 break;
4169 case DRM_MODE_ROTATE_180:
4170 plane_info->rotation = ROTATION_ANGLE_180;
4171 break;
4172 case DRM_MODE_ROTATE_270:
4173 plane_info->rotation = ROTATION_ANGLE_270;
4174 break;
4175 default:
4176 plane_info->rotation = ROTATION_ANGLE_0;
4177 break;
4178 }
4179
4180 plane_info->visible = true;
4181 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4182
6d83a32d
MS
4183 plane_info->layer_index = 0;
4184
695af5f9
NK
4185 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4186 &plane_info->color_space);
4187 if (ret)
4188 return ret;
4189
4190 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4191 plane_info->rotation, tiling_flags,
4192 &plane_info->tiling_info,
4193 &plane_info->plane_size,
5888f07a 4194 &plane_info->dcc, address, tmz_surface,
87b7ebc2 4195 force_disable_dcc);
695af5f9
NK
4196 if (ret)
4197 return ret;
4198
4199 fill_blending_from_plane_state(
4200 plane_state, &plane_info->per_pixel_alpha,
4201 &plane_info->global_alpha, &plane_info->global_alpha_value);
4202
4203 return 0;
4204}
4205
4206static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4207 struct dc_plane_state *dc_plane_state,
4208 struct drm_plane_state *plane_state,
4209 struct drm_crtc_state *crtc_state)
e7b07cee 4210{
cf020d49 4211 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
707477b0 4212 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
695af5f9
NK
4213 struct dc_scaling_info scaling_info;
4214 struct dc_plane_info plane_info;
695af5f9 4215 int ret;
87b7ebc2 4216 bool force_disable_dcc = false;
e7b07cee 4217
695af5f9
NK
4218 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4219 if (ret)
4220 return ret;
e7b07cee 4221
695af5f9
NK
4222 dc_plane_state->src_rect = scaling_info.src_rect;
4223 dc_plane_state->dst_rect = scaling_info.dst_rect;
4224 dc_plane_state->clip_rect = scaling_info.clip_rect;
4225 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 4226
87b7ebc2 4227 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0
NK
4228 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4229 dm_plane_state->tiling_flags,
695af5f9 4230 &plane_info,
87b7ebc2 4231 &dc_plane_state->address,
707477b0 4232 dm_plane_state->tmz_surface,
87b7ebc2 4233 force_disable_dcc);
004fefa3
NK
4234 if (ret)
4235 return ret;
4236
695af5f9
NK
4237 dc_plane_state->format = plane_info.format;
4238 dc_plane_state->color_space = plane_info.color_space;
4240 dc_plane_state->plane_size = plane_info.plane_size;
4241 dc_plane_state->rotation = plane_info.rotation;
4242 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4243 dc_plane_state->stereo_format = plane_info.stereo_format;
4244 dc_plane_state->tiling_info = plane_info.tiling_info;
4245 dc_plane_state->visible = plane_info.visible;
4246 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4247 dc_plane_state->global_alpha = plane_info.global_alpha;
4248 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4249 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 4250 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
695af5f9 4251
e277adc5
LSL
4252 /*
4253 * Always set input transfer function, since plane state is refreshed
4254 * every time.
4255 */
cf020d49
NK
4256 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4257 if (ret)
4258 return ret;
e7b07cee 4259
cf020d49 4260 return 0;
e7b07cee
HW
4261}
4262
3ee6b26b
AD
4263static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4264 const struct dm_connector_state *dm_state,
4265 struct dc_stream_state *stream)
e7b07cee
HW
4266{
4267 enum amdgpu_rmx_type rmx_type;
4268
4269	struct rect src = { 0 }; /* viewport in composition space */
4270 struct rect dst = { 0 }; /* stream addressable area */
4271
4272 /* no mode. nothing to be done */
4273 if (!mode)
4274 return;
4275
4276 /* Full screen scaling by default */
4277 src.width = mode->hdisplay;
4278 src.height = mode->vdisplay;
4279 dst.width = stream->timing.h_addressable;
4280 dst.height = stream->timing.v_addressable;
4281
f4791779
HW
4282 if (dm_state) {
4283 rmx_type = dm_state->scaling;
4284 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4285 if (src.width * dst.height <
4286 src.height * dst.width) {
4287 /* height needs less upscaling/more downscaling */
4288 dst.width = src.width *
4289 dst.height / src.height;
4290 } else {
4291 /* width needs less upscaling/more downscaling */
4292 dst.height = src.height *
4293 dst.width / src.width;
4294 }
4295 } else if (rmx_type == RMX_CENTER) {
4296 dst = src;
e7b07cee 4297 }
e7b07cee 4298
f4791779
HW
4299 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4300 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 4301
f4791779
HW
4302 if (dm_state->underscan_enable) {
4303 dst.x += dm_state->underscan_hborder / 2;
4304 dst.y += dm_state->underscan_vborder / 2;
4305 dst.width -= dm_state->underscan_hborder;
4306 dst.height -= dm_state->underscan_vborder;
4307 }
e7b07cee
HW
4308 }
4309
4310 stream->src = src;
4311 stream->dst = dst;
4312
f1ad2f5e 4313 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
e7b07cee
HW
4314 dst.x, dst.y, dst.width, dst.height);
4315
4316}
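/*
 * Worked example for the RMX_ASPECT path above (hypothetical numbers):
 * a 1280x1024 mode on a 1920x1080 stream has 1280 * 1080 < 1024 * 1920,
 * so dst.width = 1280 * 1080 / 1024 = 1350 and the image is pillarboxed
 * with dst.x = (1920 - 1350) / 2 = 285, dst.y = 0.
 */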
4317
3ee6b26b 4318static enum dc_color_depth
42ba01fc 4319convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 4320 bool is_y420, int requested_bpc)
e7b07cee 4321{
1bc22f20 4322 uint8_t bpc;
01c22997 4323
1bc22f20
SW
4324 if (is_y420) {
4325 bpc = 8;
4326
4327 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4328 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4329 bpc = 16;
4330 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4331 bpc = 12;
4332 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4333 bpc = 10;
4334 } else {
4335 bpc = (uint8_t)connector->display_info.bpc;
4336 /* Assume 8 bpc by default if no bpc is specified. */
4337 bpc = bpc ? bpc : 8;
4338 }
e7b07cee 4339
cbd14ae7 4340 if (requested_bpc > 0) {
01c22997
NK
4341 /*
4342 * Cap display bpc based on the user requested value.
4343 *
4344		 * The value for state->max_bpc may not be correctly updated
4345 * depending on when the connector gets added to the state
4346 * or if this was called outside of atomic check, so it
4347 * can't be used directly.
4348 */
cbd14ae7 4349 bpc = min_t(u8, bpc, requested_bpc);
01c22997 4350
1825fd34
NK
4351 /* Round down to the nearest even number. */
4352 bpc = bpc - (bpc & 1);
4353 }
07e3a1cf 4354
e7b07cee
HW
4355 switch (bpc) {
4356 case 0:
1f6010a9
DF
4357 /*
4358		 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
4359		 * EDID revisions before 1.4.
4360		 * TODO: Fix EDID parsing
4361 */
4362 return COLOR_DEPTH_888;
4363 case 6:
4364 return COLOR_DEPTH_666;
4365 case 8:
4366 return COLOR_DEPTH_888;
4367 case 10:
4368 return COLOR_DEPTH_101010;
4369 case 12:
4370 return COLOR_DEPTH_121212;
4371 case 14:
4372 return COLOR_DEPTH_141414;
4373 case 16:
4374 return COLOR_DEPTH_161616;
4375 default:
4376 return COLOR_DEPTH_UNDEFINED;
4377 }
4378}
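/*
 * Usage sketch (hypothetical numbers): an HDMI sink whose HF-VSDB sets
 * DRM_EDID_YCBCR420_DC_30 yields bpc = 10 for a YCbCr 4:2:0 mode; if
 * userspace requested max_bpc = 8, the min_t() cap plus even-rounding
 * reduce that to 8 and COLOR_DEPTH_888 is returned.
 */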
4379
3ee6b26b
AD
4380static enum dc_aspect_ratio
4381get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 4382{
e11d4147
LSL
4383 /* 1-1 mapping, since both enums follow the HDMI spec. */
4384 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
4385}
4386
3ee6b26b
AD
4387static enum dc_color_space
4388get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
4389{
4390 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4391
4392 switch (dc_crtc_timing->pixel_encoding) {
4393 case PIXEL_ENCODING_YCBCR422:
4394 case PIXEL_ENCODING_YCBCR444:
4395 case PIXEL_ENCODING_YCBCR420:
4396 {
4397 /*
4398		 * 27030 kHz is the separation point between HDTV and SDTV
4399		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
4400		 * respectively.
4401 */
380604e2 4402 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
4403 if (dc_crtc_timing->flags.Y_ONLY)
4404 color_space =
4405 COLOR_SPACE_YCBCR709_LIMITED;
4406 else
4407 color_space = COLOR_SPACE_YCBCR709;
4408 } else {
4409 if (dc_crtc_timing->flags.Y_ONLY)
4410 color_space =
4411 COLOR_SPACE_YCBCR601_LIMITED;
4412 else
4413 color_space = COLOR_SPACE_YCBCR601;
4414 }
4415
4416 }
4417 break;
4418 case PIXEL_ENCODING_RGB:
4419 color_space = COLOR_SPACE_SRGB;
4420 break;
4421
4422 default:
4423 WARN_ON(1);
4424 break;
4425 }
4426
4427 return color_space;
4428}
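/*
 * Illustration (hypothetical numbers): the 270300 threshold is in 100 Hz
 * units, i.e. 27.03 MHz. A 480p YCbCr stream at 27 MHz (270000) falls
 * below it and gets BT.601; 720p at 74.25 MHz (742500) gets BT.709.
 */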
4429
ea117312
TA
4430static bool adjust_colour_depth_from_display_info(
4431 struct dc_crtc_timing *timing_out,
4432 const struct drm_display_info *info)
400443e8 4433{
ea117312 4434 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 4435 int normalized_clk;
400443e8 4436 do {
380604e2 4437 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
4438 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4439 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4440 normalized_clk /= 2;
4441		/* Adjust the pixel clock per the HDMI spec, based on colour depth */
ea117312
TA
4442 switch (depth) {
4443 case COLOR_DEPTH_888:
4444 break;
400443e8
ML
4445 case COLOR_DEPTH_101010:
4446 normalized_clk = (normalized_clk * 30) / 24;
4447 break;
4448 case COLOR_DEPTH_121212:
4449 normalized_clk = (normalized_clk * 36) / 24;
4450 break;
4451 case COLOR_DEPTH_161616:
4452 normalized_clk = (normalized_clk * 48) / 24;
4453 break;
4454 default:
ea117312
TA
4455 /* The above depths are the only ones valid for HDMI. */
4456 return false;
400443e8 4457 }
ea117312
TA
4458 if (normalized_clk <= info->max_tmds_clock) {
4459 timing_out->display_color_depth = depth;
4460 return true;
4461 }
4462 } while (--depth > COLOR_DEPTH_666);
4463 return false;
400443e8 4464}
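/*
 * Worked example (hypothetical numbers): normalized_clk and
 * info->max_tmds_clock are both in kHz. A 300000 kHz mode at 12 bpc
 * normalizes to 300000 * 36 / 24 = 450000; against a 340000 kHz TMDS
 * limit the loop steps down to 10 bpc (375000, still too high) and then
 * to 8 bpc (300000), which fits and is kept.
 */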
e7b07cee 4465
42ba01fc
NK
4466static void fill_stream_properties_from_drm_display_mode(
4467 struct dc_stream_state *stream,
4468 const struct drm_display_mode *mode_in,
4469 const struct drm_connector *connector,
4470 const struct drm_connector_state *connector_state,
cbd14ae7
SW
4471 const struct dc_stream_state *old_stream,
4472 int requested_bpc)
e7b07cee
HW
4473{
4474 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 4475 const struct drm_display_info *info = &connector->display_info;
d4252eee 4476 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
4477 struct hdmi_vendor_infoframe hv_frame;
4478 struct hdmi_avi_infoframe avi_frame;
e7b07cee 4479
acf83f86
WL
4480 memset(&hv_frame, 0, sizeof(hv_frame));
4481 memset(&avi_frame, 0, sizeof(avi_frame));
4482
e7b07cee
HW
4483 timing_out->h_border_left = 0;
4484 timing_out->h_border_right = 0;
4485 timing_out->v_border_top = 0;
4486 timing_out->v_border_bottom = 0;
4487 /* TODO: un-hardcode */
fe61a2f1 4488 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 4489 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 4490 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
4491 else if (drm_mode_is_420_also(info, mode_in)
4492 && aconnector->force_yuv420_output)
4493 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 4494 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 4495 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
4496 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4497 else
4498 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4499
4500 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4501 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
4502 connector,
4503 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4504 requested_bpc);
e7b07cee
HW
4505 timing_out->scan_type = SCANNING_TYPE_NODATA;
4506 timing_out->hdmi_vic = 0;
b333730d
BL
4507
4508	if (old_stream) {
4509 timing_out->vic = old_stream->timing.vic;
4510 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4511 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4512 } else {
4513 timing_out->vic = drm_match_cea_mode(mode_in);
4514 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4515 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4516 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4517 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4518 }
e7b07cee 4519
1cb1d477
WL
4520 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4521 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4522 timing_out->vic = avi_frame.video_code;
4523 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4524 timing_out->hdmi_vic = hv_frame.vic;
4525 }
4526
e7b07cee
HW
4527 timing_out->h_addressable = mode_in->crtc_hdisplay;
4528 timing_out->h_total = mode_in->crtc_htotal;
4529 timing_out->h_sync_width =
4530 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4531 timing_out->h_front_porch =
4532 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4533 timing_out->v_total = mode_in->crtc_vtotal;
4534 timing_out->v_addressable = mode_in->crtc_vdisplay;
4535 timing_out->v_front_porch =
4536 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4537 timing_out->v_sync_width =
4538 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
380604e2 4539 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 4540 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
4541
4542 stream->output_color_space = get_output_color_space(timing_out);
4543
e43a432c
AK
4544 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4545 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
4546 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4547 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4548 drm_mode_is_420_also(info, mode_in) &&
4549 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4550 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4551 adjust_colour_depth_from_display_info(timing_out, info);
4552 }
4553 }
e7b07cee
HW
4554}
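/*
 * Worked example for the timing derivation above (CEA 1920x1080@60):
 * crtc_hdisplay 1920, crtc_hsync_start 2008, crtc_hsync_end 2052 and
 * crtc_htotal 2200 give h_front_porch = 2008 - 1920 = 88 and
 * h_sync_width = 2052 - 2008 = 44; crtc_clock 148500 (kHz) becomes
 * pix_clk_100hz = 1485000.
 */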
4555
3ee6b26b
AD
4556static void fill_audio_info(struct audio_info *audio_info,
4557 const struct drm_connector *drm_connector,
4558 const struct dc_sink *dc_sink)
e7b07cee
HW
4559{
4560 int i = 0;
4561 int cea_revision = 0;
4562 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4563
4564 audio_info->manufacture_id = edid_caps->manufacturer_id;
4565 audio_info->product_id = edid_caps->product_id;
4566
4567 cea_revision = drm_connector->display_info.cea_rev;
4568
090afc1e 4569 strscpy(audio_info->display_name,
d2b2562c 4570 edid_caps->display_name,
090afc1e 4571 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 4572
b830ebc9 4573 if (cea_revision >= 3) {
e7b07cee
HW
4574 audio_info->mode_count = edid_caps->audio_mode_count;
4575
4576 for (i = 0; i < audio_info->mode_count; ++i) {
4577 audio_info->modes[i].format_code =
4578 (enum audio_format_code)
4579 (edid_caps->audio_modes[i].format_code);
4580 audio_info->modes[i].channel_count =
4581 edid_caps->audio_modes[i].channel_count;
4582 audio_info->modes[i].sample_rates.all =
4583 edid_caps->audio_modes[i].sample_rate;
4584 audio_info->modes[i].sample_size =
4585 edid_caps->audio_modes[i].sample_size;
4586 }
4587 }
4588
4589 audio_info->flags.all = edid_caps->speaker_flags;
4590
4591	/* TODO: We only check the progressive mode; check interlaced mode too */
b830ebc9 4592 if (drm_connector->latency_present[0]) {
e7b07cee
HW
4593 audio_info->video_latency = drm_connector->video_latency[0];
4594 audio_info->audio_latency = drm_connector->audio_latency[0];
4595 }
4596
4597 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4598
4599}
4600
3ee6b26b
AD
4601static void
4602copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4603 struct drm_display_mode *dst_mode)
e7b07cee
HW
4604{
4605 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4606 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4607 dst_mode->crtc_clock = src_mode->crtc_clock;
4608 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4609 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 4610 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
4611 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4612 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4613 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4614 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4615 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4616 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4617 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4618 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4619}
4620
3ee6b26b
AD
4621static void
4622decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4623 const struct drm_display_mode *native_mode,
4624 bool scale_enabled)
e7b07cee
HW
4625{
4626 if (scale_enabled) {
4627 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4628 } else if (native_mode->clock == drm_mode->clock &&
4629 native_mode->htotal == drm_mode->htotal &&
4630 native_mode->vtotal == drm_mode->vtotal) {
4631 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4632 } else {
4633		/* no scaling and no amdgpu-inserted mode; no need to patch */
4634 }
4635}
4636
aed15309
ML
4637static struct dc_sink *
4638create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 4639{
2e0ac3d6 4640 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 4641 struct dc_sink *sink = NULL;
2e0ac3d6
HW
4642 sink_init_data.link = aconnector->dc_link;
4643 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4644
4645 sink = dc_sink_create(&sink_init_data);
423788c7 4646 if (!sink) {
2e0ac3d6 4647 DRM_ERROR("Failed to create sink!\n");
aed15309 4648 return NULL;
423788c7 4649 }
2e0ac3d6 4650 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 4651
aed15309 4652 return sink;
2e0ac3d6
HW
4653}
4654
fa2123db
ML
4655static void set_multisync_trigger_params(
4656 struct dc_stream_state *stream)
4657{
4658 if (stream->triggered_crtc_reset.enabled) {
4659 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4660 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4661 }
4662}
4663
4664static void set_master_stream(struct dc_stream_state *stream_set[],
4665 int stream_count)
4666{
4667 int j, highest_rfr = 0, master_stream = 0;
4668
4669 for (j = 0; j < stream_count; j++) {
4670 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4671 int refresh_rate = 0;
4672
380604e2 4673 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
4674 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4675 if (refresh_rate > highest_rfr) {
4676 highest_rfr = refresh_rate;
4677 master_stream = j;
4678 }
4679 }
4680 }
4681 for (j = 0; j < stream_count; j++) {
03736f4c 4682 if (stream_set[j])
fa2123db
ML
4683 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4684 }
4685}
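/*
 * Illustration of the refresh-rate estimate above (hypothetical numbers):
 * a 1920x1080@60 stream has pix_clk_100hz = 1485000, h_total = 2200 and
 * v_total = 1125, so (1485000 * 100) / (2200 * 1125) = 60 Hz; the stream
 * with the highest such rate is chosen as the multisync master.
 */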
4686
4687static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4688{
4689 int i = 0;
4690
4691 if (context->stream_count < 2)
4692 return;
4693 for (i = 0; i < context->stream_count ; i++) {
4694 if (!context->streams[i])
4695 continue;
1f6010a9
DF
4696 /*
4697 * TODO: add a function to read AMD VSDB bits and set
fa2123db 4698 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 4699 * For now it's set to false
fa2123db
ML
4700 */
4701 set_multisync_trigger_params(context->streams[i]);
4702 }
4703 set_master_stream(context->streams, context->stream_count);
4704}
4705
3ee6b26b
AD
4706static struct dc_stream_state *
4707create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4708 const struct drm_display_mode *drm_mode,
b333730d 4709 const struct dm_connector_state *dm_state,
cbd14ae7
SW
4710 const struct dc_stream_state *old_stream,
4711 int requested_bpc)
e7b07cee
HW
4712{
4713 struct drm_display_mode *preferred_mode = NULL;
391ef035 4714 struct drm_connector *drm_connector;
42ba01fc
NK
4715 const struct drm_connector_state *con_state =
4716 dm_state ? &dm_state->base : NULL;
0971c40e 4717 struct dc_stream_state *stream = NULL;
e7b07cee
HW
4718 struct drm_display_mode mode = *drm_mode;
4719 bool native_mode_found = false;
b333730d
BL
4720 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4721 int mode_refresh;
58124bf8 4722 int preferred_refresh = 0;
defeb878 4723#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 4724 struct dsc_dec_dpcd_caps dsc_caps;
df2f1015 4725#endif
df2f1015 4726 uint32_t link_bandwidth_kbps;
b333730d 4727
aed15309 4728 struct dc_sink *sink = NULL;
b830ebc9 4729 if (aconnector == NULL) {
e7b07cee 4730 DRM_ERROR("aconnector is NULL!\n");
64245fa7 4731 return stream;
e7b07cee
HW
4732 }
4733
e7b07cee 4734 drm_connector = &aconnector->base;
2e0ac3d6 4735
f4ac176e 4736 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
4737 sink = create_fake_sink(aconnector);
4738 if (!sink)
4739 return stream;
aed15309
ML
4740 } else {
4741 sink = aconnector->dc_sink;
dcd5fb82 4742 dc_sink_retain(sink);
f4ac176e 4743 }
2e0ac3d6 4744
aed15309 4745 stream = dc_create_stream_for_sink(sink);
4562236b 4746
b830ebc9 4747 if (stream == NULL) {
e7b07cee 4748 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 4749 goto finish;
e7b07cee
HW
4750 }
4751
ceb3dbb4
JL
4752 stream->dm_stream_context = aconnector;
4753
4a36fcba
WL
4754 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4755 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4756
e7b07cee
HW
4757 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4758 /* Search for preferred mode */
4759 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4760 native_mode_found = true;
4761 break;
4762 }
4763 }
4764 if (!native_mode_found)
4765 preferred_mode = list_first_entry_or_null(
4766 &aconnector->base.modes,
4767 struct drm_display_mode,
4768 head);
4769
b333730d
BL
4770 mode_refresh = drm_mode_vrefresh(&mode);
4771
b830ebc9 4772 if (preferred_mode == NULL) {
1f6010a9
DF
4773 /*
4774		 * This may not be an error; the use case is when we have no
e7b07cee
HW
4775		 * usermode calls to reset and set mode upon hotplug. In this
4776		 * case, we call set mode ourselves to restore the previous mode,
4777		 * and the mode list may not have been filled in yet.
4778 */
f1ad2f5e 4779 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee
HW
4780 } else {
4781 decide_crtc_timing_for_drm_display_mode(
4782 &mode, preferred_mode,
f4791779 4783 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 4784 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
4785 }
4786
f783577c
JFZ
4787 if (!dm_state)
4788 drm_mode_set_crtcinfo(&mode, 0);
4789
b333730d
BL
4790 /*
4791	 * If scaling is enabled and the refresh rate didn't change,
4792	 * we copy the vic and polarities of the old timings.
4793 */
4794 if (!scale || mode_refresh != preferred_refresh)
4795 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 4796 &mode, &aconnector->base, con_state, NULL, requested_bpc);
b333730d
BL
4797 else
4798 fill_stream_properties_from_drm_display_mode(stream,
cbd14ae7 4799 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
b333730d 4800
df2f1015
DF
4801 stream->timing.flags.DSC = 0;
4802
4803 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 4804#if defined(CONFIG_DRM_AMD_DC_DCN)
2af0f378
NC
4805 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4806 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6d824ed5 4807 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
df2f1015 4808 &dsc_caps);
defeb878 4809#endif
df2f1015
DF
4810 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4811 dc_link_get_link_cap(aconnector->dc_link));
4812
defeb878 4813#if defined(CONFIG_DRM_AMD_DC_DCN)
0749ddeb 4814 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
bcc6aa61 4815 /* Set DSC policy according to dsc_clock_en */
0749ddeb
EB
4816 dc_dsc_policy_set_enable_dsc_when_not_needed(
4817 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
bcc6aa61 4818
0417df16 4819 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 4820 &dsc_caps,
0417df16 4821 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
9abdf392 4822 0,
df2f1015
DF
4823 link_bandwidth_kbps,
4824 &stream->timing,
4825 &stream->timing.dsc_cfg))
4826 stream->timing.flags.DSC = 1;
27e84dd7 4827 /* Overwrite the stream flag if DSC is enabled through debugfs */
0749ddeb 4828 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
097e6d98 4829 stream->timing.flags.DSC = 1;
734e4c97 4830
28b2f656
EB
4831 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4832 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
734e4c97 4833
28b2f656
EB
4834 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4835 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5268bf13
EB
4836
4837 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4838 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
097e6d98 4839 }
39a4eb85 4840#endif
df2f1015 4841 }
39a4eb85 4842
e7b07cee
HW
4843 update_stream_scaling_settings(&mode, dm_state, stream);
4844
4845 fill_audio_info(
4846 &stream->audio_info,
4847 drm_connector,
aed15309 4848 sink);
e7b07cee 4849
ceb3dbb4 4850 update_stream_signal(stream, sink);
9182b4cb 4851
d832fc3b 4852 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
4853 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4854
8a488f5d
RL
4855 if (stream->link->psr_settings.psr_feature_enabled) {
4856 //
4857		// Decide whether the stream supports VSC SDP colorimetry
4858		// before building the VSC info packet.
4859 //
4860 stream->use_vsc_sdp_for_colorimetry = false;
4861 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4862 stream->use_vsc_sdp_for_colorimetry =
4863 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4864 } else {
4865 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4866 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 4867 }
8a488f5d 4868 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309 4869 }
aed15309 4870finish:
dcd5fb82 4871 dc_sink_release(sink);
9e3efe3e 4872
e7b07cee
HW
4873 return stream;
4874}
4875
7578ecda 4876static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
4877{
4878 drm_crtc_cleanup(crtc);
4879 kfree(crtc);
4880}
4881
4882static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 4883 struct drm_crtc_state *state)
e7b07cee
HW
4884{
4885 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4886
4887	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4888 if (cur->stream)
4889 dc_stream_release(cur->stream);
4890
4891
4892 __drm_atomic_helper_crtc_destroy_state(state);
4893
4894
4895 kfree(state);
4896}
4897
4898static void dm_crtc_reset_state(struct drm_crtc *crtc)
4899{
4900 struct dm_crtc_state *state;
4901
4902 if (crtc->state)
4903 dm_crtc_destroy_state(crtc, crtc->state);
4904
4905 state = kzalloc(sizeof(*state), GFP_KERNEL);
4906 if (WARN_ON(!state))
4907 return;
4908
1f8a52ec 4909 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
4910}
4911
static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;

	/* TODO: Duplicate the dc_stream here once the stream object is flattened */

	return &state->base;
}

static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	int rc;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
			 acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}

static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in VRR mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}

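/*
 * Illustration only: the vblank/vupdate pairing above reduces to this
 * table, with amdgpu_dm_vrr_active() reporting whether variable refresh
 * is active on the CRTC state:
 *
 *	enable   vrr active   vupdate irq   vblank irq
 *	true     true         on            on
 *	true     false        unchanged     on
 *	false    any          off           off
 *
 * Disabling always turns vupdate off so that a later fixed-refresh
 * enable does not leave a stale vupdate interrupt armed.
 */
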
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}

/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in the context of the HPD irq.
	 * 2. This interface *is* called in the context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	update_subconnector_property(aconnector);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	/*
	 * Call only if the mst_mgr was initialized before, since that is not
	 * done for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of a headless boot with force on for a DP managed connector,
	 * these settings have to be != 0 to get an initial modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	return stream;
}

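/*
 * Illustration only: the retry loop above walks the requested bpc down in
 * steps of two until validation passes or the floor of 6 bpc is reached.
 * For a connector state asking for max_requested_bpc = 10, the attempted
 * sequence is:
 *
 *	requested_bpc: 10 -> 8 -> 6 -> give up (stream == NULL)
 *
 * Each failed attempt releases the stream before retrying, so no dc_stream
 * reference is leaked on the fallback path.
 */
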
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}

static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}

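/*
 * Illustration only, assuming the usual CTA-861-G DRM infoframe layout:
 * hdmi_drm_infoframe_pack_only() emits a 4-byte header followed by the
 * 26-byte static metadata payload into buf[]:
 *
 *	buf[0]      type (0x87)
 *	buf[1]      version (0x01)
 *	buf[2]      length (0x1A == 26)
 *	buf[3]      checksum
 *	buf[4..29]  payload (EOTF, primaries, luminance, MaxCLL/MaxFALL)
 *
 * which is why the HDMI path above copies buf[3] as the checksum byte and
 * both paths copy 26 bytes starting at buf[4].
 */
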
static bool
is_hdr_metadata_different(const struct drm_connector_state *old_state,
			  const struct drm_connector_state *new_state)
{
	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;

	if (old_blob != new_blob) {
		if (old_blob && new_blob &&
		    old_blob->length == new_blob->length)
			return memcmp(old_blob->data, new_blob->data,
				      old_blob->length) != 0;

		return true;
	}

	return false;
}

static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}

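/*
 * Illustration only: the mode_changed logic above reduces to these
 * transitions of the hdr_output_metadata blob:
 *
 *	old blob   new blob   result
 *	NULL       set        full modeset (entering HDR)
 *	set        NULL       full modeset (exiting HDR)
 *	set        set'       fast metadata update, stream stays enabled
 *	NULL       NULL       no-op (is_hdr_metadata_different() is false)
 */
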
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, the bigger
	 * resolution modes will be filtered by drm_mode_validate_size(), and
	 * those modes are missing after the user starts lightdm. So we need
	 * to renew the modes list in the get_modes callback, not just return
	 * the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}

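/*
 * Illustration only: a plane can appear in new_crtc_state->plane_mask
 * without having its own state in this atomic commit; such a plane kept
 * its previously validated configuration and still counts as active:
 *
 *	in plane_mask   in atomic state   fb != NULL   counted?
 *	yes             no                (unchanged)  yes
 *	yes             yes               yes          yes
 *	yes             yes               no           no
 *
 * Cursor planes are skipped because the DM drives the hardware cursor
 * outside of DC's plane programming (they are "fake" planes here).
 */
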
static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
					 struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);
}

static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	dm_update_crtc_active_planes(crtc, state);

	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/*
	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
	 * planes are disabled, which is not supported by the hardware. And there is legacy
	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
	 */
	if (state->enable &&
	    !(state->plane_mask & drm_plane_mask(crtc->primary)))
		return -EINVAL;

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}

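/*
 * Illustration only: the primary-plane rule above is what rejects, e.g., a
 * legacy cursor ioctl that would leave a CRTC enabled with only the cursor
 * plane active:
 *
 *	CRTC enabled, plane_mask = {cursor}          -> -EINVAL
 *	CRTC enabled, plane_mask = {primary, cursor} -> check continues
 *	CRTC disabled                                -> check continues
 *
 * Failing early in atomic check keeps drm_mode_cursor_universal from ever
 * committing a cursor-only configuration the hardware cannot scan out.
 */
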
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}

static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}

static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}

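/*
 * Illustration only, a rough worked example of the PBN math above for a
 * 4K@60 RGB stream at 8 bpc (so bpp = 8 * 3 = 24) with a pixel clock of
 * about 297000 kHz:
 *
 *	payload bandwidth = 297000 kHz * 24 bpp / 8 = 891000 kBps
 *	PBN ~= 891000 * 64/54 * 1.006 / 1000 ~= 1063
 *
 * which matches what drm_dp_calc_pbn_mode(297000, 24, false) returns; the
 * VCPI slot count is then derived from that PBN and the link's PBN divider.
 */
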
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};

#if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif

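/*
 * Illustration only: note the unit change on the DSC path above. With DSC
 * enabled, stream->timing.dsc_cfg.bits_per_pixel is in units of 1/16th of
 * a bit per pixel, and drm_dp_calc_pbn_mode(clock, bpp, true) expects
 * exactly that, e.g. a DSC target of 12 bpp is passed as 192. The non-DSC
 * path in dm_encoder_helper_atomic_check() instead passes whole bits per
 * pixel with dsc == false.
 */
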
static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	/* Framebuffer hasn't been updated yet, so retain old flags. */
	dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
	dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;

	return &dm_plane_state->base;
}

static void dm_drm_plane_destroy_state(struct drm_plane *plane,
				       struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};

static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			dm_plane_state_new->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			dm_plane_state_new->tmz_surface, force_disable_dcc);
	}

	return 0;
}

static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

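/*
 * Illustration only: prepare_fb and cleanup_fb form a strict pair around
 * each framebuffer's time on a plane. The lifetime, sketched:
 *
 *	prepare_fb:  reserve BO -> pin (VRAM/GTT) -> bind GART -> unreserve
 *	             -> record GPU address, take a BO reference
 *	cleanup_fb:  reserve BO -> unpin -> unreserve -> drop the reference
 *
 * The pin keeps the scanout buffer from being evicted while the display
 * hardware may still be reading it; the GART binding matters for GTT
 * scanout.
 */
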
static int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	int max_downscale = 0;
	int max_upscale = INT_MAX;

	/* TODO: These should be checked against DC plane caps */
	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, max_downscale, max_upscale, true, true);
}

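/*
 * Illustration only: drm_atomic_helper_check_plane_state() takes its scale
 * limits in 16.16 fixed point, where 1:1 is 1 << 16 == 65536. Passing a
 * minimum of 0 and a maximum of INT_MAX, as above, effectively disables
 * the scaling limits until real DC plane caps are wired in; a helper that
 * wanted to forbid any scaling would instead pass:
 *
 *	drm_atomic_helper_check_plane_state(state, new_crtc_state,
 *					    DRM_PLANE_HELPER_NO_SCALING,
 *					    DRM_PLANE_HELPER_NO_SCALING,
 *					    true, true);
 */
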
static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	dm_plane_state = to_dm_plane_state(state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(state, new_crtc_state);
	if (ret)
		return ret;

	ret = fill_dc_scaling_info(state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_plane_state *new_plane_state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};

/*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * drm check will succeed, and let DC implement the proper checks.
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       NULL, plane->type, NULL);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	if (dm->adev->asic_type >= CHIP_BONAIRE)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the plane state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}

static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;
}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
			drm_add_edid_modes(connector, edid);

		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode. Modes that come later in the
		 * probed mode list could be of a higher, preferred
		 * resolution; for example, a 3840x2160 resolution in the
		 * base EDID preferred timing and a 4096x2160 preferred
		 * resolution in a DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!edid || !drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
			drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}

static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

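/*
 * Illustration only: each i2c_msg maps one-to-one onto an i2c_payload,
 * so a typical DDC EDID block read arrives here as two messages that
 * become two payloads in one dc_submit_i2c() command:
 *
 *	msgs[0]: addr 0x50, write, 1 byte   (EDID word offset, 0x00)
 *	msgs[1]: addr 0x50, read, 128 bytes (one EDID block)
 *
 * Submitting them as a single command preserves the repeated-start
 * semantics the EDID read sequence relies on.
 */
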
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}

89fc8d4e 6608
1f6010a9
DF
6609/*
6610 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
6611 * dc_link which will be represented by this aconnector.
6612 */
7578ecda
AD
6613static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6614 struct amdgpu_dm_connector *aconnector,
6615 uint32_t link_index,
6616 struct amdgpu_encoder *aencoder)
e7b07cee
HW
6617{
6618 int res = 0;
6619 int connector_type;
6620 struct dc *dc = dm->dc;
6621 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6622 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
6623
6624 link->priv = aconnector;
e7b07cee 6625
f1ad2f5e 6626 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
6627
6628 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
6629 if (!i2c) {
6630 DRM_ERROR("Failed to create i2c adapter data\n");
6631 return -ENOMEM;
6632 }
6633
e7b07cee
HW
6634 aconnector->i2c = i2c;
6635 res = i2c_add_adapter(&i2c->base);
6636
6637 if (res) {
6638 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6639 goto out_free;
6640 }
6641
6642 connector_type = to_drm_connector_type(link->connector_signal);
6643
17165de2 6644 res = drm_connector_init_with_ddc(
e7b07cee
HW
6645 dm->ddev,
6646 &aconnector->base,
6647 &amdgpu_dm_connector_funcs,
17165de2
AP
6648 connector_type,
6649 &i2c->base);
e7b07cee
HW
6650
6651 if (res) {
6652 DRM_ERROR("connector_init failed\n");
6653 aconnector->connector_id = -1;
6654 goto out_free;
6655 }
6656
6657 drm_connector_helper_add(
6658 &aconnector->base,
6659 &amdgpu_dm_connector_helper_funcs);
6660
6661 amdgpu_dm_connector_init_helper(
6662 dm,
6663 aconnector,
6664 connector_type,
6665 link,
6666 link_index);
6667
cde4c44d 6668 drm_connector_attach_encoder(
e7b07cee
HW
6669 &aconnector->base, &aencoder->base);
6670
e7b07cee
HW
6671 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6672 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 6673 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 6674
e7b07cee
HW
6675out_free:
6676 if (res) {
6677 kfree(i2c);
6678 aconnector->i2c = NULL;
6679 }
6680 return res;
6681}
6682
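/*
 * Returns the possible_crtcs mask for an encoder: one bit per available
 * CRTC, i.e. (1 << num_crtc) - 1, clamped at six CRTCs.
 */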
6683int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6684{
6685 switch (adev->mode_info.num_crtc) {
6686 case 1:
6687 return 0x1;
6688 case 2:
6689 return 0x3;
6690 case 3:
6691 return 0x7;
6692 case 4:
6693 return 0xf;
6694 case 5:
6695 return 0x1f;
6696 case 6:
6697 default:
6698 return 0x3f;
6699 }
6700}
6701
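/* Note: every encoder is registered as DRM_MODE_ENCODER_TMDS here,
 * regardless of the signal type actually driven on the link.
 */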
7578ecda
AD
6702static int amdgpu_dm_encoder_init(struct drm_device *dev,
6703 struct amdgpu_encoder *aencoder,
6704 uint32_t link_index)
e7b07cee 6705{
1348969a 6706 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6707
6708 int res = drm_encoder_init(dev,
6709 &aencoder->base,
6710 &amdgpu_dm_encoder_funcs,
6711 DRM_MODE_ENCODER_TMDS,
6712 NULL);
6713
6714 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6715
6716 if (!res)
6717 aencoder->encoder_id = link_index;
6718 else
6719 aencoder->encoder_id = -1;
6720
6721 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6722
6723 return res;
6724}
6725
3ee6b26b
AD
6726static void manage_dm_interrupts(struct amdgpu_device *adev,
6727 struct amdgpu_crtc *acrtc,
6728 bool enable)
e7b07cee
HW
6729{
6730 /*
8fe684e9
NK
6731 * We have no guarantee that the frontend index maps to the same
6732 * backend index - some even map to more than one.
6733 *
6734 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
6735 */
6736 int irq_type =
734dd01d 6737 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
6738 adev,
6739 acrtc->crtc_id);
6740
6741 if (enable) {
6742 drm_crtc_vblank_on(&acrtc->base);
6743 amdgpu_irq_get(
6744 adev,
6745 &adev->pageflip_irq,
6746 irq_type);
6747 } else {
6748
6749 amdgpu_irq_put(
6750 adev,
6751 &adev->pageflip_irq,
6752 irq_type);
6753 drm_crtc_vblank_off(&acrtc->base);
6754 }
6755}
6756
8fe684e9
NK
6757static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6758 struct amdgpu_crtc *acrtc)
6759{
6760 int irq_type =
6761 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6762
6763 /**
6764 	 * This reads the current state for the IRQ and force-reapplies
6765 * the setting to hardware.
6766 */
6767 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6768}
6769
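/*
 * Returns true when the scaling mode or the underscan state/borders
 * changed in a way that requires the stream to be reprogrammed.
 */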
3ee6b26b
AD
6770static bool
6771is_scaling_state_different(const struct dm_connector_state *dm_state,
6772 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
6773{
6774 if (dm_state->scaling != old_dm_state->scaling)
6775 return true;
6776 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6777 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6778 return true;
6779 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6780 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6781 return true;
b830ebc9
HW
6782 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6783 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6784 return true;
e7b07cee
HW
6785 return false;
6786}
6787
0c8620d6
BL
6788#ifdef CONFIG_DRM_AMD_DC_HDCP
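/*
 * Decides whether the HDCP workqueue needs to act on this connector,
 * normalizing DRM content-protection transitions (re-enable, S3 resume,
 * content-type changes) before comparing the old and new state.
 */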
6789static bool is_content_protection_different(struct drm_connector_state *state,
6790 const struct drm_connector_state *old_state,
6791 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6792{
6793 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6794
53e108aa
BL
6795 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6796 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6797 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6798 return true;
6799 }
6800
0c8620d6
BL
6801 	/* CP is being re-enabled, ignore this */
6802 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6803 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6804 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6805 return false;
6806 }
6807
6808 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6809 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6810 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6811 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6812
6813 	/* Check if something is connected or enabled; otherwise we would start
6814 	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
6815 */
6816 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6817 aconnector->dc_sink != NULL)
6818 return true;
6819
6820 if (old_state->content_protection == state->content_protection)
6821 return false;
6822
6823 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6824 return true;
6825
6826 return false;
6827}
6828
0c8620d6 6829#endif
3ee6b26b
AD
6830static void remove_stream(struct amdgpu_device *adev,
6831 struct amdgpu_crtc *acrtc,
6832 struct dc_stream_state *stream)
e7b07cee
HW
6833{
6834 /* this is the update mode case */
e7b07cee
HW
6835
6836 acrtc->otg_inst = -1;
6837 acrtc->enabled = false;
6838}
6839
7578ecda
AD
6840static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6841 struct dc_cursor_position *position)
2a8f6ccb 6842{
f4c2cc43 6843 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
6844 int x, y;
6845 int xorigin = 0, yorigin = 0;
6846
e371e19c
NK
6847 position->enable = false;
6848 position->x = 0;
6849 position->y = 0;
6850
6851 if (!crtc || !plane->state->fb)
2a8f6ccb 6852 return 0;
2a8f6ccb
HW
6853
6854 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6855 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6856 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6857 __func__,
6858 plane->state->crtc_w,
6859 plane->state->crtc_h);
6860 return -EINVAL;
6861 }
6862
6863 x = plane->state->crtc_x;
6864 y = plane->state->crtc_y;
c14a005c 6865
e371e19c
NK
6866 if (x <= -amdgpu_crtc->max_cursor_width ||
6867 y <= -amdgpu_crtc->max_cursor_height)
6868 return 0;
6869
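	/*
	 * A cursor partially off the top/left edge cannot be programmed with
	 * negative coordinates: clamp the position to 0 and move the hotspot
	 * instead, so the visible part of the cursor stays where it is.
	 */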
2a8f6ccb
HW
6870 if (x < 0) {
6871 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6872 x = 0;
6873 }
6874 if (y < 0) {
6875 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6876 y = 0;
6877 }
6878 position->enable = true;
d243b6ff 6879 position->translate_by_source = true;
2a8f6ccb
HW
6880 position->x = x;
6881 position->y = y;
6882 position->x_hotspot = xorigin;
6883 position->y_hotspot = yorigin;
6884
6885 return 0;
6886}
6887
3ee6b26b
AD
6888static void handle_cursor_update(struct drm_plane *plane,
6889 struct drm_plane_state *old_plane_state)
e7b07cee 6890{
1348969a 6891 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
6892 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6893 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6894 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6895 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6896 uint64_t address = afb ? afb->address : 0;
6897 struct dc_cursor_position position;
6898 struct dc_cursor_attributes attributes;
6899 int ret;
6900
e7b07cee
HW
6901 if (!plane->state->fb && !old_plane_state->fb)
6902 return;
6903
f1ad2f5e 6904 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
c12a7ba5
HW
6905 __func__,
6906 amdgpu_crtc->crtc_id,
6907 plane->state->crtc_w,
6908 plane->state->crtc_h);
2a8f6ccb
HW
6909
6910 ret = get_cursor_position(plane, crtc, &position);
6911 if (ret)
6912 return;
6913
6914 if (!position.enable) {
6915 /* turn off cursor */
674e78ac
NK
6916 if (crtc_state && crtc_state->stream) {
6917 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
6918 dc_stream_set_cursor_position(crtc_state->stream,
6919 &position);
674e78ac
NK
6920 mutex_unlock(&adev->dm.dc_lock);
6921 }
2a8f6ccb 6922 return;
e7b07cee 6923 }
e7b07cee 6924
2a8f6ccb
HW
6925 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6926 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6927
c1cefe11 6928 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
6929 attributes.address.high_part = upper_32_bits(address);
6930 attributes.address.low_part = lower_32_bits(address);
6931 attributes.width = plane->state->crtc_w;
6932 attributes.height = plane->state->crtc_h;
6933 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6934 attributes.rotation_angle = 0;
6935 attributes.attribute_flags.value = 0;
6936
6937 attributes.pitch = attributes.width;
6938
886daac9 6939 if (crtc_state->stream) {
674e78ac 6940 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
6941 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6942 &attributes))
6943 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 6944
2a8f6ccb
HW
6945 if (!dc_stream_set_cursor_position(crtc_state->stream,
6946 &position))
6947 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 6948 mutex_unlock(&adev->dm.dc_lock);
886daac9 6949 }
2a8f6ccb 6950}
e7b07cee
HW
6951
6952static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6953{
6954
6955 assert_spin_locked(&acrtc->base.dev->event_lock);
6956 WARN_ON(acrtc->event);
6957
6958 acrtc->event = acrtc->base.state->event;
6959
6960 /* Set the flip status */
6961 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6962
6963 /* Mark this event as consumed */
6964 acrtc->base.state->event = NULL;
6965
6966 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6967 acrtc->crtc_id);
6968}
6969
bb47de73
NK
6970static void update_freesync_state_on_stream(
6971 struct amdgpu_display_manager *dm,
6972 struct dm_crtc_state *new_crtc_state,
180db303
NK
6973 struct dc_stream_state *new_stream,
6974 struct dc_plane_state *surface,
6975 u32 flip_timestamp_in_us)
bb47de73 6976{
09aef2c4 6977 struct mod_vrr_params vrr_params;
bb47de73 6978 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 6979 struct amdgpu_device *adev = dm->adev;
585d450c 6980 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 6981 unsigned long flags;
bb47de73
NK
6982
6983 if (!new_stream)
6984 return;
6985
6986 /*
6987 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6988 * For now it's sufficient to just guard against these conditions.
6989 */
6990
6991 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6992 return;
6993
4a580877 6994 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 6995 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 6996
180db303
NK
6997 if (surface) {
6998 mod_freesync_handle_preflip(
6999 dm->freesync_module,
7000 surface,
7001 new_stream,
7002 flip_timestamp_in_us,
7003 &vrr_params);
09aef2c4
MK
7004
7005 if (adev->family < AMDGPU_FAMILY_AI &&
7006 amdgpu_dm_vrr_active(new_crtc_state)) {
7007 mod_freesync_handle_v_update(dm->freesync_module,
7008 new_stream, &vrr_params);
e63e2491
EB
7009
7010 /* Need to call this before the frame ends. */
7011 dc_stream_adjust_vmin_vmax(dm->dc,
7012 new_crtc_state->stream,
7013 &vrr_params.adjust);
09aef2c4 7014 }
180db303 7015 }
bb47de73
NK
7016
7017 mod_freesync_build_vrr_infopacket(
7018 dm->freesync_module,
7019 new_stream,
180db303 7020 &vrr_params,
ecd0136b
HT
7021 PACKET_TYPE_VRR,
7022 TRANSFER_FUNC_UNKNOWN,
bb47de73
NK
7023 &vrr_infopacket);
7024
8a48b44c 7025 new_crtc_state->freesync_timing_changed |=
585d450c 7026 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
7027 &vrr_params.adjust,
7028 sizeof(vrr_params.adjust)) != 0);
bb47de73 7029
8a48b44c 7030 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
7031 (memcmp(&new_crtc_state->vrr_infopacket,
7032 &vrr_infopacket,
7033 sizeof(vrr_infopacket)) != 0);
7034
585d450c 7035 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
7036 new_crtc_state->vrr_infopacket = vrr_infopacket;
7037
585d450c 7038 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
7039 new_stream->vrr_infopacket = vrr_infopacket;
7040
7041 if (new_crtc_state->freesync_vrr_info_changed)
7042 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7043 new_crtc_state->base.crtc->base.id,
7044 (int)new_crtc_state->base.vrr_enabled,
180db303 7045 (int)vrr_params.state);
09aef2c4 7046
4a580877 7047 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
7048}
7049
585d450c 7050static void update_stream_irq_parameters(
e854194c
MK
7051 struct amdgpu_display_manager *dm,
7052 struct dm_crtc_state *new_crtc_state)
7053{
7054 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 7055 struct mod_vrr_params vrr_params;
e854194c 7056 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 7057 struct amdgpu_device *adev = dm->adev;
585d450c 7058 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7059 unsigned long flags;
e854194c
MK
7060
7061 if (!new_stream)
7062 return;
7063
7064 /*
7065 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7066 * For now it's sufficient to just guard against these conditions.
7067 */
7068 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7069 return;
7070
4a580877 7071 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 7072 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 7073
e854194c
MK
7074 if (new_crtc_state->vrr_supported &&
7075 config.min_refresh_in_uhz &&
7076 config.max_refresh_in_uhz) {
7077 config.state = new_crtc_state->base.vrr_enabled ?
7078 VRR_STATE_ACTIVE_VARIABLE :
7079 VRR_STATE_INACTIVE;
7080 } else {
7081 config.state = VRR_STATE_UNSUPPORTED;
7082 }
7083
7084 mod_freesync_build_vrr_params(dm->freesync_module,
7085 new_stream,
7086 &config, &vrr_params);
7087
7088 new_crtc_state->freesync_timing_changed |=
585d450c
AP
7089 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7090 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 7091
585d450c
AP
7092 new_crtc_state->freesync_config = config;
7093 /* Copy state for access from DM IRQ handler */
7094 acrtc->dm_irq_params.freesync_config = config;
7095 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7096 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 7097 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
7098}
7099
66b0c973
MK
7100static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7101 struct dm_crtc_state *new_state)
7102{
7103 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7104 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7105
7106 if (!old_vrr_active && new_vrr_active) {
7107 /* Transition VRR inactive -> active:
7108 * While VRR is active, we must not disable vblank irq, as a
7109 	 * re-enable after a disable could compute bogus vblank/pflip
7110 	 * timestamps if the disable happened inside the display front porch.
d2574c33
MK
7111 *
7112 * We also need vupdate irq for the actual core vblank handling
7113 * at end of vblank.
66b0c973 7114 */
d2574c33 7115 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
7116 drm_crtc_vblank_get(new_state->base.crtc);
7117 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7118 __func__, new_state->base.crtc->base.id);
7119 } else if (old_vrr_active && !new_vrr_active) {
7120 /* Transition VRR active -> inactive:
7121 * Allow vblank irq disable again for fixed refresh rate.
7122 */
d2574c33 7123 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
7124 drm_crtc_vblank_put(new_state->base.crtc);
7125 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7126 __func__, new_state->base.crtc->base.id);
7127 }
7128}
7129
8ad27806
NK
7130static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7131{
7132 struct drm_plane *plane;
7133 struct drm_plane_state *old_plane_state, *new_plane_state;
7134 int i;
7135
7136 /*
7137 * TODO: Make this per-stream so we don't issue redundant updates for
7138 * commits with multiple streams.
7139 */
7140 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7141 new_plane_state, i)
7142 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7143 handle_cursor_update(plane, old_plane_state);
7144}
7145
3be5262e 7146static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 7147 struct dc_state *dc_state,
3ee6b26b
AD
7148 struct drm_device *dev,
7149 struct amdgpu_display_manager *dm,
7150 struct drm_crtc *pcrtc,
420cd472 7151 bool wait_for_vblank)
e7b07cee 7152{
570c91d5 7153 uint32_t i;
8a48b44c 7154 uint64_t timestamp_ns;
e7b07cee 7155 struct drm_plane *plane;
0bc9706d 7156 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 7157 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
7158 struct drm_crtc_state *new_pcrtc_state =
7159 drm_atomic_get_new_crtc_state(state, pcrtc);
7160 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
7161 struct dm_crtc_state *dm_old_crtc_state =
7162 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 7163 int planes_count = 0, vpos, hpos;
570c91d5 7164 long r;
e7b07cee 7165 unsigned long flags;
8a48b44c 7166 struct amdgpu_bo *abo;
fdd1fe57
MK
7167 uint32_t target_vblank, last_flip_vblank;
7168 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 7169 bool pflip_present = false;
bc7f670e
DF
7170 struct {
7171 struct dc_surface_update surface_updates[MAX_SURFACES];
7172 struct dc_plane_info plane_infos[MAX_SURFACES];
7173 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 7174 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 7175 struct dc_stream_update stream_update;
74aa7bd4 7176 } *bundle;
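	/*
	 * The bundle is allocated on the heap: the MAX_SURFACES-sized arrays
	 * above make it too large to safely place on the kernel stack.
	 */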
bc7f670e 7177
74aa7bd4 7178 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 7179
74aa7bd4
DF
7180 if (!bundle) {
7181 dm_error("Failed to allocate update bundle\n");
4b510503
NK
7182 goto cleanup;
7183 }
e7b07cee 7184
8ad27806
NK
7185 /*
7186 * Disable the cursor first if we're disabling all the planes.
7187 * It'll remain on the screen after the planes are re-enabled
7188 * if we don't.
7189 */
7190 if (acrtc_state->active_planes == 0)
7191 amdgpu_dm_commit_cursors(state);
7192
e7b07cee 7193 /* update planes when needed */
0bc9706d
LSL
7194 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7195 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 7196 struct drm_crtc_state *new_crtc_state;
0bc9706d 7197 struct drm_framebuffer *fb = new_plane_state->fb;
34bafd27 7198 bool plane_needs_flip;
c7af5f77 7199 struct dc_plane_state *dc_plane;
54d76575 7200 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 7201
80c218d5
NK
7202 /* Cursor plane is handled after stream updates */
7203 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 7204 continue;
e7b07cee 7205
f5ba60fe
DD
7206 if (!fb || !crtc || pcrtc != crtc)
7207 continue;
7208
7209 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7210 if (!new_crtc_state->active)
e7b07cee
HW
7211 continue;
7212
bc7f670e 7213 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 7214
74aa7bd4 7215 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 7216 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
7217 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7218 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 7219 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 7220 }
8a48b44c 7221
695af5f9
NK
7222 fill_dc_scaling_info(new_plane_state,
7223 &bundle->scaling_infos[planes_count]);
8a48b44c 7224
695af5f9
NK
7225 bundle->surface_updates[planes_count].scaling_info =
7226 &bundle->scaling_infos[planes_count];
8a48b44c 7227
f5031000 7228 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 7229
f5031000 7230 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 7231
f5031000
DF
7232 if (!plane_needs_flip) {
7233 planes_count += 1;
7234 continue;
7235 }
8a48b44c 7236
2fac0f53
CK
7237 abo = gem_to_amdgpu_bo(fb->obj[0]);
7238
f8308898
AG
7239 /*
7240 * Wait for all fences on this FB. Do limited wait to avoid
7241 * deadlock during GPU reset when this fence will not signal
7242 * but we hold reservation lock for the BO.
7243 */
52791eee 7244 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 7245 false,
f8308898
AG
7246 msecs_to_jiffies(5000));
7247 if (unlikely(r <= 0))
ed8a5fb2 7248 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 7249
695af5f9 7250 fill_dc_plane_info_and_addr(
8ce5d842
NK
7251 dm->adev, new_plane_state,
7252 dm_new_plane_state->tiling_flags,
695af5f9 7253 &bundle->plane_infos[planes_count],
87b7ebc2 7254 &bundle->flip_addrs[planes_count].address,
8ce5d842 7255 dm_new_plane_state->tmz_surface, false);
87b7ebc2
RS
7256
7257 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7258 new_plane_state->plane->index,
7259 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
7260
7261 bundle->surface_updates[planes_count].plane_info =
7262 &bundle->plane_infos[planes_count];
8a48b44c 7263
caff0e66
NK
7264 /*
7265 * Only allow immediate flips for fast updates that don't
7266 		 * change FB pitch, DCC state, rotation or mirroring.
7267 */
f5031000 7268 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 7269 crtc->state->async_flip &&
caff0e66 7270 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 7271
f5031000
DF
7272 timestamp_ns = ktime_get_ns();
7273 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7274 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7275 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 7276
f5031000
DF
7277 if (!bundle->surface_updates[planes_count].surface) {
7278 DRM_ERROR("No surface for CRTC: id=%d\n",
7279 acrtc_attach->crtc_id);
7280 continue;
bc7f670e
DF
7281 }
7282
f5031000
DF
7283 if (plane == pcrtc->primary)
7284 update_freesync_state_on_stream(
7285 dm,
7286 acrtc_state,
7287 acrtc_state->stream,
7288 dc_plane,
7289 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 7290
f5031000
DF
7291 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7292 __func__,
7293 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7294 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
7295
7296 planes_count += 1;
7297
8a48b44c
DF
7298 }
7299
74aa7bd4 7300 if (pflip_present) {
634092b1
MK
7301 if (!vrr_active) {
7302 /* Use old throttling in non-vrr fixed refresh rate mode
7303 * to keep flip scheduling based on target vblank counts
7304 * working in a backwards compatible way, e.g., for
7305 * clients using the GLX_OML_sync_control extension or
7306 * DRI3/Present extension with defined target_msc.
7307 */
e3eff4b5 7308 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
7309 }
7310 else {
7311 /* For variable refresh rate mode only:
7312 * Get vblank of last completed flip to avoid > 1 vrr
7313 * flips per video frame by use of throttling, but allow
7314 * flip programming anywhere in the possibly large
7315 * variable vrr vblank interval for fine-grained flip
7316 * timing control and more opportunity to avoid stutter
7317 * on late submission of flips.
7318 */
7319 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 7320 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
7321 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7322 }
7323
fdd1fe57 7324 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
7325
7326 /*
7327 * Wait until we're out of the vertical blank period before the one
7328 * targeted by the flip
7329 */
7330 while ((acrtc_attach->enabled &&
7331 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7332 0, &vpos, &hpos, NULL,
7333 NULL, &pcrtc->hwmode)
7334 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7335 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7336 (int)(target_vblank -
e3eff4b5 7337 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
7338 usleep_range(1000, 1100);
7339 }
7340
8fe684e9
NK
7341 /**
7342 * Prepare the flip event for the pageflip interrupt to handle.
7343 *
7344 * This only works in the case where we've already turned on the
7345 * appropriate hardware blocks (eg. HUBP) so in the transition case
7346 * from 0 -> n planes we have to skip a hardware generated event
7347 * and rely on sending it from software.
7348 */
7349 if (acrtc_attach->base.state->event &&
7350 acrtc_state->active_planes > 0) {
8a48b44c
DF
7351 drm_crtc_vblank_get(pcrtc);
7352
7353 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7354
7355 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7356 prepare_flip_isr(acrtc_attach);
7357
7358 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7359 }
7360
7361 if (acrtc_state->stream) {
8a48b44c 7362 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 7363 bundle->stream_update.vrr_infopacket =
8a48b44c 7364 &acrtc_state->stream->vrr_infopacket;
e7b07cee 7365 }
e7b07cee
HW
7366 }
7367
bc92c065 7368 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
7369 if ((planes_count || acrtc_state->active_planes == 0) &&
7370 acrtc_state->stream) {
b6e881c9 7371 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 7372 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
7373 bundle->stream_update.src = acrtc_state->stream->src;
7374 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
7375 }
7376
cf020d49
NK
7377 if (new_pcrtc_state->color_mgmt_changed) {
7378 /*
7379 * TODO: This isn't fully correct since we've actually
7380 * already modified the stream in place.
7381 */
7382 bundle->stream_update.gamut_remap =
7383 &acrtc_state->stream->gamut_remap_matrix;
7384 bundle->stream_update.output_csc_transform =
7385 &acrtc_state->stream->csc_color_matrix;
7386 bundle->stream_update.out_transfer_func =
7387 acrtc_state->stream->out_transfer_func;
7388 }
bc7f670e 7389
8a48b44c 7390 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 7391 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 7392 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 7393
e63e2491
EB
7394 /*
7395 * If FreeSync state on the stream has changed then we need to
7396 * re-adjust the min/max bounds now that DC doesn't handle this
7397 * as part of commit.
7398 */
7399 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7400 amdgpu_dm_vrr_active(acrtc_state)) {
7401 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7402 dc_stream_adjust_vmin_vmax(
7403 dm->dc, acrtc_state->stream,
585d450c 7404 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
7405 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7406 }
bc7f670e 7407 mutex_lock(&dm->dc_lock);
8c322309 7408 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 7409 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
7410 amdgpu_dm_psr_disable(acrtc_state->stream);
7411
bc7f670e 7412 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 7413 bundle->surface_updates,
bc7f670e
DF
7414 planes_count,
7415 acrtc_state->stream,
74aa7bd4 7416 &bundle->stream_update,
bc7f670e 7417 dc_state);
8c322309 7418
8fe684e9
NK
7419 /**
7420 * Enable or disable the interrupts on the backend.
7421 *
7422 * Most pipes are put into power gating when unused.
7423 *
7424 * When power gating is enabled on a pipe we lose the
7425 * interrupt enablement state when power gating is disabled.
7426 *
7427 * So we need to update the IRQ control state in hardware
7428 * whenever the pipe turns on (since it could be previously
7429 * power gated) or off (since some pipes can't be power gated
7430 * on some ASICs).
7431 */
7432 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
7433 dm_update_pflip_irq_state(drm_to_adev(dev),
7434 acrtc_attach);
8fe684e9 7435
8c322309 7436 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 7437 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 7438 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309
RL
7439 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7440 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
d1ebfdd8
WW
7441 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7442 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8c322309
RL
7443 amdgpu_dm_psr_enable(acrtc_state->stream);
7444 }
7445
bc7f670e 7446 mutex_unlock(&dm->dc_lock);
e7b07cee 7447 }
4b510503 7448
8ad27806
NK
7449 /*
7450 * Update cursor state *after* programming all the planes.
7451 * This avoids redundant programming in the case where we're going
7452 * to be disabling a single plane - those pipes are being disabled.
7453 */
7454 if (acrtc_state->active_planes)
7455 amdgpu_dm_commit_cursors(state);
80c218d5 7456
4b510503 7457cleanup:
74aa7bd4 7458 kfree(bundle);
e7b07cee
HW
7459}
7460
6ce8f316
NK
7461static void amdgpu_dm_commit_audio(struct drm_device *dev,
7462 struct drm_atomic_state *state)
7463{
1348969a 7464 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
7465 struct amdgpu_dm_connector *aconnector;
7466 struct drm_connector *connector;
7467 struct drm_connector_state *old_con_state, *new_con_state;
7468 struct drm_crtc_state *new_crtc_state;
7469 struct dm_crtc_state *new_dm_crtc_state;
7470 const struct dc_stream_status *status;
7471 int i, inst;
7472
7473 /* Notify device removals. */
7474 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7475 if (old_con_state->crtc != new_con_state->crtc) {
7476 /* CRTC changes require notification. */
7477 goto notify;
7478 }
7479
7480 if (!new_con_state->crtc)
7481 continue;
7482
7483 new_crtc_state = drm_atomic_get_new_crtc_state(
7484 state, new_con_state->crtc);
7485
7486 if (!new_crtc_state)
7487 continue;
7488
7489 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7490 continue;
7491
7492 notify:
7493 aconnector = to_amdgpu_dm_connector(connector);
7494
7495 mutex_lock(&adev->dm.audio_lock);
7496 inst = aconnector->audio_inst;
7497 aconnector->audio_inst = -1;
7498 mutex_unlock(&adev->dm.audio_lock);
7499
7500 amdgpu_dm_audio_eld_notify(adev, inst);
7501 }
7502
7503 /* Notify audio device additions. */
7504 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7505 if (!new_con_state->crtc)
7506 continue;
7507
7508 new_crtc_state = drm_atomic_get_new_crtc_state(
7509 state, new_con_state->crtc);
7510
7511 if (!new_crtc_state)
7512 continue;
7513
7514 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7515 continue;
7516
7517 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7518 if (!new_dm_crtc_state->stream)
7519 continue;
7520
7521 status = dc_stream_get_status(new_dm_crtc_state->stream);
7522 if (!status)
7523 continue;
7524
7525 aconnector = to_amdgpu_dm_connector(connector);
7526
7527 mutex_lock(&adev->dm.audio_lock);
7528 inst = status->audio_inst;
7529 aconnector->audio_inst = inst;
7530 mutex_unlock(&adev->dm.audio_lock);
7531
7532 amdgpu_dm_audio_eld_notify(adev, inst);
7533 }
7534}
7535
1f6010a9 7536/*
27b3f4fc
LSL
7537 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7538 * @crtc_state: the DRM CRTC state
7539 * @stream_state: the DC stream state.
7540 *
7541 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7542 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7543 */
7544static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7545 struct dc_stream_state *stream_state)
7546{
b9952f93 7547 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 7548}
e7b07cee 7549
7578ecda
AD
7550static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7551 struct drm_atomic_state *state,
7552 bool nonblock)
e7b07cee 7553{
1f6010a9
DF
7554 /*
7555 	 * Add a check here for SoCs that support a hardware cursor plane, to
7556 * unset legacy_cursor_update
7557 */
e7b07cee
HW
7558
7559 return drm_atomic_helper_commit(dev, state, nonblock);
7560
7561 /*TODO Handle EINTR, reenable IRQ*/
7562}
7563
b8592b48
LL
7564/**
7565 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7566 * @state: The atomic state to commit
7567 *
7568 * This will tell DC to commit the constructed DC state from atomic_check,
7569 * programming the hardware. Any failures here implies a hardware failure, since
7570 * atomic check should have filtered anything non-kosher.
7571 */
7578ecda 7572static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
7573{
7574 struct drm_device *dev = state->dev;
1348969a 7575 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
7576 struct amdgpu_display_manager *dm = &adev->dm;
7577 struct dm_atomic_state *dm_state;
eb3dc897 7578 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 7579 uint32_t i, j;
5cc6dcbd 7580 struct drm_crtc *crtc;
0bc9706d 7581 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
7582 unsigned long flags;
7583 bool wait_for_vblank = true;
7584 struct drm_connector *connector;
c2cea706 7585 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 7586 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 7587 int crtc_disable_count = 0;
6ee90e88 7588 bool mode_set_reset_required = false;
e7b07cee
HW
7589
7590 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7591
eb3dc897
NK
7592 dm_state = dm_atomic_get_new_state(state);
7593 if (dm_state && dm_state->context) {
7594 dc_state = dm_state->context;
7595 } else {
7596 /* No state changes, retain current state. */
813d20dc 7597 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
7598 ASSERT(dc_state_temp);
7599 dc_state = dc_state_temp;
7600 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7601 }
e7b07cee 7602
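	/*
	 * Up front, disable interrupts and release the stream for every CRTC
	 * that is being turned off or undergoing a full modeset, before the
	 * new DC state is committed below.
	 */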
6d90a208
AP
7603 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7604 new_crtc_state, i) {
7605 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7606
7607 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7608
7609 if (old_crtc_state->active &&
7610 (!new_crtc_state->active ||
7611 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7612 manage_dm_interrupts(adev, acrtc, false);
7613 dc_stream_release(dm_old_crtc_state->stream);
7614 }
7615 }
7616
e7b07cee 7617 /* update changed items */
0bc9706d 7618 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 7619 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 7620
54d76575
LSL
7621 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7622 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 7623
f1ad2f5e 7624 DRM_DEBUG_DRIVER(
e7b07cee
HW
7625 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7626 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7627 "connectors_changed:%d\n",
7628 acrtc->crtc_id,
0bc9706d
LSL
7629 new_crtc_state->enable,
7630 new_crtc_state->active,
7631 new_crtc_state->planes_changed,
7632 new_crtc_state->mode_changed,
7633 new_crtc_state->active_changed,
7634 new_crtc_state->connectors_changed);
e7b07cee 7635
27b3f4fc
LSL
7636 /* Copy all transient state flags into dc state */
7637 if (dm_new_crtc_state->stream) {
7638 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7639 dm_new_crtc_state->stream);
7640 }
7641
e7b07cee
HW
7642 /* handles headless hotplug case, updating new_state and
7643 * aconnector as needed
7644 */
7645
54d76575 7646 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 7647
f1ad2f5e 7648 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 7649
54d76575 7650 if (!dm_new_crtc_state->stream) {
e7b07cee 7651 /*
b830ebc9
HW
7652 * this could happen because of issues with
7653 			 * userspace notification delivery.
7654 			 * In this case userspace tries to set a mode on a
1f6010a9
DF
7655 			 * display which is in fact disconnected.
7656 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
7657 * We expect reset mode will come soon.
7658 *
7659 			 * This can also happen when an unplug happens
7660 			 * during the resume sequence.
7661 *
7662 * In this case, we want to pretend we still
7663 * have a sink to keep the pipe running so that
7664 * hw state is consistent with the sw state
7665 */
f1ad2f5e 7666 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
7667 __func__, acrtc->base.base.id);
7668 continue;
7669 }
7670
54d76575
LSL
7671 if (dm_old_crtc_state->stream)
7672 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 7673
97028037
LP
7674 pm_runtime_get_noresume(dev->dev);
7675
e7b07cee 7676 acrtc->enabled = true;
0bc9706d
LSL
7677 acrtc->hw_mode = new_crtc_state->mode;
7678 crtc->hwmode = new_crtc_state->mode;
6ee90e88 7679 mode_set_reset_required = true;
0bc9706d 7680 } else if (modereset_required(new_crtc_state)) {
f1ad2f5e 7681 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 7682 /* i.e. reset mode */
6ee90e88 7683 if (dm_old_crtc_state->stream)
54d76575 7684 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6ee90e88 7685 mode_set_reset_required = true;
e7b07cee
HW
7686 }
7687 } /* for_each_crtc_in_state() */
7688
eb3dc897 7689 if (dc_state) {
6ee90e88 7690 		/* if there is a mode set or reset, disable eDP PSR */
7691 if (mode_set_reset_required)
7692 amdgpu_dm_psr_disable_all(dm);
7693
eb3dc897 7694 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 7695 mutex_lock(&dm->dc_lock);
eb3dc897 7696 WARN_ON(!dc_commit_state(dm->dc, dc_state));
674e78ac 7697 mutex_unlock(&dm->dc_lock);
fa2123db 7698 }
e7b07cee 7699
0bc9706d 7700 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 7701 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 7702
54d76575 7703 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 7704
54d76575 7705 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 7706 const struct dc_stream_status *status =
54d76575 7707 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 7708
eb3dc897 7709 if (!status)
09f609c3
LL
7710 status = dc_stream_get_status_from_state(dc_state,
7711 dm_new_crtc_state->stream);
e7b07cee 7712 if (!status)
54d76575 7713 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
7714 else
7715 acrtc->otg_inst = status->primary_otg_inst;
7716 }
7717 }
0c8620d6
BL
7718#ifdef CONFIG_DRM_AMD_DC_HDCP
7719 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7720 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7721 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7722 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7723
7724 new_crtc_state = NULL;
7725
7726 if (acrtc)
7727 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7728
7729 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7730
7731 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7732 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7733 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7734 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7735 continue;
7736 }
7737
7738 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
7739 hdcp_update_display(
7740 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 7741 new_con_state->hdcp_content_type,
b1abe558
BL
7742 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7743 : false);
0c8620d6
BL
7744 }
7745#endif
e7b07cee 7746
02d6a6fc 7747 /* Handle connector state changes */
c2cea706 7748 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
7749 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7750 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7751 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
19afd799
NC
7752 struct dc_surface_update dummy_updates[MAX_SURFACES];
7753 struct dc_stream_update stream_update;
b232d4ed 7754 struct dc_info_packet hdr_packet;
e7b07cee 7755 struct dc_stream_status *status = NULL;
b232d4ed 7756 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 7757
19afd799
NC
7758 memset(&dummy_updates, 0, sizeof(dummy_updates));
7759 memset(&stream_update, 0, sizeof(stream_update));
7760
44d09c6a 7761 if (acrtc) {
0bc9706d 7762 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
7763 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7764 }
0bc9706d 7765
e7b07cee 7766 /* Skip any modesets/resets */
0bc9706d 7767 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
7768 continue;
7769
54d76575 7770 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
7771 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7772
b232d4ed
NK
7773 scaling_changed = is_scaling_state_different(dm_new_con_state,
7774 dm_old_con_state);
7775
7776 abm_changed = dm_new_crtc_state->abm_level !=
7777 dm_old_crtc_state->abm_level;
7778
7779 hdr_changed =
7780 is_hdr_metadata_different(old_con_state, new_con_state);
7781
7782 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 7783 continue;
e7b07cee 7784
b6e881c9 7785 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 7786 if (scaling_changed) {
02d6a6fc 7787 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 7788 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 7789
02d6a6fc
DF
7790 stream_update.src = dm_new_crtc_state->stream->src;
7791 stream_update.dst = dm_new_crtc_state->stream->dst;
7792 }
7793
b232d4ed 7794 if (abm_changed) {
02d6a6fc
DF
7795 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7796
7797 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7798 }
70e8ffc5 7799
b232d4ed
NK
7800 if (hdr_changed) {
7801 fill_hdr_info_packet(new_con_state, &hdr_packet);
7802 stream_update.hdr_static_metadata = &hdr_packet;
7803 }
7804
54d76575 7805 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 7806 WARN_ON(!status);
3be5262e 7807 WARN_ON(!status->plane_count);
e7b07cee 7808
02d6a6fc
DF
7809 /*
7810 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7811 * Here we create an empty update on each plane.
7812 * To fix this, DC should permit updating only stream properties.
7813 */
7814 for (j = 0; j < status->plane_count; j++)
7815 dummy_updates[j].surface = status->plane_states[0];
7816
7817
7818 mutex_lock(&dm->dc_lock);
7819 dc_commit_updates_for_stream(dm->dc,
7820 dummy_updates,
7821 status->plane_count,
7822 dm_new_crtc_state->stream,
7823 &stream_update,
7824 dc_state);
7825 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
7826 }
7827
b5e83f6f 7828 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 7829 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 7830 new_crtc_state, i) {
fe2a1965
LP
7831 if (old_crtc_state->active && !new_crtc_state->active)
7832 crtc_disable_count++;
7833
54d76575 7834 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 7835 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 7836
585d450c
AP
7837 /* For freesync config update on crtc state and params for irq */
7838 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 7839
66b0c973
MK
7840 /* Handle vrr on->off / off->on transitions */
7841 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7842 dm_new_crtc_state);
e7b07cee
HW
7843 }
7844
8fe684e9
NK
7845 /**
7846 * Enable interrupts for CRTCs that are newly enabled or went through
7847 * a modeset. It was intentionally deferred until after the front end
7848 * state was modified to wait until the OTG was on and so the IRQ
7849 * handlers didn't access stale or invalid state.
7850 */
7851 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7852 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7853
585d450c
AP
7854 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7855
8fe684e9
NK
7856 if (new_crtc_state->active &&
7857 (!old_crtc_state->active ||
7858 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
7859 dc_stream_retain(dm_new_crtc_state->stream);
7860 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 7861 manage_dm_interrupts(adev, acrtc, true);
585d450c 7862
8fe684e9
NK
7863#ifdef CONFIG_DEBUG_FS
7864 /**
7865 * Frontend may have changed so reapply the CRC capture
7866 * settings for the stream.
7867 */
7868 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7869
7870 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7871 amdgpu_dm_crtc_configure_crc_source(
7872 crtc, dm_new_crtc_state,
7873 dm_new_crtc_state->crc_src);
7874 }
7875#endif
7876 }
7877 }
e7b07cee 7878
420cd472 7879 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 7880 if (new_crtc_state->async_flip)
420cd472
DF
7881 wait_for_vblank = false;
7882
e7b07cee 7883 /* update planes when needed per crtc*/
5cc6dcbd 7884 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 7885 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 7886
54d76575 7887 if (dm_new_crtc_state->stream)
eb3dc897 7888 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 7889 dm, crtc, wait_for_vblank);
e7b07cee
HW
7890 }
7891
6ce8f316
NK
7892 /* Update audio instances for each connector. */
7893 amdgpu_dm_commit_audio(dev, state);
7894
e7b07cee
HW
7895 /*
7896 * send vblank event on all events not handled in flip and
7897 * mark consumed event for drm_atomic_helper_commit_hw_done
7898 */
4a580877 7899 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 7900 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 7901
0bc9706d
LSL
7902 if (new_crtc_state->event)
7903 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 7904
0bc9706d 7905 new_crtc_state->event = NULL;
e7b07cee 7906 }
4a580877 7907 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 7908
29c8f234
LL
7909 /* Signal HW programming completion */
7910 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
7911
7912 if (wait_for_vblank)
320a1274 7913 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
7914
7915 drm_atomic_helper_cleanup_planes(dev, state);
97028037 7916
1f6010a9
DF
7917 /*
7918 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
7919 * so we can put the GPU into runtime suspend if we're not driving any
7920 * displays anymore
7921 */
fe2a1965
LP
7922 for (i = 0; i < crtc_disable_count; i++)
7923 pm_runtime_put_autosuspend(dev->dev);
97028037 7924 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
7925
7926 if (dc_state_temp)
7927 dc_release_state(dc_state_temp);
e7b07cee
HW
7928}
7929
7930
7931static int dm_force_atomic_commit(struct drm_connector *connector)
7932{
7933 int ret = 0;
7934 struct drm_device *ddev = connector->dev;
7935 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7936 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7937 struct drm_plane *plane = disconnected_acrtc->base.primary;
7938 struct drm_connector_state *conn_state;
7939 struct drm_crtc_state *crtc_state;
7940 struct drm_plane_state *plane_state;
7941
7942 if (!state)
7943 return -ENOMEM;
7944
7945 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7946
7947 /* Construct an atomic state to restore previous display setting */
7948
7949 /*
7950 * Attach connectors to drm_atomic_state
7951 */
7952 conn_state = drm_atomic_get_connector_state(state, connector);
7953
7954 ret = PTR_ERR_OR_ZERO(conn_state);
7955 if (ret)
7956 goto err;
7957
7958 /* Attach crtc to drm_atomic_state*/
7959 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7960
7961 ret = PTR_ERR_OR_ZERO(crtc_state);
7962 if (ret)
7963 goto err;
7964
7965 /* force a restore */
7966 crtc_state->mode_changed = true;
7967
7968 /* Attach plane to drm_atomic_state */
7969 plane_state = drm_atomic_get_plane_state(state, plane);
7970
7971 ret = PTR_ERR_OR_ZERO(plane_state);
7972 if (ret)
7973 goto err;
7974
7975
7976 /* Call commit internally with the state we just constructed */
7977 ret = drm_atomic_commit(state);
7978 if (!ret)
7979 return 0;
7980
7981err:
7982 DRM_ERROR("Restoring old state failed with %i\n", ret);
7983 drm_atomic_state_put(state);
7984
7985 return ret;
7986}
7987
7988/*
1f6010a9
DF
7989  * This function handles all cases where a set mode does not come upon hotplug.
7990  * This includes when a display is unplugged then plugged back into the
7991  * same port, and when running without usermode desktop manager support.
e7b07cee 7992 */
3ee6b26b
AD
7993void dm_restore_drm_connector_state(struct drm_device *dev,
7994 struct drm_connector *connector)
e7b07cee 7995{
c84dec2f 7996 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
7997 struct amdgpu_crtc *disconnected_acrtc;
7998 struct dm_crtc_state *acrtc_state;
7999
8000 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8001 return;
8002
8003 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
8004 if (!disconnected_acrtc)
8005 return;
e7b07cee 8006
70e8ffc5
HW
8007 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8008 if (!acrtc_state->stream)
e7b07cee
HW
8009 return;
8010
8011 /*
8012 * If the previous sink is not released and different from the current,
8013 	 * we deduce we are in a state where we cannot rely on a usermode call
8014 	 * to turn on the display, so we do it here.
8015 */
8016 if (acrtc_state->stream->sink != aconnector->dc_sink)
8017 dm_force_atomic_commit(&aconnector->base);
8018}
8019
1f6010a9 8020/*
e7b07cee
HW
8021 * Grabs all modesetting locks to serialize against any blocking commits,
8022 * Waits for completion of all non blocking commits.
8023 */
3ee6b26b
AD
8024static int do_aquire_global_lock(struct drm_device *dev,
8025 struct drm_atomic_state *state)
e7b07cee
HW
8026{
8027 struct drm_crtc *crtc;
8028 struct drm_crtc_commit *commit;
8029 long ret;
8030
1f6010a9
DF
8031 /*
8032 	 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
8033 	 * ensure that when the framework releases it, the
8034 	 * extra locks we are taking here will get released too.
8035 */
8036 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8037 if (ret)
8038 return ret;
8039
8040 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8041 spin_lock(&crtc->commit_lock);
8042 commit = list_first_entry_or_null(&crtc->commit_list,
8043 struct drm_crtc_commit, commit_entry);
8044 if (commit)
8045 drm_crtc_commit_get(commit);
8046 spin_unlock(&crtc->commit_lock);
8047
8048 if (!commit)
8049 continue;
8050
1f6010a9
DF
8051 /*
8052 		 * Make sure all pending HW programming has completed and
e7b07cee
HW
8053 		 * page flips are done.
8054 */
8055 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8056
8057 if (ret > 0)
8058 ret = wait_for_completion_interruptible_timeout(
8059 &commit->flip_done, 10*HZ);
8060
8061 if (ret == 0)
8062 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 8063 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
8064
8065 drm_crtc_commit_put(commit);
8066 }
8067
8068 return ret < 0 ? ret : 0;
8069}
8070
bb47de73
NK
8071static void get_freesync_config_for_crtc(
8072 struct dm_crtc_state *new_crtc_state,
8073 struct dm_connector_state *new_con_state)
98e6436d
AK
8074{
8075 struct mod_freesync_config config = {0};
98e6436d
AK
8076 struct amdgpu_dm_connector *aconnector =
8077 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 8078 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 8079 int vrefresh = drm_mode_vrefresh(mode);
98e6436d 8080
a057ec46 8081 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
8082 vrefresh >= aconnector->min_vfreq &&
8083 vrefresh <= aconnector->max_vfreq;
bb47de73 8084
a057ec46
IB
8085 if (new_crtc_state->vrr_supported) {
8086 new_crtc_state->stream->ignore_msa_timing_param = true;
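		/* mod_freesync works in micro-Hz (uhz), hence the "* 1000000"
		 * below on the connector's min/max_vfreq, which are in Hz.
		 */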
bb47de73 8087 config.state = new_crtc_state->base.vrr_enabled ?
98e6436d
AK
8088 VRR_STATE_ACTIVE_VARIABLE :
8089 VRR_STATE_INACTIVE;
8090 config.min_refresh_in_uhz =
8091 aconnector->min_vfreq * 1000000;
8092 config.max_refresh_in_uhz =
8093 aconnector->max_vfreq * 1000000;
69ff8845 8094 config.vsif_supported = true;
180db303 8095 config.btr = true;
98e6436d
AK
8096 }
8097
bb47de73
NK
8098 new_crtc_state->freesync_config = config;
8099}
98e6436d 8100
bb47de73
NK
8101static void reset_freesync_config_for_crtc(
8102 struct dm_crtc_state *new_crtc_state)
8103{
8104 new_crtc_state->vrr_supported = false;
98e6436d 8105
bb47de73
NK
8106 memset(&new_crtc_state->vrr_infopacket, 0,
8107 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
8108}
8109
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
	 * dc_validation_set and update changed items there.
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO: This hack should go away. */
	if (aconnector && enable) {
		/* Make sure the fake sink is created in the hot-plug scenario. */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream
		 * when MST connectors added in chained (daisy-chain) mode are
		 * not found in the existing crtc_state.
		 * TODO: dig out the root cause of this.
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release the extra reference taken above. */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (the stream was just
	 *    added to the dc context, or is already on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update FreeSync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}

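/*
 * Decide whether a plane needs its DC plane state recreated. Any change
 * that can affect pipe allocation or bandwidth (crtc moves, modesets,
 * scaling, rotation, blending, pixel format, tiling/DCC) on this plane or
 * a sibling plane of the same stream forces a reset of all planes.
 */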
static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient to
	 * determine when we need to reset all the planes on the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* Src/dst size and scaling updates. */
		if (old_other_state->src_w != new_other_state->src_w ||
		    old_other_state->src_h != new_other_state->src_h ||
		    old_other_state->crtc_w != new_other_state->crtc_w ||
		    old_other_state->crtc_h != new_other_state->crtc_h)
			return true;

		/* Rotation / mirroring updates. */
		if (old_other_state->rotation != new_other_state->rotation)
			return true;

		/* Blending updates. */
		if (old_other_state->pixel_blend_mode !=
		    new_other_state->pixel_blend_mode)
			return true;

		/* Alpha updates. */
		if (old_other_state->alpha != new_other_state->alpha)
			return true;

		/* Colorspace changes. */
		if (old_other_state->color_range != new_other_state->color_range ||
		    old_other_state->color_encoding != new_other_state->color_encoding)
			return true;

		/* Framebuffer checks fall at the end. */
		if (!old_other_state->fb || !new_other_state->fb)
			continue;

		/* Pixel format changes can require bandwidth updates. */
		if (old_other_state->fb->format != new_other_state->fb->format)
			return true;

		old_dm_plane_state = to_dm_plane_state(old_other_state);
		new_dm_plane_state = to_dm_plane_state(new_other_state);

		/* Tiling and DCC changes also require bandwidth updates. */
		if (old_dm_plane_state->tiling_flags !=
		    new_dm_plane_state->tiling_flags)
			return true;
	}

	return false;
}

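/*
 * dm_update_plane_state() - reflect a DRM plane update in the DC context.
 *
 * Mirrors dm_update_crtc_state(): with enable == false the DC plane state
 * of any changed/removed plane is released and removed from the context;
 * with enable == true a new DC plane state is created, filled from the DRM
 * plane state and attached to the CRTC's stream.
 */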
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* TODO: Implement a better atomic check for the cursor plane. */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
		    (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
					 new_plane_state->crtc_w, new_plane_state->crtc_h);
			return -EINVAL;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
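/*
 * Find the MST connector driving @crtc in the new state and, if there is
 * one, add every CRTC that shares its MST topology to the atomic state, so
 * the DSC bandwidth recomputation can consider all streams on the link.
 */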
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another, acquiring the global lock guarantees that any
 * such full update commit will wait for completion of any outstanding flip
 * using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, negative error code on validation failure.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
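	/*
	 * On Navi and newer, pull MST CRTCs that share a link with a modeset
	 * CRTC into the state ahead of the DSC recompute further below.
	 */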
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Prepass for updating tiling flags on new planes. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);

		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
				  &new_dm_plane_state->tmz_surface);
		if (ret)
			goto fail;
	}

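	/*
	 * The four passes below must keep this order: planes are removed
	 * first, then streams are removed and re-added for the affected
	 * CRTCs, and finally the new/modified planes are added back to the
	 * DC context.
	 */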
	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all CRTCs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all CRTCs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper; check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes. */
	/*
	 * TODO: Scaling-change validation was removed due to the inability to
	 * commit a new stream into the context without causing a full reset.
	 * Need to decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of the MST topology in the state:
		 * we need to perform the MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance of
		 * getting stuck in an infinite loop and hanging eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
							 UPDATE_TYPE_FULL :
							 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}

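/*
 * Query the DPCD to see whether the sink can ignore the MSA timing
 * parameters (DP_MSA_TIMING_PAR_IGNORED); this driver only probes
 * FreeSync refresh ranges on DP/eDP sinks that report this capability.
 */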
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
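
/**
 * amdgpu_dm_update_freesync_caps() - refresh FreeSync state from an EDID.
 * @connector: DRM connector to update
 * @edid: freshly parsed EDID, or NULL on disconnect
 *
 * Parse the monitor range descriptor out of the EDID (only for DP/eDP sinks
 * that can ignore MSA timings), cache the min/max vertical refresh on the
 * connector, and expose the result through the vrr_capable property.
 */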
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-NULL, restrict FreeSync to DP and eDP sinks.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if the monitor has continuous frequency mode.
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

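		/*
		 * Only treat the sink as FreeSync capable when it reports a
		 * usable range, i.e. more than 10 Hz between min and max.
		 */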
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {
			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

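/*
 * Read the sink's PSR capability (DP_PSR_SUPPORT DPCD) on eDP links and
 * record the supported PSR version in the link's PSR settings.
 */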
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable PSR firmware
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Number of static frames before generating the interrupt to enter
	 * PSR; initialized to a fail-safe of 2 static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least 30 ms
	 * of time has passed.
	 */
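	/*
	 * E.g. at 60 Hz: frame_time_microsec = 1000000 / 60 = 16666, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 frames (~33 ms).
	 */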
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable PSR firmware
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}

/*
 * amdgpu_dm_psr_disable_all() - disable PSR firmware if PSR is enabled on
 * any stream
 * @dm: display manager
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}

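/*
 * Propagate the force_timing_sync setting to every stream in the current
 * DC state and retrigger CRTC timing synchronization.
 */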
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}