/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
#if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
			VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
			VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

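/*
 * Note: both helpers above treat VRR as active in either the variable or
 * the fixed state. The _irq variant reads the copy cached in
 * acrtc->dm_irq_params, which is the state the interrupt handlers below
 * consult; the other variant reads the atomic dm_crtc_state instead.
 */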
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

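/*
 * Audio component glue: exposes get_eld() to the generic DRM audio
 * component so the HDA codec driver can fetch ELD data for a given port
 * from the DM connector list.
 */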
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

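/*
 * Bring up the DMUB microcontroller: copy the firmware, bss/data and a
 * copy of the VBIOS into the reserved framebuffer windows (inst-const in
 * window 0 unless the PSP already loaded it, bss/data in window 2, VBIOS
 * in window 3, mailbox/trace buffer/fw state in windows 4-6, per the
 * DMUB_WINDOW_* copies below), then start the service and wait for the
 * firmware to auto-load.
 */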
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
							   struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct amdgpu_dm_connector *amdgpu_dm_connector;
	struct drm_connector_state *conn_state;
	struct dm_crtc_state *acrtc_state;
	struct drm_crtc_state *crtc_state;
	struct dc_stream_state *stream;
	struct drm_device *dev = adev_to_drm(adev);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {

		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
		conn_state = connector->state;

		if (!(conn_state && conn_state->crtc))
			continue;

		crtc = conn_state->crtc;
		acrtc_state = to_dm_crtc_state(crtc->state);

		if (!(acrtc_state && acrtc_state->stream))
			continue;

		stream = acrtc_state->stream;

		if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
		    amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
			conn_state = drm_atomic_get_connector_state(state, connector);
			crtc_state = drm_atomic_get_crtc_state(state, crtc);
			crtc_state->mode_changed = true;
		}
	}
}

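/*
 * Translate the GMC aperture and GART layout into DC's physical address
 * space config. The >> 18 and >> 24 shifts below presumably match the
 * hardware register granularity (256KB units for the system aperture,
 * 16MB units for the AGP aperture); the values are shifted back up when
 * stored into pa_config.
 */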
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue where it cannot use vram that lies
		 * above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
		 * raise the system aperture high address (add 1) to get rid
		 * of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}


static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	struct dc_phy_addr_space_config pa_config;
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
#if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
#endif
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	mmhub_read_system_context(adev, &pa_config);

	// Call the DC init_memory func
	dc_setup_system_context(adev->dm.dc, &pa_config);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

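/*
 * The DMCU firmware ships as a single blob; for PSP loading it is
 * registered below as two ucode entries, ERAM and interrupt vectors
 * (INTV), split using intv_size_bytes from the firmware header.
 */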
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
	case CHIP_DIMGREY_CAVEFISH:
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
	case CHIP_VANGOGH:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

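/*
 * Register accessors handed to the DMUB service at creation time; they
 * simply route through DC's dm_read_reg()/dm_write_reg() helpers.
 */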
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
#if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
#endif
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
#endif

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

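/*
 * Start topology management on every connector whose DC link reports an
 * MST branch device; on failure the link is downgraded to a single (SST)
 * connection.
 */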
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%
	 * (0xFFFF x 0.01 = 0x28F).
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

9340dfd3
HW
1633static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1634{
1635 struct smu_context *smu = &adev->smu;
1636 int ret = 0;
1637
1638 if (!is_support_sw_smu(adev))
1639 return 0;
1640
 1641 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
 1642 * on the Windows driver dc implementation.
 1643 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
 1644 * should be passed to smu during boot up and resume from s3.
 1645 * boot up: dc calculates dcn watermark clock settings within dc_create,
 1646 * dcn20_resource_construct
 1647 * then calls the pplib functions below to pass the settings to smu:
 1648 * smu_set_watermarks_for_clock_ranges
 1649 * smu_set_watermarks_table
 1650 * navi10_set_watermarks_table
 1651 * smu_write_watermarks_table
 1652 *
 1653 * For Renoir, clock settings of dcn watermarks are also fixed values.
 1654 * dc has implemented a different flow for the Windows driver:
 1655 * dc_hardware_init / dc_set_power_state
 1656 * dcn10_init_hw
 1657 * notify_wm_ranges
 1658 * set_wm_ranges
 1659 * -- Linux
 1660 * smu_set_watermarks_for_clock_ranges
 1661 * renoir_set_watermarks_table
 1662 * smu_write_watermarks_table
 1663 *
 1664 * For Linux,
 1665 * dc_hardware_init -> amdgpu_dm_init
 1666 * dc_set_power_state --> dm_resume
 1667 *
 1668 * therefore, this function applies to navi10/12/14 but not Renoir.
 1669 *
 1670 */
 1671 switch (adev->asic_type) {
1672 case CHIP_NAVI10:
1673 case CHIP_NAVI14:
1674 case CHIP_NAVI12:
1675 break;
1676 default:
1677 return 0;
1678 }
1679
e7a95eea
EQ
1680 ret = smu_write_watermarks_table(smu);
1681 if (ret) {
1682 DRM_ERROR("Failed to update WMTABLE!\n");
1683 return ret;
9340dfd3
HW
1684 }
1685
9340dfd3
HW
1686 return 0;
1687}
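As a minimal illustration of the ASIC gating above (a userspace sketch with a hypothetical chip enum standing in for amd_asic_type, not the driver's types): only the dGPU Navi1x parts fall through to the table write; everything else returns early.

#include <stdbool.h>

/* Hypothetical stand-in for amd_asic_type; illustrative only. */
enum chip { CHIP_NAVI10, CHIP_NAVI12, CHIP_NAVI14, CHIP_RENOIR };

/* Mirrors the switch in amdgpu_dm_smu_write_watermarks_table(). */
static bool needs_watermarks_table_write(enum chip asic_type)
{
	switch (asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		return true;
	default:
		return false;
	}
}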
1688
b8592b48
LL
1689/**
1690 * dm_hw_init() - Initialize DC device
28d687ea 1691 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1692 *
1693 * Initialize the &struct amdgpu_display_manager device. This involves calling
1694 * the initializers of each DM component, then populating the struct with them.
1695 *
1696 * Although the function implies hardware initialization, both hardware and
1697 * software are initialized here. Splitting them out to their relevant init
1698 * hooks is a future TODO item.
1699 *
1700 * Some notable things that are initialized here:
1701 *
1702 * - Display Core, both software and hardware
1703 * - DC modules that we need (freesync and color management)
1704 * - DRM software states
1705 * - Interrupt sources and handlers
1706 * - Vblank support
1707 * - Debug FS entries, if enabled
1708 */
4562236b
HW
1709static int dm_hw_init(void *handle)
1710{
1711 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1712 /* Create DAL display manager */
1713 amdgpu_dm_init(adev);
4562236b
HW
1714 amdgpu_dm_hpd_init(adev);
1715
4562236b
HW
1716 return 0;
1717}
1718
b8592b48
LL
1719/**
1720 * dm_hw_fini() - Teardown DC device
28d687ea 1721 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1722 *
1723 * Teardown components within &struct amdgpu_display_manager that require
1724 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1725 * were loaded. Also flush IRQ workqueues and disable them.
1726 */
4562236b
HW
1727static int dm_hw_fini(void *handle)
1728{
1729 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1730
1731 amdgpu_dm_hpd_fini(adev);
1732
1733 amdgpu_dm_irq_fini(adev);
21de3396 1734 amdgpu_dm_fini(adev);
4562236b
HW
1735 return 0;
1736}
1737
cdaae837
BL
1738
1739static int dm_enable_vblank(struct drm_crtc *crtc);
1740static void dm_disable_vblank(struct drm_crtc *crtc);
1741
1742static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1743 struct dc_state *state, bool enable)
1744{
1745 enum dc_irq_source irq_source;
1746 struct amdgpu_crtc *acrtc;
1747 int rc = -EBUSY;
1748 int i = 0;
1749
1750 for (i = 0; i < state->stream_count; i++) {
1751 acrtc = get_crtc_by_otg_inst(
1752 adev, state->stream_status[i].primary_otg_inst);
1753
1754 if (acrtc && state->stream_status[i].plane_count != 0) {
1755 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1756 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1757 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1758 acrtc->crtc_id, enable ? "en" : "dis", rc);
1759 if (rc)
1760 DRM_WARN("Failed to %s pflip interrupts\n",
1761 enable ? "enable" : "disable");
1762
1763 if (enable) {
1764 rc = dm_enable_vblank(&acrtc->base);
1765 if (rc)
1766 DRM_WARN("Failed to enable vblank interrupts\n");
1767 } else {
1768 dm_disable_vblank(&acrtc->base);
1769 }
1770
1771 }
1772 }
1773
1774}
1775
dfd84d90 1776static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
1777{
1778 struct dc_state *context = NULL;
1779 enum dc_status res = DC_ERROR_UNEXPECTED;
1780 int i;
1781 struct dc_stream_state *del_streams[MAX_PIPES];
1782 int del_streams_count = 0;
1783
1784 memset(del_streams, 0, sizeof(del_streams));
1785
1786 context = dc_create_state(dc);
1787 if (context == NULL)
1788 goto context_alloc_fail;
1789
1790 dc_resource_state_copy_construct_current(dc, context);
1791
1792 /* First remove from context all streams */
1793 for (i = 0; i < context->stream_count; i++) {
1794 struct dc_stream_state *stream = context->streams[i];
1795
1796 del_streams[del_streams_count++] = stream;
1797 }
1798
1799 /* Remove all planes for removed streams and then remove the streams */
1800 for (i = 0; i < del_streams_count; i++) {
1801 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1802 res = DC_FAIL_DETACH_SURFACES;
1803 goto fail;
1804 }
1805
1806 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1807 if (res != DC_OK)
1808 goto fail;
1809 }
1810
1811
1812 res = dc_validate_global_state(dc, context, false);
1813
1814 if (res != DC_OK) {
 1815 DRM_ERROR("%s: resource validation failed, dc_status:%d\n", __func__, res);
1816 goto fail;
1817 }
1818
1819 res = dc_commit_state(dc, context);
1820
1821fail:
1822 dc_release_state(context);
1823
1824context_alloc_fail:
1825 return res;
1826}
1827
4562236b
HW
1828static int dm_suspend(void *handle)
1829{
1830 struct amdgpu_device *adev = handle;
1831 struct amdgpu_display_manager *dm = &adev->dm;
1832 int ret = 0;
4562236b 1833
53b3f8f4 1834 if (amdgpu_in_reset(adev)) {
cdaae837
BL
1835 mutex_lock(&dm->dc_lock);
1836 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1837
1838 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1839
1840 amdgpu_dm_commit_zero_streams(dm->dc);
1841
1842 amdgpu_dm_irq_suspend(adev);
1843
1844 return ret;
1845 }
4562236b 1846
d2f0b53b 1847 WARN_ON(adev->dm.cached_state);
4a580877 1848 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 1849
4a580877 1850 s3_handle_mst(adev_to_drm(adev), true);
4562236b 1851
4562236b
HW
1852 amdgpu_dm_irq_suspend(adev);
1853
a3621485 1854
32f5062d 1855 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 1856
1c2075d4 1857 return 0;
4562236b
HW
1858}
1859
1daf8c63
AD
1860static struct amdgpu_dm_connector *
1861amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1862 struct drm_crtc *crtc)
4562236b
HW
1863{
1864 uint32_t i;
c2cea706 1865 struct drm_connector_state *new_con_state;
4562236b
HW
1866 struct drm_connector *connector;
1867 struct drm_crtc *crtc_from_state;
1868
c2cea706
LSL
1869 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1870 crtc_from_state = new_con_state->crtc;
4562236b
HW
1871
1872 if (crtc_from_state == crtc)
c84dec2f 1873 return to_amdgpu_dm_connector(connector);
4562236b
HW
1874 }
1875
1876 return NULL;
1877}
1878
fbbdadf2
BL
1879static void emulated_link_detect(struct dc_link *link)
1880{
1881 struct dc_sink_init_data sink_init_data = { 0 };
1882 struct display_sink_capability sink_caps = { 0 };
1883 enum dc_edid_status edid_status;
1884 struct dc_context *dc_ctx = link->ctx;
1885 struct dc_sink *sink = NULL;
1886 struct dc_sink *prev_sink = NULL;
1887
1888 link->type = dc_connection_none;
1889 prev_sink = link->local_sink;
1890
1891 if (prev_sink != NULL)
1892 dc_sink_retain(prev_sink);
1893
1894 switch (link->connector_signal) {
1895 case SIGNAL_TYPE_HDMI_TYPE_A: {
1896 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1897 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1898 break;
1899 }
1900
1901 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1902 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1903 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1904 break;
1905 }
1906
1907 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1908 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1909 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1910 break;
1911 }
1912
1913 case SIGNAL_TYPE_LVDS: {
1914 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1915 sink_caps.signal = SIGNAL_TYPE_LVDS;
1916 break;
1917 }
1918
1919 case SIGNAL_TYPE_EDP: {
1920 sink_caps.transaction_type =
1921 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1922 sink_caps.signal = SIGNAL_TYPE_EDP;
1923 break;
1924 }
1925
1926 case SIGNAL_TYPE_DISPLAY_PORT: {
1927 sink_caps.transaction_type =
1928 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1929 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1930 break;
1931 }
1932
1933 default:
1934 DC_ERROR("Invalid connector type! signal:%d\n",
1935 link->connector_signal);
1936 return;
1937 }
1938
1939 sink_init_data.link = link;
1940 sink_init_data.sink_signal = sink_caps.signal;
1941
1942 sink = dc_sink_create(&sink_init_data);
1943 if (!sink) {
1944 DC_ERROR("Failed to create sink!\n");
1945 return;
1946 }
1947
dcd5fb82 1948 /* dc_sink_create returns a new reference */
fbbdadf2
BL
1949 link->local_sink = sink;
1950
1951 edid_status = dm_helpers_read_local_edid(
1952 link->ctx,
1953 link,
1954 sink);
1955
1956 if (edid_status != EDID_OK)
 1957 DC_ERROR("Failed to read EDID\n");
1958
1959}
1960
cdaae837
BL
1961static void dm_gpureset_commit_state(struct dc_state *dc_state,
1962 struct amdgpu_display_manager *dm)
1963{
1964 struct {
1965 struct dc_surface_update surface_updates[MAX_SURFACES];
1966 struct dc_plane_info plane_infos[MAX_SURFACES];
1967 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1968 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1969 struct dc_stream_update stream_update;
1970 } * bundle;
1971 int k, m;
1972
1973 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1974
1975 if (!bundle) {
1976 dm_error("Failed to allocate update bundle\n");
1977 goto cleanup;
1978 }
1979
1980 for (k = 0; k < dc_state->stream_count; k++) {
1981 bundle->stream_update.stream = dc_state->streams[k];
1982
1983 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1984 bundle->surface_updates[m].surface =
1985 dc_state->stream_status->plane_states[m];
1986 bundle->surface_updates[m].surface->force_full_update =
1987 true;
1988 }
1989 dc_commit_updates_for_stream(
1990 dm->dc, bundle->surface_updates,
1991 dc_state->stream_status->plane_count,
1992 dc_state->streams[k], &bundle->stream_update, dc_state);
1993 }
1994
1995cleanup:
1996 kfree(bundle);
1997
1998 return;
1999}
2000
4562236b
HW
2001static int dm_resume(void *handle)
2002{
2003 struct amdgpu_device *adev = handle;
4a580877 2004 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2005 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2006 struct amdgpu_dm_connector *aconnector;
4562236b 2007 struct drm_connector *connector;
f8d2d39e 2008 struct drm_connector_list_iter iter;
4562236b 2009 struct drm_crtc *crtc;
c2cea706 2010 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
2011 struct dm_crtc_state *dm_new_crtc_state;
2012 struct drm_plane *plane;
2013 struct drm_plane_state *new_plane_state;
2014 struct dm_plane_state *dm_new_plane_state;
113b7a01 2015 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2016 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
2017 struct dc_state *dc_state;
2018 int i, r, j;
4562236b 2019
53b3f8f4 2020 if (amdgpu_in_reset(adev)) {
cdaae837
BL
2021 dc_state = dm->cached_dc_state;
2022
2023 r = dm_dmub_hw_init(adev);
2024 if (r)
2025 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2026
2027 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2028 dc_resume(dm->dc);
2029
2030 amdgpu_dm_irq_resume_early(adev);
2031
2032 for (i = 0; i < dc_state->stream_count; i++) {
2033 dc_state->streams[i]->mode_changed = true;
2034 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2035 dc_state->stream_status->plane_states[j]->update_flags.raw
2036 = 0xffffffff;
2037 }
2038 }
2039
2040 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2041
cdaae837
BL
2042 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2043
2044 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2045
2046 dc_release_state(dm->cached_dc_state);
2047 dm->cached_dc_state = NULL;
2048
2049 amdgpu_dm_irq_resume_late(adev);
2050
2051 mutex_unlock(&dm->dc_lock);
2052
2053 return 0;
2054 }
113b7a01
LL
2055 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2056 dc_release_state(dm_state->context);
2057 dm_state->context = dc_create_state(dm->dc);
2058 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2059 dc_resource_state_construct(dm->dc, dm_state->context);
2060
8c7aea40
NK
2061 /* Before powering on DC we need to re-initialize DMUB. */
2062 r = dm_dmub_hw_init(adev);
2063 if (r)
2064 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2065
a80aa93d
ML
2066 /* power on hardware */
2067 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2068
4562236b
HW
2069 /* program HPD filter */
2070 dc_resume(dm->dc);
2071
4562236b
HW
2072 /*
2073 * early enable HPD Rx IRQ, should be done before set mode as short
2074 * pulse interrupts are used for MST
2075 */
2076 amdgpu_dm_irq_resume_early(adev);
2077
d20ebea8 2078 /* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
2079 s3_handle_mst(ddev, false);
2080
4562236b 2081 /* Do detection */
f8d2d39e
LP
2082 drm_connector_list_iter_begin(ddev, &iter);
2083 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2084 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2085
2086 /*
2087 * this is the case when traversing through already created
2088 * MST connectors, should be skipped
2089 */
2090 if (aconnector->mst_port)
2091 continue;
2092
03ea364c 2093 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2094 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2095 DRM_ERROR("KMS: Failed to detect connector\n");
2096
2097 if (aconnector->base.force && new_connection_type == dc_connection_none)
2098 emulated_link_detect(aconnector->dc_link);
2099 else
2100 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2101
2102 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2103 aconnector->fake_enable = false;
2104
dcd5fb82
MF
2105 if (aconnector->dc_sink)
2106 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2107 aconnector->dc_sink = NULL;
2108 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2109 mutex_unlock(&aconnector->hpd_lock);
4562236b 2110 }
f8d2d39e 2111 drm_connector_list_iter_end(&iter);
4562236b 2112
1f6010a9 2113 /* Force mode set in atomic commit */
a80aa93d 2114 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2115 new_crtc_state->active_changed = true;
4f346e65 2116
fcb4019e
LSL
2117 /*
2118 * atomic_check is expected to create the dc states. We need to release
2119 * them here, since they were duplicated as part of the suspend
2120 * procedure.
2121 */
a80aa93d 2122 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2123 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2124 if (dm_new_crtc_state->stream) {
2125 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2126 dc_stream_release(dm_new_crtc_state->stream);
2127 dm_new_crtc_state->stream = NULL;
2128 }
2129 }
2130
a80aa93d 2131 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2132 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2133 if (dm_new_plane_state->dc_state) {
2134 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2135 dc_plane_state_release(dm_new_plane_state->dc_state);
2136 dm_new_plane_state->dc_state = NULL;
2137 }
2138 }
2139
2d1af6a1 2140 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2141
a80aa93d 2142 dm->cached_state = NULL;
0a214e2f 2143
9faa4237 2144 amdgpu_dm_irq_resume_late(adev);
4562236b 2145
9340dfd3
HW
2146 amdgpu_dm_smu_write_watermarks_table(adev);
2147
2d1af6a1 2148 return 0;
4562236b
HW
2149}
2150
b8592b48
LL
2151/**
2152 * DOC: DM Lifecycle
2153 *
 2154 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2155 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2156 * the base driver's device list to be initialized and torn down accordingly.
2157 *
2158 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2159 */
2160
4562236b
HW
2161static const struct amd_ip_funcs amdgpu_dm_funcs = {
2162 .name = "dm",
2163 .early_init = dm_early_init,
7abcf6b5 2164 .late_init = dm_late_init,
4562236b
HW
2165 .sw_init = dm_sw_init,
2166 .sw_fini = dm_sw_fini,
2167 .hw_init = dm_hw_init,
2168 .hw_fini = dm_hw_fini,
2169 .suspend = dm_suspend,
2170 .resume = dm_resume,
2171 .is_idle = dm_is_idle,
2172 .wait_for_idle = dm_wait_for_idle,
2173 .check_soft_reset = dm_check_soft_reset,
2174 .soft_reset = dm_soft_reset,
2175 .set_clockgating_state = dm_set_clockgating_state,
2176 .set_powergating_state = dm_set_powergating_state,
2177};
2178
2179const struct amdgpu_ip_block_version dm_ip_block =
2180{
2181 .type = AMD_IP_BLOCK_TYPE_DCE,
2182 .major = 1,
2183 .minor = 0,
2184 .rev = 0,
2185 .funcs = &amdgpu_dm_funcs,
2186};
2187
ca3268c4 2188
b8592b48
LL
2189/**
2190 * DOC: atomic
2191 *
2192 * *WIP*
2193 */
0a323b84 2194
b3663f70 2195static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2196 .fb_create = amdgpu_display_user_framebuffer_create,
366c1baa 2197 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2198 .atomic_check = amdgpu_dm_atomic_check,
da5c47f6 2199 .atomic_commit = amdgpu_dm_atomic_commit,
54f5499a
AG
2200};
2201
2202static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2203 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2204};
2205
94562810
RS
2206static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2207{
2208 u32 max_cll, min_cll, max, min, q, r;
2209 struct amdgpu_dm_backlight_caps *caps;
2210 struct amdgpu_display_manager *dm;
2211 struct drm_connector *conn_base;
2212 struct amdgpu_device *adev;
ec11fe37 2213 struct dc_link *link = NULL;
94562810
RS
2214 static const u8 pre_computed_values[] = {
2215 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2216 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2217
2218 if (!aconnector || !aconnector->dc_link)
2219 return;
2220
ec11fe37 2221 link = aconnector->dc_link;
2222 if (link->connector_signal != SIGNAL_TYPE_EDP)
2223 return;
2224
94562810 2225 conn_base = &aconnector->base;
1348969a 2226 adev = drm_to_adev(conn_base->dev);
94562810
RS
2227 dm = &adev->dm;
2228 caps = &dm->backlight_caps;
2229 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2230 caps->aux_support = false;
2231 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2232 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2233
2234 if (caps->ext_caps->bits.oled == 1 ||
2235 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2236 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2237 caps->aux_support = true;
2238
 2239 /* From the specification (CTA-861-G), the maximum
 2240 * luminance is calculated as:
 2241 * Luminance = 50*2**(CV/32)
 2242 * where CV is a one-byte value.
 2243 * Evaluating this expression directly would need floating-point precision;
 2244 * to avoid that complexity, we take advantage of the fact that CV is divided
 2245 * by a constant. From Euclid's division algorithm, we know that CV
 2246 * can be written as CV = 32*q + r. Substituting CV in the
 2247 * Luminance expression gives 50*(2**q)*(2**(r/32)), so we just
 2248 * need to pre-compute the values of 50*2**(r/32). For pre-computing them
 2249 * we used the following Ruby line:
 2250 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
 2251 * The results of the above expression can be verified against
 2252 * pre_computed_values.
2253 */
2254 q = max_cll >> 5;
2255 r = max_cll % 32;
2256 max = (1 << q) * pre_computed_values[r];
2257
2258 // min luminance: maxLum * (CV/255)^2 / 100
2259 q = DIV_ROUND_CLOSEST(min_cll, 255);
2260 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2261
2262 caps->aux_max_input_signal = max;
2263 caps->aux_min_input_signal = min;
2264}
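The integer decomposition in the comment above can be verified with a self-contained program (plain C; the table is copied from this file). For CV = 32*q + r, the computed maximum is (1 << q) * table[r], which tracks 50*2**(CV/32):

#include <math.h>
#include <stdio.h>

static const unsigned char table[] = {
	50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
	71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

int main(void)
{
	unsigned int cv;

	for (cv = 0; cv <= 255; cv += 51) {
		unsigned int q = cv >> 5;   /* cv / 32 */
		unsigned int r = cv % 32;
		unsigned int max = (1u << q) * table[r];

		/* Compare the integer result against the float formula. */
		printf("CV=%3u integer=%5u float=%.1f\n",
		       cv, max, 50.0 * pow(2.0, cv / 32.0));
	}
	return 0;
}

(Build with -lm; the integer and float columns agree to within rounding.)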
2265
97e51c16
HW
2266void amdgpu_dm_update_connector_after_detect(
2267 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2268{
2269 struct drm_connector *connector = &aconnector->base;
2270 struct drm_device *dev = connector->dev;
b73a22d3 2271 struct dc_sink *sink;
4562236b
HW
2272
2273 /* MST handled by drm_mst framework */
2274 if (aconnector->mst_mgr.mst_state == true)
2275 return;
2276
4562236b 2277 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2278 if (sink)
2279 dc_sink_retain(sink);
4562236b 2280
1f6010a9
DF
2281 /*
 2282 * EDID mgmt connector gets its first update only in the mode_valid hook; then
4562236b 2283 * the connector sink is set to either a fake or a physical sink, depending on link status.
1f6010a9 2284 * Skip if already done during boot.
4562236b
HW
2285 */
2286 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2287 && aconnector->dc_em_sink) {
2288
1f6010a9
DF
2289 /*
 2290 * For S3 resume with headless, use em_sink to fake a stream
2291 * because on resume connector->sink is set to NULL
4562236b
HW
2292 */
2293 mutex_lock(&dev->mode_config.mutex);
2294
2295 if (sink) {
922aa1e1 2296 if (aconnector->dc_sink) {
98e6436d 2297 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2298 /*
 2299 * retain and release below are used to
 2300 * bump up the refcount for the sink because the link doesn't point
 2301 * to it anymore after disconnect, so on the next crtc-to-connector
922aa1e1
AG
 2302 * reshuffle by UMD we would get an unwanted dc_sink release
2303 */
dcd5fb82 2304 dc_sink_release(aconnector->dc_sink);
922aa1e1 2305 }
4562236b 2306 aconnector->dc_sink = sink;
dcd5fb82 2307 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2308 amdgpu_dm_update_freesync_caps(connector,
2309 aconnector->edid);
4562236b 2310 } else {
98e6436d 2311 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2312 if (!aconnector->dc_sink) {
4562236b 2313 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2314 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2315 }
4562236b
HW
2316 }
2317
2318 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2319
2320 if (sink)
2321 dc_sink_release(sink);
4562236b
HW
2322 return;
2323 }
2324
2325 /*
2326 * TODO: temporary guard to look for proper fix
2327 * if this sink is MST sink, we should not do anything
2328 */
dcd5fb82
MF
2329 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2330 dc_sink_release(sink);
4562236b 2331 return;
dcd5fb82 2332 }
4562236b
HW
2333
2334 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2335 /*
2336 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2337 * Do nothing!!
2338 */
f1ad2f5e 2339 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2340 aconnector->connector_id);
dcd5fb82
MF
2341 if (sink)
2342 dc_sink_release(sink);
4562236b
HW
2343 return;
2344 }
2345
f1ad2f5e 2346 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2347 aconnector->connector_id, aconnector->dc_sink, sink);
2348
2349 mutex_lock(&dev->mode_config.mutex);
2350
1f6010a9
DF
2351 /*
2352 * 1. Update status of the drm connector
2353 * 2. Send an event and let userspace tell us what to do
2354 */
4562236b 2355 if (sink) {
1f6010a9
DF
2356 /*
2357 * TODO: check if we still need the S3 mode update workaround.
2358 * If yes, put it here.
2359 */
4562236b 2360 if (aconnector->dc_sink)
98e6436d 2361 amdgpu_dm_update_freesync_caps(connector, NULL);
4562236b
HW
2362
2363 aconnector->dc_sink = sink;
dcd5fb82 2364 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2365 if (sink->dc_edid.length == 0) {
4562236b 2366 aconnector->edid = NULL;
e6142dd5
AP
2367 if (aconnector->dc_link->aux_mode) {
2368 drm_dp_cec_unset_edid(
2369 &aconnector->dm_dp_aux.aux);
2370 }
900b3cb1 2371 } else {
4562236b 2372 aconnector->edid =
e6142dd5 2373 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2374
c555f023 2375 drm_connector_update_edid_property(connector,
e6142dd5 2376 aconnector->edid);
b24bdc37 2377 drm_add_edid_modes(connector, aconnector->edid);
e6142dd5
AP
2378
2379 if (aconnector->dc_link->aux_mode)
2380 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2381 aconnector->edid);
4562236b 2382 }
e6142dd5 2383
98e6436d 2384 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2385 update_connector_ext_caps(aconnector);
4562236b 2386 } else {
e86e8947 2387 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2388 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2389 drm_connector_update_edid_property(connector, NULL);
4562236b 2390 aconnector->num_modes = 0;
dcd5fb82 2391 dc_sink_release(aconnector->dc_sink);
4562236b 2392 aconnector->dc_sink = NULL;
5326c452 2393 aconnector->edid = NULL;
0c8620d6
BL
2394#ifdef CONFIG_DRM_AMD_DC_HDCP
2395 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2396 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2397 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2398#endif
4562236b
HW
2399 }
2400
2401 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 2402
0f877894
OV
2403 update_subconnector_property(aconnector);
2404
dcd5fb82
MF
2405 if (sink)
2406 dc_sink_release(sink);
4562236b
HW
2407}
2408
2409static void handle_hpd_irq(void *param)
2410{
c84dec2f 2411 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2412 struct drm_connector *connector = &aconnector->base;
2413 struct drm_device *dev = connector->dev;
fbbdadf2 2414 enum dc_connection_type new_connection_type = dc_connection_none;
0c8620d6 2415#ifdef CONFIG_DRM_AMD_DC_HDCP
1348969a 2416 struct amdgpu_device *adev = drm_to_adev(dev);
0c8620d6 2417#endif
4562236b 2418
1f6010a9
DF
2419 /*
 2420 * In case of failure or MST there is no need to update the connector status or notify the OS,
 2421 * since (in the MST case) MST does this in its own context.
4562236b
HW
2422 */
2423 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2424
0c8620d6 2425#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 2426 if (adev->dm.hdcp_workqueue)
96a3b32e 2427 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
0c8620d6 2428#endif
2e0ac3d6
HW
2429 if (aconnector->fake_enable)
2430 aconnector->fake_enable = false;
2431
fbbdadf2
BL
2432 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2433 DRM_ERROR("KMS: Failed to detect connector\n");
2434
2435 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2436 emulated_link_detect(aconnector->dc_link);
2437
2438
2439 drm_modeset_lock_all(dev);
2440 dm_restore_drm_connector_state(dev, connector);
2441 drm_modeset_unlock_all(dev);
2442
2443 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2444 drm_kms_helper_hotplug_event(dev);
2445
2446 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
4562236b
HW
2447 amdgpu_dm_update_connector_after_detect(aconnector);
2448
2449
2450 drm_modeset_lock_all(dev);
2451 dm_restore_drm_connector_state(dev, connector);
2452 drm_modeset_unlock_all(dev);
2453
2454 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2455 drm_kms_helper_hotplug_event(dev);
2456 }
2457 mutex_unlock(&aconnector->hpd_lock);
2458
2459}
2460
c84dec2f 2461static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2462{
2463 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2464 uint8_t dret;
2465 bool new_irq_handled = false;
2466 int dpcd_addr;
2467 int dpcd_bytes_to_read;
2468
2469 const int max_process_count = 30;
2470 int process_count = 0;
2471
2472 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2473
2474 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2475 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2476 /* DPCD 0x200 - 0x201 for downstream IRQ */
2477 dpcd_addr = DP_SINK_COUNT;
2478 } else {
2479 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2480 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2481 dpcd_addr = DP_SINK_COUNT_ESI;
2482 }
2483
2484 dret = drm_dp_dpcd_read(
2485 &aconnector->dm_dp_aux.aux,
2486 dpcd_addr,
2487 esi,
2488 dpcd_bytes_to_read);
2489
2490 while (dret == dpcd_bytes_to_read &&
2491 process_count < max_process_count) {
2492 uint8_t retry;
2493 dret = 0;
2494
2495 process_count++;
2496
f1ad2f5e 2497 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
2498 /* handle HPD short pulse irq */
2499 if (aconnector->mst_mgr.mst_state)
2500 drm_dp_mst_hpd_irq(
2501 &aconnector->mst_mgr,
2502 esi,
2503 &new_irq_handled);
4562236b
HW
2504
2505 if (new_irq_handled) {
2506 /* ACK at DPCD to notify down stream */
2507 const int ack_dpcd_bytes_to_write =
2508 dpcd_bytes_to_read - 1;
2509
2510 for (retry = 0; retry < 3; retry++) {
2511 uint8_t wret;
2512
2513 wret = drm_dp_dpcd_write(
2514 &aconnector->dm_dp_aux.aux,
2515 dpcd_addr + 1,
2516 &esi[1],
2517 ack_dpcd_bytes_to_write);
2518 if (wret == ack_dpcd_bytes_to_write)
2519 break;
2520 }
2521
1f6010a9 2522 /* check if there is new irq to be handled */
4562236b
HW
2523 dret = drm_dp_dpcd_read(
2524 &aconnector->dm_dp_aux.aux,
2525 dpcd_addr,
2526 esi,
2527 dpcd_bytes_to_read);
2528
2529 new_irq_handled = false;
d4a6e8a9 2530 } else {
4562236b 2531 break;
d4a6e8a9 2532 }
4562236b
HW
2533 }
2534
2535 if (process_count == max_process_count)
f1ad2f5e 2536 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
2537}
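The DPCD window selected at the top of dm_handle_hpd_rx_irq() depends only on the sink's DPCD revision. A compact sketch (userspace C; the DP_* addresses are hardcoded here from the DisplayPort spec rather than pulled from the drm headers):

#include <stdio.h>

/* DPCD addresses per the DisplayPort spec; normally from drm_dp_helper.h. */
#define DP_SINK_COUNT       0x200
#define DP_LANE0_1_STATUS   0x202
#define DP_SINK_COUNT_ESI   0x2002
#define DP_PSR_ERROR_STATUS 0x2006

int main(void)
{
	unsigned int dpcd_rev;

	for (dpcd_rev = 0x11; dpcd_rev <= 0x12; dpcd_rev++) {
		int addr, len;

		if (dpcd_rev < 0x12) {
			/* DPCD 0x200 - 0x201 for downstream IRQ */
			addr = DP_SINK_COUNT;
			len = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		} else {
			/* DPCD 0x2002 - 0x2005 for downstream IRQ */
			addr = DP_SINK_COUNT_ESI;
			len = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		}
		printf("rev 0x%02x: read %d bytes at 0x%04x\n",
		       dpcd_rev, len, addr);
	}
	return 0;
}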
2538
2539static void handle_hpd_rx_irq(void *param)
2540{
c84dec2f 2541 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2542 struct drm_connector *connector = &aconnector->base;
2543 struct drm_device *dev = connector->dev;
53cbf65c 2544 struct dc_link *dc_link = aconnector->dc_link;
4562236b 2545 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
fbbdadf2 2546 enum dc_connection_type new_connection_type = dc_connection_none;
2a0f9270
BL
2547#ifdef CONFIG_DRM_AMD_DC_HDCP
2548 union hpd_irq_data hpd_irq_data;
1348969a 2549 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270
BL
2550
2551 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2552#endif
4562236b 2553
1f6010a9
DF
2554 /*
 2555 * TODO: Temporarily add a mutex so the hpd interrupt doesn't hit a gpio
4562236b
HW
 2556 * conflict; after the i2c helper is implemented, this mutex should be
 2557 * retired.
2558 */
53cbf65c 2559 if (dc_link->type != dc_connection_mst_branch)
4562236b
HW
2560 mutex_lock(&aconnector->hpd_lock);
2561
2a0f9270
BL
2562
2563#ifdef CONFIG_DRM_AMD_DC_HDCP
2564 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2565#else
4e18814e 2566 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2a0f9270 2567#endif
4562236b
HW
2568 !is_mst_root_connector) {
2569 /* Downstream Port status changed. */
fbbdadf2
BL
2570 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2571 DRM_ERROR("KMS: Failed to detect connector\n");
2572
2573 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2574 emulated_link_detect(dc_link);
2575
2576 if (aconnector->fake_enable)
2577 aconnector->fake_enable = false;
2578
2579 amdgpu_dm_update_connector_after_detect(aconnector);
2580
2581
2582 drm_modeset_lock_all(dev);
2583 dm_restore_drm_connector_state(dev, connector);
2584 drm_modeset_unlock_all(dev);
2585
2586 drm_kms_helper_hotplug_event(dev);
2587 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
2588
2589 if (aconnector->fake_enable)
2590 aconnector->fake_enable = false;
2591
4562236b
HW
2592 amdgpu_dm_update_connector_after_detect(aconnector);
2593
2594
2595 drm_modeset_lock_all(dev);
2596 dm_restore_drm_connector_state(dev, connector);
2597 drm_modeset_unlock_all(dev);
2598
2599 drm_kms_helper_hotplug_event(dev);
2600 }
2601 }
2a0f9270 2602#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
2603 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2604 if (adev->dm.hdcp_workqueue)
2605 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2606 }
2a0f9270 2607#endif
4562236b 2608 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
53cbf65c 2609 (dc_link->type == dc_connection_mst_branch))
4562236b
HW
2610 dm_handle_hpd_rx_irq(aconnector);
2611
e86e8947
HV
2612 if (dc_link->type != dc_connection_mst_branch) {
2613 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
4562236b 2614 mutex_unlock(&aconnector->hpd_lock);
e86e8947 2615 }
4562236b
HW
2616}
2617
2618static void register_hpd_handlers(struct amdgpu_device *adev)
2619{
4a580877 2620 struct drm_device *dev = adev_to_drm(adev);
4562236b 2621 struct drm_connector *connector;
c84dec2f 2622 struct amdgpu_dm_connector *aconnector;
4562236b
HW
2623 const struct dc_link *dc_link;
2624 struct dc_interrupt_params int_params = {0};
2625
2626 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2627 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2628
2629 list_for_each_entry(connector,
2630 &dev->mode_config.connector_list, head) {
2631
c84dec2f 2632 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2633 dc_link = aconnector->dc_link;
2634
2635 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2636 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2637 int_params.irq_source = dc_link->irq_source_hpd;
2638
2639 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2640 handle_hpd_irq,
2641 (void *) aconnector);
2642 }
2643
2644 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2645
2646 /* Also register for DP short pulse (hpd_rx). */
2647 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2648 int_params.irq_source = dc_link->irq_source_hpd_rx;
2649
2650 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2651 handle_hpd_rx_irq,
2652 (void *) aconnector);
2653 }
2654 }
2655}
2656
55e56389
MR
2657#if defined(CONFIG_DRM_AMD_DC_SI)
2658/* Register IRQ sources and initialize IRQ callbacks */
2659static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2660{
2661 struct dc *dc = adev->dm.dc;
2662 struct common_irq_params *c_irq_params;
2663 struct dc_interrupt_params int_params = {0};
2664 int r;
2665 int i;
2666 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2667
2668 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2669 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2670
2671 /*
2672 * Actions of amdgpu_irq_add_id():
2673 * 1. Register a set() function with base driver.
2674 * Base driver will call set() function to enable/disable an
2675 * interrupt in DC hardware.
2676 * 2. Register amdgpu_dm_irq_handler().
2677 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2678 * coming from DC hardware.
2679 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2680 * for acknowledging and handling. */
2681
2682 /* Use VBLANK interrupt */
2683 for (i = 0; i < adev->mode_info.num_crtc; i++) {
 2684 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2685 if (r) {
2686 DRM_ERROR("Failed to add crtc irq id!\n");
2687 return r;
2688 }
2689
2690 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2691 int_params.irq_source =
 2692 dc_interrupt_to_irq_source(dc, i + 1, 0);
2693
2694 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2695
2696 c_irq_params->adev = adev;
2697 c_irq_params->irq_src = int_params.irq_source;
2698
2699 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2700 dm_crtc_high_irq, c_irq_params);
2701 }
2702
2703 /* Use GRPH_PFLIP interrupt */
2704 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2705 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2706 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2707 if (r) {
2708 DRM_ERROR("Failed to add page flip irq id!\n");
2709 return r;
2710 }
2711
2712 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2713 int_params.irq_source =
2714 dc_interrupt_to_irq_source(dc, i, 0);
2715
2716 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2717
2718 c_irq_params->adev = adev;
2719 c_irq_params->irq_src = int_params.irq_source;
2720
2721 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2722 dm_pflip_high_irq, c_irq_params);
2723
2724 }
2725
2726 /* HPD */
2727 r = amdgpu_irq_add_id(adev, client_id,
2728 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2729 if (r) {
2730 DRM_ERROR("Failed to add hpd irq id!\n");
2731 return r;
2732 }
2733
2734 register_hpd_handlers(adev);
2735
2736 return 0;
2737}
2738#endif
2739
4562236b
HW
2740/* Register IRQ sources and initialize IRQ callbacks */
2741static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2742{
2743 struct dc *dc = adev->dm.dc;
2744 struct common_irq_params *c_irq_params;
2745 struct dc_interrupt_params int_params = {0};
2746 int r;
2747 int i;
1ffdeca6 2748 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 2749
84374725 2750 if (adev->asic_type >= CHIP_VEGA10)
3760f76c 2751 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
2752
2753 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2754 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2755
1f6010a9
DF
2756 /*
2757 * Actions of amdgpu_irq_add_id():
4562236b
HW
2758 * 1. Register a set() function with base driver.
2759 * Base driver will call set() function to enable/disable an
2760 * interrupt in DC hardware.
2761 * 2. Register amdgpu_dm_irq_handler().
2762 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2763 * coming from DC hardware.
2764 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2765 * for acknowledging and handling. */
2766
b57de80a 2767 /* Use VBLANK interrupt */
e9029155 2768 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 2769 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
2770 if (r) {
2771 DRM_ERROR("Failed to add crtc irq id!\n");
2772 return r;
2773 }
2774
2775 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2776 int_params.irq_source =
3d761e79 2777 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 2778
b57de80a 2779 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
2780
2781 c_irq_params->adev = adev;
2782 c_irq_params->irq_src = int_params.irq_source;
2783
2784 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2785 dm_crtc_high_irq, c_irq_params);
2786 }
2787
d2574c33
MK
2788 /* Use VUPDATE interrupt */
2789 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2790 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2791 if (r) {
2792 DRM_ERROR("Failed to add vupdate irq id!\n");
2793 return r;
2794 }
2795
2796 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2797 int_params.irq_source =
2798 dc_interrupt_to_irq_source(dc, i, 0);
2799
2800 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2801
2802 c_irq_params->adev = adev;
2803 c_irq_params->irq_src = int_params.irq_source;
2804
2805 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2806 dm_vupdate_high_irq, c_irq_params);
2807 }
2808
3d761e79 2809 /* Use GRPH_PFLIP interrupt */
4562236b
HW
2810 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2811 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 2812 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
2813 if (r) {
2814 DRM_ERROR("Failed to add page flip irq id!\n");
2815 return r;
2816 }
2817
2818 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2819 int_params.irq_source =
2820 dc_interrupt_to_irq_source(dc, i, 0);
2821
2822 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2823
2824 c_irq_params->adev = adev;
2825 c_irq_params->irq_src = int_params.irq_source;
2826
2827 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2828 dm_pflip_high_irq, c_irq_params);
2829
2830 }
2831
2832 /* HPD */
2c8ad2d5
AD
2833 r = amdgpu_irq_add_id(adev, client_id,
2834 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
2835 if (r) {
2836 DRM_ERROR("Failed to add hpd irq id!\n");
2837 return r;
2838 }
2839
2840 register_hpd_handlers(adev);
2841
2842 return 0;
2843}
2844
b86a1aa3 2845#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
2846/* Register IRQ sources and initialize IRQ callbacks */
2847static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2848{
2849 struct dc *dc = adev->dm.dc;
2850 struct common_irq_params *c_irq_params;
2851 struct dc_interrupt_params int_params = {0};
2852 int r;
2853 int i;
2854
2855 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2856 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2857
1f6010a9
DF
2858 /*
2859 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
2860 * 1. Register a set() function with base driver.
2861 * Base driver will call set() function to enable/disable an
2862 * interrupt in DC hardware.
2863 * 2. Register amdgpu_dm_irq_handler().
2864 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2865 * coming from DC hardware.
2866 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2867 * for acknowledging and handling.
1f6010a9 2868 */
ff5ef992
AD
2869
2870 /* Use VSTARTUP interrupt */
2871 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2872 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2873 i++) {
3760f76c 2874 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
2875
2876 if (r) {
2877 DRM_ERROR("Failed to add crtc irq id!\n");
2878 return r;
2879 }
2880
2881 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2882 int_params.irq_source =
2883 dc_interrupt_to_irq_source(dc, i, 0);
2884
2885 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2886
2887 c_irq_params->adev = adev;
2888 c_irq_params->irq_src = int_params.irq_source;
2889
2346ef47
NK
2890 amdgpu_dm_irq_register_interrupt(
2891 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2892 }
2893
2894 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2895 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2896 * to trigger at end of each vblank, regardless of state of the lock,
2897 * matching DCE behaviour.
2898 */
2899 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2900 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2901 i++) {
2902 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2903
2904 if (r) {
2905 DRM_ERROR("Failed to add vupdate irq id!\n");
2906 return r;
2907 }
2908
2909 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2910 int_params.irq_source =
2911 dc_interrupt_to_irq_source(dc, i, 0);
2912
2913 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2914
2915 c_irq_params->adev = adev;
2916 c_irq_params->irq_src = int_params.irq_source;
2917
ff5ef992 2918 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 2919 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
2920 }
2921
ff5ef992
AD
2922 /* Use GRPH_PFLIP interrupt */
2923 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2924 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2925 i++) {
3760f76c 2926 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
2927 if (r) {
2928 DRM_ERROR("Failed to add page flip irq id!\n");
2929 return r;
2930 }
2931
2932 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2933 int_params.irq_source =
2934 dc_interrupt_to_irq_source(dc, i, 0);
2935
2936 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2937
2938 c_irq_params->adev = adev;
2939 c_irq_params->irq_src = int_params.irq_source;
2940
2941 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2942 dm_pflip_high_irq, c_irq_params);
2943
2944 }
2945
2946 /* HPD */
3760f76c 2947 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
ff5ef992
AD
2948 &adev->hpd_irq);
2949 if (r) {
2950 DRM_ERROR("Failed to add hpd irq id!\n");
2951 return r;
2952 }
2953
2954 register_hpd_handlers(adev);
2955
2956 return 0;
2957}
2958#endif
2959
eb3dc897
NK
2960/*
2961 * Acquires the lock for the atomic state object and returns
2962 * the new atomic state.
2963 *
2964 * This should only be called during atomic check.
2965 */
2966static int dm_atomic_get_state(struct drm_atomic_state *state,
2967 struct dm_atomic_state **dm_state)
2968{
2969 struct drm_device *dev = state->dev;
1348969a 2970 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
2971 struct amdgpu_display_manager *dm = &adev->dm;
2972 struct drm_private_state *priv_state;
eb3dc897
NK
2973
2974 if (*dm_state)
2975 return 0;
2976
eb3dc897
NK
2977 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2978 if (IS_ERR(priv_state))
2979 return PTR_ERR(priv_state);
2980
2981 *dm_state = to_dm_atomic_state(priv_state);
2982
2983 return 0;
2984}
2985
dfd84d90 2986static struct dm_atomic_state *
eb3dc897
NK
2987dm_atomic_get_new_state(struct drm_atomic_state *state)
2988{
2989 struct drm_device *dev = state->dev;
1348969a 2990 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
2991 struct amdgpu_display_manager *dm = &adev->dm;
2992 struct drm_private_obj *obj;
2993 struct drm_private_state *new_obj_state;
2994 int i;
2995
2996 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2997 if (obj->funcs == dm->atomic_obj.funcs)
2998 return to_dm_atomic_state(new_obj_state);
2999 }
3000
3001 return NULL;
3002}
3003
eb3dc897
NK
3004static struct drm_private_state *
3005dm_atomic_duplicate_state(struct drm_private_obj *obj)
3006{
3007 struct dm_atomic_state *old_state, *new_state;
3008
3009 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3010 if (!new_state)
3011 return NULL;
3012
3013 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3014
813d20dc
AW
3015 old_state = to_dm_atomic_state(obj->state);
3016
3017 if (old_state && old_state->context)
3018 new_state->context = dc_copy_state(old_state->context);
3019
eb3dc897
NK
3020 if (!new_state->context) {
3021 kfree(new_state);
3022 return NULL;
3023 }
3024
eb3dc897
NK
3025 return &new_state->base;
3026}
3027
3028static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3029 struct drm_private_state *state)
3030{
3031 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3032
3033 if (dm_state && dm_state->context)
3034 dc_release_state(dm_state->context);
3035
3036 kfree(dm_state);
3037}
3038
3039static struct drm_private_state_funcs dm_atomic_state_funcs = {
3040 .atomic_duplicate_state = dm_atomic_duplicate_state,
3041 .atomic_destroy_state = dm_atomic_destroy_state,
3042};
3043
4562236b
HW
3044static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3045{
eb3dc897 3046 struct dm_atomic_state *state;
4562236b
HW
3047 int r;
3048
3049 adev->mode_info.mode_config_initialized = true;
3050
4a580877
LT
3051 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3052 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3053
4a580877
LT
3054 adev_to_drm(adev)->mode_config.max_width = 16384;
3055 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3056
4a580877
LT
3057 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3058 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3059 /* indicates support for immediate flip */
4a580877 3060 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3061
4a580877 3062 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3063
eb3dc897
NK
3064 state = kzalloc(sizeof(*state), GFP_KERNEL);
3065 if (!state)
3066 return -ENOMEM;
3067
813d20dc 3068 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3069 if (!state->context) {
3070 kfree(state);
3071 return -ENOMEM;
3072 }
3073
3074 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3075
4a580877 3076 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3077 &adev->dm.atomic_obj,
eb3dc897
NK
3078 &state->base,
3079 &dm_atomic_state_funcs);
3080
3dc9b1ce 3081 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3082 if (r) {
3083 dc_release_state(state->context);
3084 kfree(state);
4562236b 3085 return r;
b67a468a 3086 }
4562236b 3087
6ce8f316 3088 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3089 if (r) {
3090 dc_release_state(state->context);
3091 kfree(state);
6ce8f316 3092 return r;
b67a468a 3093 }
6ce8f316 3094
4562236b
HW
3095 return 0;
3096}
3097
206bbafe
DF
3098#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3099#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3100#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3101
4562236b
HW
3102#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3103 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3104
206bbafe
DF
3105static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3106{
3107#if defined(CONFIG_ACPI)
3108 struct amdgpu_dm_backlight_caps caps;
3109
58965855
FS
3110 memset(&caps, 0, sizeof(caps));
3111
206bbafe
DF
3112 if (dm->backlight_caps.caps_valid)
3113 return;
3114
3115 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3116 if (caps.caps_valid) {
94562810
RS
3117 dm->backlight_caps.caps_valid = true;
3118 if (caps.aux_support)
3119 return;
206bbafe
DF
3120 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3121 dm->backlight_caps.max_input_signal = caps.max_input_signal;
206bbafe
DF
3122 } else {
3123 dm->backlight_caps.min_input_signal =
3124 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3125 dm->backlight_caps.max_input_signal =
3126 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3127 }
3128#else
94562810
RS
3129 if (dm->backlight_caps.aux_support)
3130 return;
3131
8bcbc9ef
DF
3132 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3133 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3134#endif
3135}
3136
94562810
RS
3137static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3138{
3139 bool rc;
3140
3141 if (!link)
3142 return 1;
3143
3144 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3145 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3146
3147 return rc ? 0 : 1;
3148}
3149
69d9f427
AM
3150static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3151 unsigned *min, unsigned *max)
94562810 3152{
94562810 3153 if (!caps)
69d9f427 3154 return 0;
94562810 3155
69d9f427
AM
3156 if (caps->aux_support) {
3157 // Firmware limits are in nits, DC API wants millinits.
3158 *max = 1000 * caps->aux_max_input_signal;
3159 *min = 1000 * caps->aux_min_input_signal;
94562810 3160 } else {
69d9f427
AM
3161 // Firmware limits are 8-bit, PWM control is 16-bit.
3162 *max = 0x101 * caps->max_input_signal;
3163 *min = 0x101 * caps->min_input_signal;
94562810 3164 }
69d9f427
AM
3165 return 1;
3166}
94562810 3167
69d9f427
AM
3168static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3169 uint32_t brightness)
3170{
3171 unsigned min, max;
94562810 3172
69d9f427
AM
3173 if (!get_brightness_range(caps, &min, &max))
3174 return brightness;
3175
3176 // Rescale 0..255 to min..max
3177 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3178 AMDGPU_MAX_BL_LEVEL);
3179}
3180
3181static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3182 uint32_t brightness)
3183{
3184 unsigned min, max;
3185
3186 if (!get_brightness_range(caps, &min, &max))
3187 return brightness;
3188
3189 if (brightness < min)
3190 return 0;
3191 // Rescale min..max to 0..255
3192 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3193 max - min);
94562810
RS
3194}
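The two rescalings above form a round trip between the user's 0..255 scale and the firmware's min..max range. A standalone sketch (plain C; AMDGPU_MAX_BL_LEVEL is 255, and DIV_ROUND_CLOSEST is re-derived here for unsigned operands instead of being included from kernel headers):

#include <stdio.h>

#define MAX_BL_LEVEL 255u /* matches AMDGPU_MAX_BL_LEVEL */
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

/* Rescale 0..255 to min..max, as in convert_brightness_from_user(). */
static unsigned int from_user(unsigned int min, unsigned int max,
			      unsigned int b)
{
	return min + DIV_ROUND_CLOSEST((max - min) * b, MAX_BL_LEVEL);
}

/* Rescale min..max back to 0..255, as in convert_brightness_to_user(). */
static unsigned int to_user(unsigned int min, unsigned int max,
			    unsigned int b)
{
	if (b < min)
		return 0;
	return DIV_ROUND_CLOSEST(MAX_BL_LEVEL * (b - min), max - min);
}

int main(void)
{
	/* PWM case: 8-bit firmware limits scaled by 0x101 up to 16 bits. */
	unsigned int min = 0x101 * 12, max = 0x101 * 255, user;

	for (user = 0; user <= 255; user += 85) {
		unsigned int hw = from_user(min, max, user);

		printf("user %3u -> hw %5u -> user %3u\n",
		       user, hw, to_user(min, max, hw));
	}
	return 0;
}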
3195
4562236b
HW
3196static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3197{
3198 struct amdgpu_display_manager *dm = bl_get_data(bd);
206bbafe 3199 struct amdgpu_dm_backlight_caps caps;
94562810
RS
3200 struct dc_link *link = NULL;
3201 u32 brightness;
3202 bool rc;
4562236b 3203
206bbafe
DF
3204 amdgpu_dm_update_backlight_caps(dm);
3205 caps = dm->backlight_caps;
94562810
RS
3206
3207 link = (struct dc_link *)dm->backlight_link;
3208
69d9f427 3209 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
94562810
RS
3210 // Change brightness based on AUX property
3211 if (caps.aux_support)
3212 return set_backlight_via_aux(link, brightness);
3213
3214 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3215
3216 return rc ? 0 : 1;
4562236b
HW
3217}
3218
3219static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3220{
620a0d27
DF
3221 struct amdgpu_display_manager *dm = bl_get_data(bd);
3222 int ret = dc_link_get_backlight_level(dm->backlight_link);
3223
3224 if (ret == DC_ERROR_UNEXPECTED)
3225 return bd->props.brightness;
69d9f427 3226 return convert_brightness_to_user(&dm->backlight_caps, ret);
4562236b
HW
3227}
3228
3229static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 3230 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
3231 .get_brightness = amdgpu_dm_backlight_get_brightness,
3232 .update_status = amdgpu_dm_backlight_update_status,
3233};
3234
7578ecda
AD
3235static void
3236amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
3237{
3238 char bl_name[16];
3239 struct backlight_properties props = { 0 };
3240
206bbafe
DF
3241 amdgpu_dm_update_backlight_caps(dm);
3242
4562236b 3243 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 3244 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
3245 props.type = BACKLIGHT_RAW;
3246
3247 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4a580877 3248 adev_to_drm(dm->adev)->primary->index);
4562236b
HW
3249
3250 dm->backlight_dev = backlight_device_register(bl_name,
4a580877
LT
3251 adev_to_drm(dm->adev)->dev,
3252 dm,
3253 &amdgpu_dm_backlight_ops,
3254 &props);
4562236b 3255
74baea42 3256 if (IS_ERR(dm->backlight_dev))
4562236b
HW
3257 DRM_ERROR("DM: Backlight registration failed!\n");
3258 else
f1ad2f5e 3259 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
3260}
3261
3262#endif
3263
df534fff 3264static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 3265 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
3266 enum drm_plane_type plane_type,
3267 const struct dc_plane_cap *plane_cap)
df534fff 3268{
f180b4bc 3269 struct drm_plane *plane;
df534fff
S
3270 unsigned long possible_crtcs;
3271 int ret = 0;
3272
f180b4bc 3273 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
3274 if (!plane) {
3275 DRM_ERROR("KMS: Failed to allocate plane\n");
3276 return -ENOMEM;
3277 }
b2fddb13 3278 plane->type = plane_type;
df534fff
S
3279
3280 /*
b2fddb13
NK
3281 * HACK: IGT tests expect that the primary plane for a CRTC
3282 * can only have one possible CRTC. Only expose support for
3283 * any CRTC if they're not going to be used as a primary plane
3284 * for a CRTC - like overlay or underlay planes.
df534fff
S
3285 */
3286 possible_crtcs = 1 << plane_id;
3287 if (plane_id >= dm->dc->caps.max_streams)
3288 possible_crtcs = 0xff;
3289
cc1fec57 3290 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
3291
3292 if (ret) {
3293 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 3294 kfree(plane);
df534fff
S
3295 return ret;
3296 }
3297
54087768
NK
3298 if (mode_info)
3299 mode_info->planes[plane_id] = plane;
3300
df534fff
S
3301 return ret;
3302}
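The possible_crtcs values chosen in initialize_plane() are plain bitmasks with one bit per CRTC. A tiny sketch (userspace C, illustrative only) of what the two branches expose:

#include <stdio.h>

int main(void)
{
	int max_streams = 4; /* stand-in for dm->dc->caps.max_streams */
	int plane_id;

	for (plane_id = 0; plane_id < 6; plane_id++) {
		/* Primary planes bind to exactly one CRTC; planes past
		 * max_streams (overlay/underlay) may bind to any CRTC. */
		unsigned long possible_crtcs = 1ul << plane_id;

		if (plane_id >= max_streams)
			possible_crtcs = 0xff;

		printf("plane %d -> possible_crtcs 0x%02lx\n",
		       plane_id, possible_crtcs);
	}
	return 0;
}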
3303
89fc8d4e
HW
3304
3305static void register_backlight_device(struct amdgpu_display_manager *dm,
3306 struct dc_link *link)
3307{
3308#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3309 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3310
3311 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3312 link->type != dc_connection_none) {
1f6010a9
DF
3313 /*
 3314 * Even if registration fails, we should continue with
89fc8d4e
HW
 3315 * DM initialization because not having backlight control
 3316 * is better than a black screen.
3317 */
3318 amdgpu_dm_register_backlight_device(dm);
3319
3320 if (dm->backlight_dev)
3321 dm->backlight_link = link;
3322 }
3323#endif
3324}
3325
3326
1f6010a9
DF
3327/*
3328 * In this architecture, the association
4562236b
HW
3329 * connector -> encoder -> crtc
 3330 * is not really required. The crtc and connector will hold the
 3331 * display_index as an abstraction to use with the DAL component.
3332 *
3333 * Returns 0 on success
3334 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
	case CHIP_DIMGREY_CAVEFISH:
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
	case CHIP_VANGOGH:
#endif
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
	case CHIP_NAVI10:
	case CHIP_NAVI12:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
	case CHIP_VANGOGH:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
	case CHIP_NAVI14:
#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
	case CHIP_DIMGREY_CAVEFISH:
#endif
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_RENOIR:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* TODO: Validate scaling per-format with DC plane caps */
	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < 250 || scale_w > 16000)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < 250 || scale_h > 16000)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}

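/*
 * Worked example for the scale-factor check above (illustrative numbers):
 * the factors are fixed-point with three decimal places, so the allowed
 * range 250..16000 corresponds to 0.25x downscale through 16x upscale per
 * axis. Downscaling a 1920-wide source into a 480-wide destination gives
 * scale_w = 480 * 1000 / 1920 = 250, exactly the minimum; a 479-wide
 * destination would compute 249 and be rejected with -EINVAL.
 */
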
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
		       uint64_t *tiling_flags, bool *tmz_surface)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!amdgpu_fb) {
		*tiling_flags = 0;
		*tmz_surface = false;
		return 0;
	}

	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	if (tmz_surface)
		*tmz_surface = amdgpu_bo_encrypted(rbo);

	amdgpu_bo_unreserve(rbo);

	return r;
}

static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
{
	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);

	return offset ? (address + offset * 256) : 0;
}

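/*
 * Worked example (illustrative numbers): DCC_OFFSET_256B encodes the offset
 * of the DCC metadata in 256-byte units, so an encoded offset of 0x400
 * places the metadata at address + 0x400 * 256 = address + 0x40000.
 * An offset of zero means the buffer carries no DCC metadata at all, which
 * is why the function returns 0 rather than the base address.
 */
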
static int
fill_plane_dcc_attributes(struct amdgpu_device *adev,
			  const struct amdgpu_framebuffer *afb,
			  const enum surface_pixel_format format,
			  const enum dc_rotation_angle rotation,
			  const struct plane_size *plane_size,
			  const union dc_tiling_info *tiling_info,
			  const uint64_t info,
			  struct dc_plane_dcc_param *dcc,
			  struct dc_plane_address *address,
			  bool force_disable_dcc)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;
	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
	uint64_t dcc_address;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (force_disable_dcc)
		return 0;

	if (!offset)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	if (!dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	dcc->enable = 1;
	dcc->meta_pitch =
		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
	dcc->independent_64b_blks = i64b;

	dcc_address = get_dcc_address(afb->address, info);
	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);

	return 0;
}

static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(afb->address);
		address->grph.addr.high_part = upper_32_bits(afb->address);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(afb->address);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(afb->address);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_NAVI10 ||
	    adev->asic_type == CHIP_NAVI14 ||
	    adev->asic_type == CHIP_NAVI12 ||
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	    adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_02)
	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
	    adev->asic_type == CHIP_VANGOGH ||
#endif
	    adev->asic_type == CHIP_RENOIR ||
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		tiling_info->gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		tiling_info->gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		tiling_info->gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		tiling_info->gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		tiling_info->gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		tiling_info->gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		tiling_info->gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		tiling_info->gfx9.shaderEnable = 1;

#ifdef CONFIG_DRM_AMD_DC_DCN3_0
		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
		    adev->asic_type == CHIP_NAVY_FLOUNDER ||
		    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
		    adev->asic_type == CHIP_VANGOGH)
			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
#endif
		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
						plane_size, tiling_info,
						tiling_flags, dcc, address,
						force_disable_dcc);
		if (ret)
			return ret;
	}

	return 0;
}

static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

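/*
 * Worked example for the global alpha conversion above (illustrative
 * numbers): DRM expresses plane alpha as a 16-bit value (0x0000..0xffff)
 * while DC takes an 8-bit value, so the top byte is kept. A plane alpha of
 * 0x8000 (roughly 50%) becomes 0x8000 >> 8 = 0x80, and the fully opaque
 * default 0xffff never takes this path because of the < 0xffff check,
 * leaving global alpha disabled.
 */
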
static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  dm_plane_state->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  dm_plane_state->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			 dst.x, dst.y, dst.width, dst.height);

}

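/*
 * Worked example for the aspect-preserving branch above (illustrative
 * numbers): fitting a 1280x1024 source into a 1920x1080 stream gives
 * src.width * dst.height = 1382400, which is less than
 * src.height * dst.width = 1966080, so the height needs less upscaling and
 * the width is derived from it: dst.width = 1280 * 1080 / 1024 = 1350.
 * The image is then centered with dst.x = (1920 - 1350) / 2 = 285.
 */
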
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

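/*
 * Worked example for the capping logic above (illustrative numbers): a
 * panel reporting 12 bpc with a user-requested max_bpc of 11 is first
 * capped to 11, then rounded down to the nearest even value by
 * bpc - (bpc & 1), i.e. 11 - 1 = 10, selecting COLOR_DEPTH_101010.
 */
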
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
		 * respectively.
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

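/*
 * Worked example for the 27030 kHz (27.03 MHz) threshold above
 * (illustrative timings, pix_clk_100hz is in units of 100 Hz): 480p at a
 * 27.000 MHz pixel clock has pix_clk_100hz = 270000, which is not above
 * 270300, so it is treated as SDTV and gets YCbCr601; 720p at 74.25 MHz
 * (pix_clk_100hz = 742500) clears the threshold and gets YCbCr709.
 */
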
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}

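/*
 * Worked example for the clock normalization above (illustrative numbers,
 * assuming max_tmds_clock is in kHz as DRM documents it): a 300000 kHz
 * mode at 12 bpc needs 300000 * 36 / 24 = 450000 kHz of TMDS bandwidth.
 * Against a 340000 kHz sink limit that fails, so the loop retries at
 * 10 bpc (300000 * 30 / 24 = 375000 kHz, still too high) and finally
 * accepts 8 bpc at 300000 kHz.
 */
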
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}

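/*
 * Worked example for the porch/sync math above, using the standard CEA
 * 1080p60 timing (illustrative): crtc_hdisplay = 1920,
 * crtc_hsync_start = 2008, crtc_hsync_end = 2052, crtc_htotal = 2200, so
 * h_front_porch = 2008 - 1920 = 88 and h_sync_width = 2052 - 2008 = 44;
 * with crtc_clock = 148500 kHz the stored pix_clk_100hz becomes
 * 148500 * 10 = 1485000 (148.5 MHz).
 */
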
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}

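/*
 * Worked example for the refresh-rate computation in set_master_stream()
 * above (illustrative timing): for 1080p60, pix_clk_100hz = 1485000, so
 * (1485000 * 100) / (2200 * 1125) = 148500000 / 2475000 = 60 Hz. The
 * stream with the highest such rate becomes the reset event source for
 * the other synchronized streams.
 */
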
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;

	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream, requested_bpc);

	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
			/* Set DSC policy according to dsc_clock_en */
			dc_dsc_policy_set_enable_dsc_when_not_needed(
				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
			/* Overwrite the stream flag if DSC is enabled through debugfs */
			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
				stream->timing.flags.DSC = 1;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
		}
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports the VSC SDP colorimetry
		 * capability before building the VSC info packet.
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	dc_sink_release(sink);

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

4947static struct drm_crtc_state *
4948dm_crtc_duplicate_state(struct drm_crtc *crtc)
4949{
4950 struct dm_crtc_state *state, *cur;
4951
4952 cur = to_dm_crtc_state(crtc->state);
4953
4954 if (WARN_ON(!crtc->state))
4955 return NULL;
4956
2004f45e 4957 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
4958 if (!state)
4959 return NULL;
e7b07cee
HW
4960
4961 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4962
4963 if (cur->stream) {
4964 state->stream = cur->stream;
4965 dc_stream_retain(state->stream);
4966 }
4967
d6ef9b41 4968 state->active_planes = cur->active_planes;
98e6436d 4969 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 4970 state->abm_level = cur->abm_level;
bb47de73
NK
4971 state->vrr_supported = cur->vrr_supported;
4972 state->freesync_config = cur->freesync_config;
14b25846 4973 state->crc_src = cur->crc_src;
cf020d49
NK
4974 state->cm_has_degamma = cur->cm_has_degamma;
4975 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
98e6436d 4976
e7b07cee
HW
 4977 /* TODO: Duplicate dc_stream after the stream object is flattened */
4978
4979 return &state->base;
4980}
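
/*
 * A sketch of the refcount pairing dm_crtc_duplicate_state() and
 * dm_crtc_destroy_state() rely on: every duplicated CRTC state that
 * carries a dc_stream takes its own reference, dropped again on destroy.
 * The struct below is a hypothetical reduction, not the driver's type.
 */
struct sketch_crtc_state {
	struct dc_stream_state *stream;
};

static void sketch_dup(struct sketch_crtc_state *dst,
		       const struct sketch_crtc_state *src)
{
	dst->stream = src->stream;
	if (dst->stream)
		dc_stream_retain(dst->stream);	/* +1: new state owns a ref */
}

static void sketch_destroy(struct sketch_crtc_state *s)
{
	if (s->stream)
		dc_stream_release(s->stream);	/* -1: balanced on teardown */
}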
4981
d2574c33
MK
4982static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4983{
4984 enum dc_irq_source irq_source;
4985 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 4986 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
4987 int rc;
4988
4989 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4990
4991 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4992
4993 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4994 acrtc->crtc_id, enable ? "en" : "dis", rc);
4995 return rc;
4996}
589d2739
HW
4997
4998static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4999{
5000 enum dc_irq_source irq_source;
5001 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5002 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
5003 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5004 int rc = 0;
5005
5006 if (enable) {
5007 /* vblank irq on -> Only need vupdate irq in vrr mode */
5008 if (amdgpu_dm_vrr_active(acrtc_state))
5009 rc = dm_set_vupdate_irq(crtc, true);
5010 } else {
5011 /* vblank irq off -> vupdate irq off */
5012 rc = dm_set_vupdate_irq(crtc, false);
5013 }
5014
5015 if (rc)
5016 return rc;
589d2739
HW
5017
5018 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
a0e30392 5019 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
589d2739
HW
5020}
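
/*
 * The IRQ pairing implemented above, as a one-line sketch: the vupdate
 * interrupt is only needed while vblank is on *and* VRR is active (it
 * tracks the variable front porch), and it is always torn down together
 * with vblank. "vrr_active" stands in for amdgpu_dm_vrr_active().
 */
static bool sketch_want_vupdate_irq(bool vblank_enable, bool vrr_active)
{
	return vblank_enable && vrr_active;
}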
5021
5022static int dm_enable_vblank(struct drm_crtc *crtc)
5023{
5024 return dm_set_vblank(crtc, true);
5025}
5026
5027static void dm_disable_vblank(struct drm_crtc *crtc)
5028{
5029 dm_set_vblank(crtc, false);
5030}
5031
e7b07cee
HW
 5032/* Only the options currently available for the driver are implemented */
5033static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5034 .reset = dm_crtc_reset_state,
5035 .destroy = amdgpu_dm_crtc_destroy,
5036 .gamma_set = drm_atomic_helper_legacy_gamma_set,
5037 .set_config = drm_atomic_helper_set_config,
5038 .page_flip = drm_atomic_helper_page_flip,
5039 .atomic_duplicate_state = dm_crtc_duplicate_state,
5040 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 5041 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 5042 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 5043 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 5044 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
5045 .enable_vblank = dm_enable_vblank,
5046 .disable_vblank = dm_disable_vblank,
e3eff4b5 5047 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
e7b07cee
HW
5048};
5049
5050static enum drm_connector_status
5051amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5052{
5053 bool connected;
c84dec2f 5054 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5055
1f6010a9
DF
5056 /*
5057 * Notes:
e7b07cee
HW
5058 * 1. This interface is NOT called in context of HPD irq.
 5059 * 2. This interface *is called* in the context of a user-mode ioctl, which
1f6010a9
DF
5060 * makes it a bad place for *any* MST-related activity.
5061 */
e7b07cee 5062
8580d60b
HW
5063 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5064 !aconnector->fake_enable)
e7b07cee
HW
5065 connected = (aconnector->dc_sink != NULL);
5066 else
5067 connected = (aconnector->base.force == DRM_FORCE_ON);
5068
0f877894
OV
5069 update_subconnector_property(aconnector);
5070
e7b07cee
HW
5071 return (connected ? connector_status_connected :
5072 connector_status_disconnected);
5073}
5074
3ee6b26b
AD
5075int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5076 struct drm_connector_state *connector_state,
5077 struct drm_property *property,
5078 uint64_t val)
e7b07cee
HW
5079{
5080 struct drm_device *dev = connector->dev;
1348969a 5081 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5082 struct dm_connector_state *dm_old_state =
5083 to_dm_connector_state(connector->state);
5084 struct dm_connector_state *dm_new_state =
5085 to_dm_connector_state(connector_state);
5086
5087 int ret = -EINVAL;
5088
5089 if (property == dev->mode_config.scaling_mode_property) {
5090 enum amdgpu_rmx_type rmx_type;
5091
5092 switch (val) {
5093 case DRM_MODE_SCALE_CENTER:
5094 rmx_type = RMX_CENTER;
5095 break;
5096 case DRM_MODE_SCALE_ASPECT:
5097 rmx_type = RMX_ASPECT;
5098 break;
5099 case DRM_MODE_SCALE_FULLSCREEN:
5100 rmx_type = RMX_FULL;
5101 break;
5102 case DRM_MODE_SCALE_NONE:
5103 default:
5104 rmx_type = RMX_OFF;
5105 break;
5106 }
5107
5108 if (dm_old_state->scaling == rmx_type)
5109 return 0;
5110
5111 dm_new_state->scaling = rmx_type;
5112 ret = 0;
5113 } else if (property == adev->mode_info.underscan_hborder_property) {
5114 dm_new_state->underscan_hborder = val;
5115 ret = 0;
5116 } else if (property == adev->mode_info.underscan_vborder_property) {
5117 dm_new_state->underscan_vborder = val;
5118 ret = 0;
5119 } else if (property == adev->mode_info.underscan_property) {
5120 dm_new_state->underscan_enable = val;
5121 ret = 0;
c1ee92f9
DF
5122 } else if (property == adev->mode_info.abm_level_property) {
5123 dm_new_state->abm_level = val;
5124 ret = 0;
e7b07cee
HW
5125 }
5126
5127 return ret;
5128}
5129
3ee6b26b
AD
5130int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5131 const struct drm_connector_state *state,
5132 struct drm_property *property,
5133 uint64_t *val)
e7b07cee
HW
5134{
5135 struct drm_device *dev = connector->dev;
1348969a 5136 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5137 struct dm_connector_state *dm_state =
5138 to_dm_connector_state(state);
5139 int ret = -EINVAL;
5140
5141 if (property == dev->mode_config.scaling_mode_property) {
5142 switch (dm_state->scaling) {
5143 case RMX_CENTER:
5144 *val = DRM_MODE_SCALE_CENTER;
5145 break;
5146 case RMX_ASPECT:
5147 *val = DRM_MODE_SCALE_ASPECT;
5148 break;
5149 case RMX_FULL:
5150 *val = DRM_MODE_SCALE_FULLSCREEN;
5151 break;
5152 case RMX_OFF:
5153 default:
5154 *val = DRM_MODE_SCALE_NONE;
5155 break;
5156 }
5157 ret = 0;
5158 } else if (property == adev->mode_info.underscan_hborder_property) {
5159 *val = dm_state->underscan_hborder;
5160 ret = 0;
5161 } else if (property == adev->mode_info.underscan_vborder_property) {
5162 *val = dm_state->underscan_vborder;
5163 ret = 0;
5164 } else if (property == adev->mode_info.underscan_property) {
5165 *val = dm_state->underscan_enable;
5166 ret = 0;
c1ee92f9
DF
5167 } else if (property == adev->mode_info.abm_level_property) {
5168 *val = dm_state->abm_level;
5169 ret = 0;
e7b07cee 5170 }
c1ee92f9 5171
e7b07cee
HW
5172 return ret;
5173}
5174
526c654a
ED
5175static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5176{
5177 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5178
5179 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5180}
5181
7578ecda 5182static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 5183{
c84dec2f 5184 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5185 const struct dc_link *link = aconnector->dc_link;
1348969a 5186 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 5187 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 5188
f5213f82
AG
5189 /*
 5190 * Call only if mst_mgr was initialized before, since it's not done
5191 * for all connector types.
5192 */
5193 if (aconnector->mst_mgr.dev)
5194 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5195
e7b07cee
HW
5196#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5197 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5198
89fc8d4e 5199 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
5200 link->type != dc_connection_none &&
5201 dm->backlight_dev) {
5202 backlight_device_unregister(dm->backlight_dev);
5203 dm->backlight_dev = NULL;
e7b07cee
HW
5204 }
5205#endif
dcd5fb82
MF
5206
5207 if (aconnector->dc_em_sink)
5208 dc_sink_release(aconnector->dc_em_sink);
5209 aconnector->dc_em_sink = NULL;
5210 if (aconnector->dc_sink)
5211 dc_sink_release(aconnector->dc_sink);
5212 aconnector->dc_sink = NULL;
5213
e86e8947 5214 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
5215 drm_connector_unregister(connector);
5216 drm_connector_cleanup(connector);
526c654a
ED
5217 if (aconnector->i2c) {
5218 i2c_del_adapter(&aconnector->i2c->base);
5219 kfree(aconnector->i2c);
5220 }
7daec99f 5221 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 5222
e7b07cee
HW
5223 kfree(connector);
5224}
5225
5226void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5227{
5228 struct dm_connector_state *state =
5229 to_dm_connector_state(connector->state);
5230
df099b9b
LSL
5231 if (connector->state)
5232 __drm_atomic_helper_connector_destroy_state(connector->state);
5233
e7b07cee
HW
5234 kfree(state);
5235
5236 state = kzalloc(sizeof(*state), GFP_KERNEL);
5237
5238 if (state) {
5239 state->scaling = RMX_OFF;
5240 state->underscan_enable = false;
5241 state->underscan_hborder = 0;
5242 state->underscan_vborder = 0;
01933ba4 5243 state->base.max_requested_bpc = 8;
3261e013
ML
5244 state->vcpi_slots = 0;
5245 state->pbn = 0;
c3e50f89
NK
5246 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5247 state->abm_level = amdgpu_dm_abm_level;
5248
df099b9b 5249 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
5250 }
5251}
5252
3ee6b26b
AD
5253struct drm_connector_state *
5254amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
5255{
5256 struct dm_connector_state *state =
5257 to_dm_connector_state(connector->state);
5258
5259 struct dm_connector_state *new_state =
5260 kmemdup(state, sizeof(*state), GFP_KERNEL);
5261
98e6436d
AK
5262 if (!new_state)
5263 return NULL;
e7b07cee 5264
98e6436d
AK
5265 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5266
5267 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 5268 new_state->abm_level = state->abm_level;
922454c2
NK
5269 new_state->scaling = state->scaling;
5270 new_state->underscan_enable = state->underscan_enable;
5271 new_state->underscan_hborder = state->underscan_hborder;
5272 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
5273 new_state->vcpi_slots = state->vcpi_slots;
5274 new_state->pbn = state->pbn;
98e6436d 5275 return &new_state->base;
e7b07cee
HW
5276}
5277
14f04fa4
AD
5278static int
5279amdgpu_dm_connector_late_register(struct drm_connector *connector)
5280{
5281 struct amdgpu_dm_connector *amdgpu_dm_connector =
5282 to_amdgpu_dm_connector(connector);
00a8037e 5283 int r;
14f04fa4 5284
00a8037e
AD
5285 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5286 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5287 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5288 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5289 if (r)
5290 return r;
5291 }
5292
5293#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
5294 connector_debugfs_init(amdgpu_dm_connector);
5295#endif
5296
5297 return 0;
5298}
5299
e7b07cee
HW
5300static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5301 .reset = amdgpu_dm_connector_funcs_reset,
5302 .detect = amdgpu_dm_connector_detect,
5303 .fill_modes = drm_helper_probe_single_connector_modes,
5304 .destroy = amdgpu_dm_connector_destroy,
5305 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5306 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5307 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 5308 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 5309 .late_register = amdgpu_dm_connector_late_register,
526c654a 5310 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
5311};
5312
e7b07cee
HW
5313static int get_modes(struct drm_connector *connector)
5314{
5315 return amdgpu_dm_connector_get_modes(connector);
5316}
5317
c84dec2f 5318static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5319{
5320 struct dc_sink_init_data init_params = {
5321 .link = aconnector->dc_link,
5322 .sink_signal = SIGNAL_TYPE_VIRTUAL
5323 };
70e8ffc5 5324 struct edid *edid;
e7b07cee 5325
a89ff457 5326 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
5327 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5328 aconnector->base.name);
5329
5330 aconnector->base.force = DRM_FORCE_OFF;
5331 aconnector->base.override_edid = false;
5332 return;
5333 }
5334
70e8ffc5
HW
5335 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5336
e7b07cee
HW
5337 aconnector->edid = edid;
5338
5339 aconnector->dc_em_sink = dc_link_add_remote_sink(
5340 aconnector->dc_link,
5341 (uint8_t *)edid,
5342 (edid->extensions + 1) * EDID_LENGTH,
5343 &init_params);
5344
dcd5fb82 5345 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
5346 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5347 aconnector->dc_link->local_sink :
5348 aconnector->dc_em_sink;
dcd5fb82
MF
5349 dc_sink_retain(aconnector->dc_sink);
5350 }
e7b07cee
HW
5351}
5352
c84dec2f 5353static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5354{
5355 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5356
1f6010a9
DF
5357 /*
 5358 * In case of a headless boot with force-on for a DP-managed connector,
e7b07cee
HW
 5359 * these settings have to be != 0 to get an initial modeset
5360 */
5361 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5362 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5363 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5364 }
5365
5366
5367 aconnector->base.override_edid = true;
5368 create_eml_sink(aconnector);
5369}
5370
cbd14ae7
SW
5371static struct dc_stream_state *
5372create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5373 const struct drm_display_mode *drm_mode,
5374 const struct dm_connector_state *dm_state,
5375 const struct dc_stream_state *old_stream)
5376{
5377 struct drm_connector *connector = &aconnector->base;
1348969a 5378 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 5379 struct dc_stream_state *stream;
4b7da34b
SW
5380 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5381 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
5382 enum dc_status dc_result = DC_OK;
5383
5384 do {
5385 stream = create_stream_for_sink(aconnector, drm_mode,
5386 dm_state, old_stream,
5387 requested_bpc);
5388 if (stream == NULL) {
5389 DRM_ERROR("Failed to create stream for sink!\n");
5390 break;
5391 }
5392
5393 dc_result = dc_validate_stream(adev->dm.dc, stream);
5394
5395 if (dc_result != DC_OK) {
74a16675 5396 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
5397 drm_mode->hdisplay,
5398 drm_mode->vdisplay,
5399 drm_mode->clock,
74a16675
RS
5400 dc_result,
5401 dc_status_to_str(dc_result));
cbd14ae7
SW
5402
5403 dc_stream_release(stream);
5404 stream = NULL;
5405 requested_bpc -= 2; /* lower bpc to retry validation */
5406 }
5407
5408 } while (stream == NULL && requested_bpc >= 6);
5409
5410 return stream;
5411}
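
/*
 * The retry loop above, reduced to a standalone sketch: walk the requested
 * bpc down in steps of 2 until validation succeeds or bpc drops below 6.
 * The validate callback is a stub standing in for dc_validate_stream().
 */
static int sketch_pick_bpc(int requested_bpc, bool (*validate)(int bpc))
{
	while (requested_bpc >= 6) {
		if (validate(requested_bpc))
			return requested_bpc;	/* e.g. 10 -> 8 -> 6 */
		requested_bpc -= 2;
	}
	return -1;	/* nothing in [6, requested_bpc] validated */
}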
5412
ba9ca088 5413enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 5414 struct drm_display_mode *mode)
e7b07cee
HW
5415{
5416 int result = MODE_ERROR;
5417 struct dc_sink *dc_sink;
e7b07cee 5418 /* TODO: Unhardcode stream count */
0971c40e 5419 struct dc_stream_state *stream;
c84dec2f 5420 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
5421
5422 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5423 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5424 return result;
5425
1f6010a9
DF
5426 /*
 5427 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
5428 * EDID mgmt
5429 */
5430 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5431 !aconnector->dc_em_sink)
5432 handle_edid_mgmt(aconnector);
5433
c84dec2f 5434 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 5435
b830ebc9 5436 if (dc_sink == NULL) {
e7b07cee
HW
5437 DRM_ERROR("dc_sink is NULL!\n");
5438 goto fail;
5439 }
5440
cbd14ae7
SW
5441 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5442 if (stream) {
5443 dc_stream_release(stream);
e7b07cee 5444 result = MODE_OK;
cbd14ae7 5445 }
e7b07cee
HW
5446
5447fail:
 5448 /* TODO: error handling */
5449 return result;
5450}
5451
88694af9
NK
5452static int fill_hdr_info_packet(const struct drm_connector_state *state,
5453 struct dc_info_packet *out)
5454{
5455 struct hdmi_drm_infoframe frame;
5456 unsigned char buf[30]; /* 26 + 4 */
5457 ssize_t len;
5458 int ret, i;
5459
5460 memset(out, 0, sizeof(*out));
5461
5462 if (!state->hdr_output_metadata)
5463 return 0;
5464
5465 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5466 if (ret)
5467 return ret;
5468
5469 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5470 if (len < 0)
5471 return (int)len;
5472
5473 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5474 if (len != 30)
5475 return -EINVAL;
5476
5477 /* Prepare the infopacket for DC. */
5478 switch (state->connector->connector_type) {
5479 case DRM_MODE_CONNECTOR_HDMIA:
5480 out->hb0 = 0x87; /* type */
5481 out->hb1 = 0x01; /* version */
5482 out->hb2 = 0x1A; /* length */
5483 out->sb[0] = buf[3]; /* checksum */
5484 i = 1;
5485 break;
5486
5487 case DRM_MODE_CONNECTOR_DisplayPort:
5488 case DRM_MODE_CONNECTOR_eDP:
5489 out->hb0 = 0x00; /* sdp id, zero */
5490 out->hb1 = 0x87; /* type */
5491 out->hb2 = 0x1D; /* payload len - 1 */
5492 out->hb3 = (0x13 << 2); /* sdp version */
5493 out->sb[0] = 0x01; /* version */
5494 out->sb[1] = 0x1A; /* length */
5495 i = 2;
5496 break;
5497
5498 default:
5499 return -EINVAL;
5500 }
5501
5502 memcpy(&out->sb[i], &buf[4], 26);
5503 out->valid = true;
5504
5505 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5506 sizeof(out->sb), false);
5507
5508 return 0;
5509}
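
/*
 * Byte layout fill_hdr_info_packet() assumes, gathered from the code above
 * (kept here as a reference comment, not new behavior):
 *
 *   buf[0..3]   packed DRM infoframe header (type 0x87, version 0x01,
 *               length 0x1A, checksum)
 *   buf[4..29]  26 bytes of static HDR metadata, copied into out->sb[i..]
 *
 * HDMI reuses the infoframe header directly and keeps the checksum in
 * sb[0]; DP/eDP instead wraps the same 26-byte payload in a 4-byte SDP
 * header, which is why the copy offset i differs (1 vs 2).
 */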
5510
5511static bool
5512is_hdr_metadata_different(const struct drm_connector_state *old_state,
5513 const struct drm_connector_state *new_state)
5514{
5515 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5516 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5517
5518 if (old_blob != new_blob) {
5519 if (old_blob && new_blob &&
5520 old_blob->length == new_blob->length)
5521 return memcmp(old_blob->data, new_blob->data,
5522 old_blob->length);
5523
5524 return true;
5525 }
5526
5527 return false;
5528}
5529
5530static int
5531amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 5532 struct drm_atomic_state *state)
88694af9 5533{
51e857af
SP
5534 struct drm_connector_state *new_con_state =
5535 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
5536 struct drm_connector_state *old_con_state =
5537 drm_atomic_get_old_connector_state(state, conn);
5538 struct drm_crtc *crtc = new_con_state->crtc;
5539 struct drm_crtc_state *new_crtc_state;
5540 int ret;
5541
5542 if (!crtc)
5543 return 0;
5544
5545 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5546 struct dc_info_packet hdr_infopacket;
5547
5548 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5549 if (ret)
5550 return ret;
5551
5552 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5553 if (IS_ERR(new_crtc_state))
5554 return PTR_ERR(new_crtc_state);
5555
5556 /*
5557 * DC considers the stream backends changed if the
5558 * static metadata changes. Forcing the modeset also
5559 * gives a simple way for userspace to switch from
b232d4ed
NK
5560 * 8bpc to 10bpc when setting the metadata to enter
5561 * or exit HDR.
5562 *
5563 * Changing the static metadata after it's been
5564 * set is permissible, however. So only force a
5565 * modeset if we're entering or exiting HDR.
88694af9 5566 */
b232d4ed
NK
5567 new_crtc_state->mode_changed =
5568 !old_con_state->hdr_output_metadata ||
5569 !new_con_state->hdr_output_metadata;
88694af9
NK
5570 }
5571
5572 return 0;
5573}
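
/*
 * The modeset rule above as a predicate sketch: given that the HDR
 * metadata changed at all, force a modeset only on HDR entry or exit
 * (the blob appearing or disappearing), never on an in-place update.
 */
static bool sketch_hdr_needs_modeset(bool old_has_metadata,
				     bool new_has_metadata)
{
	return !old_has_metadata || !new_has_metadata;
}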
5574
e7b07cee
HW
5575static const struct drm_connector_helper_funcs
5576amdgpu_dm_connector_helper_funcs = {
5577 /*
1f6010a9 5578 * If a second, bigger display is hotplugged in fbcon mode, its higher
b830ebc9 5579 * resolution modes will be filtered out by drm_mode_validate_size() and
1f6010a9 5580 * will be missing once the user starts lightdm. So we need to rebuild the
b830ebc9
HW
 5581 * mode list in the get_modes callback, not just return the mode count.
5582 */
e7b07cee
HW
5583 .get_modes = get_modes,
5584 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 5585 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
5586};
5587
5588static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5589{
5590}
5591
d6ef9b41 5592static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
5593{
5594 struct drm_atomic_state *state = new_crtc_state->state;
5595 struct drm_plane *plane;
5596 int num_active = 0;
5597
5598 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5599 struct drm_plane_state *new_plane_state;
5600
5601 /* Cursor planes are "fake". */
5602 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5603 continue;
5604
5605 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5606
5607 if (!new_plane_state) {
5608 /*
 5609 * The plane is enabled on the CRTC and hasn't changed
5610 * state. This means that it previously passed
5611 * validation and is therefore enabled.
5612 */
5613 num_active += 1;
5614 continue;
5615 }
5616
5617 /* We need a framebuffer to be considered enabled. */
5618 num_active += (new_plane_state->fb != NULL);
5619 }
5620
d6ef9b41
NK
5621 return num_active;
5622}
5623
8fe684e9
NK
5624static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5625 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
5626{
5627 struct dm_crtc_state *dm_new_crtc_state =
5628 to_dm_crtc_state(new_crtc_state);
5629
5630 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
5631
5632 if (!dm_new_crtc_state->stream)
5633 return;
5634
5635 dm_new_crtc_state->active_planes =
5636 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
5637}
5638
3ee6b26b
AD
5639static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5640 struct drm_crtc_state *state)
e7b07cee 5641{
1348969a 5642 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee
HW
5643 struct dc *dc = adev->dm.dc;
5644 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5645 int ret = -EINVAL;
5646
8fe684e9 5647 dm_update_crtc_active_planes(crtc, state);
d6ef9b41 5648
9b690ef3
BL
5649 if (unlikely(!dm_crtc_state->stream &&
5650 modeset_required(state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
5651 WARN_ON(1);
5652 return ret;
5653 }
5654
bc92c065 5655 /*
b836a274
MD
5656 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5657 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5658 * planes are disabled, which is not supported by the hardware. And there is legacy
5659 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 5660 */
b836a274
MD
5661 if (state->enable &&
5662 !(state->plane_mask & drm_plane_mask(crtc->primary)))
c14a005c
NK
5663 return -EINVAL;
5664
b836a274
MD
5665 /* In some use cases, like reset, no stream is attached */
5666 if (!dm_crtc_state->stream)
5667 return 0;
5668
62c933f9 5669 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
5670 return 0;
5671
5672 return ret;
5673}
5674
3ee6b26b
AD
5675static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5676 const struct drm_display_mode *mode,
5677 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
5678{
5679 return true;
5680}
5681
5682static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5683 .disable = dm_crtc_helper_disable,
5684 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
5685 .mode_fixup = dm_crtc_helper_mode_fixup,
5686 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
5687};
5688
5689static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5690{
5691
5692}
5693
3261e013
ML
 5694static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5695{
5696 switch (display_color_depth) {
5697 case COLOR_DEPTH_666:
5698 return 6;
5699 case COLOR_DEPTH_888:
5700 return 8;
5701 case COLOR_DEPTH_101010:
5702 return 10;
5703 case COLOR_DEPTH_121212:
5704 return 12;
5705 case COLOR_DEPTH_141414:
5706 return 14;
5707 case COLOR_DEPTH_161616:
5708 return 16;
5709 default:
5710 break;
5711 }
5712 return 0;
5713}
5714
3ee6b26b
AD
5715static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5716 struct drm_crtc_state *crtc_state,
5717 struct drm_connector_state *conn_state)
e7b07cee 5718{
3261e013
ML
5719 struct drm_atomic_state *state = crtc_state->state;
5720 struct drm_connector *connector = conn_state->connector;
5721 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5722 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5723 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5724 struct drm_dp_mst_topology_mgr *mst_mgr;
5725 struct drm_dp_mst_port *mst_port;
5726 enum dc_color_depth color_depth;
5727 int clock, bpp = 0;
1bc22f20 5728 bool is_y420 = false;
3261e013
ML
5729
5730 if (!aconnector->port || !aconnector->dc_sink)
5731 return 0;
5732
5733 mst_port = aconnector->port;
5734 mst_mgr = &aconnector->mst_port->mst_mgr;
5735
5736 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5737 return 0;
5738
5739 if (!state->duplicated) {
cbd14ae7 5740 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
5741 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5742 aconnector->force_yuv420_output;
cbd14ae7
SW
5743 color_depth = convert_color_depth_from_display_info(connector,
5744 is_y420,
5745 max_bpc);
3261e013
ML
5746 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5747 clock = adjusted_mode->clock;
dc48529f 5748 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
5749 }
5750 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5751 mst_mgr,
5752 mst_port,
1c6c1cb5 5753 dm_new_connector_state->pbn,
03ca9600 5754 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
5755 if (dm_new_connector_state->vcpi_slots < 0) {
5756 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5757 return dm_new_connector_state->vcpi_slots;
5758 }
e7b07cee
HW
5759 return 0;
5760}
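
/*
 * For intuition about the PBN value computed above: drm_dp_calc_pbn_mode()
 * does the exact fixed-point math; roughly, PBN expresses the stream's
 * bandwidth in units of 54/64 MBytes/s with a 0.6% overhead margin. A
 * floating-point approximation for intuition only (hypothetical
 * userspace-style helper, not the DRM implementation):
 */
static double sketch_pbn_estimate(int clock_khz, int bpp)
{
	/* bytes/s = clock * bpp / 8; PBN unit = 54/64 MB/s; 1.006 margin */
	return (clock_khz / 1000.0) * (bpp / 8.0) * (64.0 / 54.0) * 1.006;
}

/* e.g. 4K60 RGB 8bpc: sketch_pbn_estimate(594000, 24) ~= 2125 PBN */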
5761
5762const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5763 .disable = dm_encoder_helper_disable,
5764 .atomic_check = dm_encoder_helper_atomic_check
5765};
5766
d9fe1a4c 5767#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
5768static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5769 struct dc_state *dc_state)
5770{
5771 struct dc_stream_state *stream = NULL;
5772 struct drm_connector *connector;
5773 struct drm_connector_state *new_con_state, *old_con_state;
5774 struct amdgpu_dm_connector *aconnector;
5775 struct dm_connector_state *dm_conn_state;
5776 int i, j, clock, bpp;
5777 int vcpi, pbn_div, pbn = 0;
5778
5779 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5780
5781 aconnector = to_amdgpu_dm_connector(connector);
5782
5783 if (!aconnector->port)
5784 continue;
5785
5786 if (!new_con_state || !new_con_state->crtc)
5787 continue;
5788
5789 dm_conn_state = to_dm_connector_state(new_con_state);
5790
5791 for (j = 0; j < dc_state->stream_count; j++) {
5792 stream = dc_state->streams[j];
5793 if (!stream)
5794 continue;
5795
5796 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5797 break;
5798
5799 stream = NULL;
5800 }
5801
5802 if (!stream)
5803 continue;
5804
5805 if (stream->timing.flags.DSC != 1) {
5806 drm_dp_mst_atomic_enable_dsc(state,
5807 aconnector->port,
5808 dm_conn_state->pbn,
5809 0,
5810 false);
5811 continue;
5812 }
5813
5814 pbn_div = dm_mst_get_pbn_divider(stream->link);
5815 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5816 clock = stream->timing.pix_clk_100hz / 10;
5817 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5818 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5819 aconnector->port,
5820 pbn, pbn_div,
5821 true);
5822 if (vcpi < 0)
5823 return vcpi;
5824
5825 dm_conn_state->pbn = pbn;
5826 dm_conn_state->vcpi_slots = vcpi;
5827 }
5828 return 0;
5829}
d9fe1a4c 5830#endif
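
/*
 * A note on units for the DSC branch above (flagged as an assumption): the
 * DSC target rate in dsc_cfg.bits_per_pixel is a fixed-point value in
 * 1/16th-bpp units, which is why drm_dp_calc_pbn_mode() is called with
 * dsc = true so it can scale accordingly. Sketch of the conversion:
 */
static int sketch_dsc_bpp_x16_to_int(int bpp_x16)
{
	return bpp_x16 / 16;	/* e.g. 128 -> 8 bpp target rate */
}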
29b9ba74 5831
e7b07cee
HW
5832static void dm_drm_plane_reset(struct drm_plane *plane)
5833{
5834 struct dm_plane_state *amdgpu_state = NULL;
5835
5836 if (plane->state)
5837 plane->funcs->atomic_destroy_state(plane, plane->state);
5838
5839 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 5840 WARN_ON(amdgpu_state == NULL);
1f6010a9 5841
7ddaef96
NK
5842 if (amdgpu_state)
5843 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
5844}
5845
5846static struct drm_plane_state *
5847dm_drm_plane_duplicate_state(struct drm_plane *plane)
5848{
5849 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5850
5851 old_dm_plane_state = to_dm_plane_state(plane->state);
5852 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5853 if (!dm_plane_state)
5854 return NULL;
5855
5856 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5857
3be5262e
HW
5858 if (old_dm_plane_state->dc_state) {
5859 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5860 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
5861 }
5862
707477b0
NK
5863 /* Framebuffer hasn't been updated yet, so retain old flags. */
5864 dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5865 dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5866
e7b07cee
HW
5867 return &dm_plane_state->base;
5868}
5869
dfd84d90 5870static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 5871 struct drm_plane_state *state)
e7b07cee
HW
5872{
5873 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5874
3be5262e
HW
5875 if (dm_plane_state->dc_state)
5876 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 5877
0627bbd3 5878 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
5879}
5880
5881static const struct drm_plane_funcs dm_plane_funcs = {
5882 .update_plane = drm_atomic_helper_update_plane,
5883 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 5884 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
5885 .reset = dm_drm_plane_reset,
5886 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5887 .atomic_destroy_state = dm_drm_plane_destroy_state,
5888};
5889
3ee6b26b
AD
5890static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5891 struct drm_plane_state *new_state)
e7b07cee
HW
5892{
5893 struct amdgpu_framebuffer *afb;
5894 struct drm_gem_object *obj;
5d43be0c 5895 struct amdgpu_device *adev;
e7b07cee 5896 struct amdgpu_bo *rbo;
e7b07cee 5897 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
5898 struct list_head list;
5899 struct ttm_validate_buffer tv;
5900 struct ww_acquire_ctx ticket;
5d43be0c
CK
5901 uint32_t domain;
5902 int r;
e7b07cee
HW
5903
5904 if (!new_state->fb) {
f1ad2f5e 5905 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
5906 return 0;
5907 }
5908
5909 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 5910 obj = new_state->fb->obj[0];
e7b07cee 5911 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 5912 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
5913 INIT_LIST_HEAD(&list);
5914
5915 tv.bo = &rbo->tbo;
5916 tv.num_shared = 1;
5917 list_add(&tv.head, &list);
5918
9165fb87 5919 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
5920 if (r) {
5921 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 5922 return r;
0f257b09 5923 }
e7b07cee 5924
5d43be0c 5925 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 5926 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
5927 else
5928 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 5929
7b7c6c81 5930 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 5931 if (unlikely(r != 0)) {
30b7c614
HW
5932 if (r != -ERESTARTSYS)
5933 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 5934 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
5935 return r;
5936 }
5937
bb812f1e
JZ
5938 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5939 if (unlikely(r != 0)) {
5940 amdgpu_bo_unpin(rbo);
0f257b09 5941 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5942 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
5943 return r;
5944 }
7df7e505 5945
0f257b09 5946 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 5947
7b7c6c81 5948 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
5949
5950 amdgpu_bo_ref(rbo);
5951
cf322b49
NK
5952 /**
5953 * We don't do surface updates on planes that have been newly created,
5954 * but we also don't have the afb->address during atomic check.
5955 *
5956 * Fill in buffer attributes depending on the address here, but only on
5957 * newly created planes since they're not being used by DC yet and this
5958 * won't modify global state.
5959 */
5960 dm_plane_state_old = to_dm_plane_state(plane->state);
5961 dm_plane_state_new = to_dm_plane_state(new_state);
5962
3be5262e 5963 if (dm_plane_state_new->dc_state &&
cf322b49
NK
5964 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5965 struct dc_plane_state *plane_state =
5966 dm_plane_state_new->dc_state;
5967 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 5968
320932bf 5969 fill_plane_buffer_attributes(
695af5f9 5970 adev, afb, plane_state->format, plane_state->rotation,
cf322b49
NK
5971 dm_plane_state_new->tiling_flags,
5972 &plane_state->tiling_info, &plane_state->plane_size,
5973 &plane_state->dcc, &plane_state->address,
5974 dm_plane_state_new->tmz_surface, force_disable_dcc);
e7b07cee
HW
5975 }
5976
e7b07cee
HW
5977 return 0;
5978}
5979
3ee6b26b
AD
5980static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5981 struct drm_plane_state *old_state)
e7b07cee
HW
5982{
5983 struct amdgpu_bo *rbo;
e7b07cee
HW
5984 int r;
5985
5986 if (!old_state->fb)
5987 return;
5988
e68d14dd 5989 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
5990 r = amdgpu_bo_reserve(rbo, false);
5991 if (unlikely(r)) {
5992 DRM_ERROR("failed to reserve rbo before unpin\n");
5993 return;
b830ebc9
HW
5994 }
5995
5996 amdgpu_bo_unpin(rbo);
5997 amdgpu_bo_unreserve(rbo);
5998 amdgpu_bo_unref(&rbo);
e7b07cee
HW
5999}
6000
8c44515b
AP
6001static int dm_plane_helper_check_state(struct drm_plane_state *state,
6002 struct drm_crtc_state *new_crtc_state)
6003{
6004 int max_downscale = 0;
6005 int max_upscale = INT_MAX;
6006
6007 /* TODO: These should be checked against DC plane caps */
6008 return drm_atomic_helper_check_plane_state(
6009 state, new_crtc_state, max_downscale, max_upscale, true, true);
6010}
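
/*
 * Note on the two ints above: they map to the min_scale/max_scale
 * parameters of drm_atomic_helper_check_plane_state(), which are 16.16
 * fixed-point scale factors (1 << 16 == 1:1). Passing 0 and INT_MAX
 * therefore imposes no scaling limits yet, pending the DC plane caps
 * hookup that the TODO mentions.
 */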
6011
7578ecda
AD
6012static int dm_plane_atomic_check(struct drm_plane *plane,
6013 struct drm_plane_state *state)
cbd19488 6014{
1348969a 6015 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 6016 struct dc *dc = adev->dm.dc;
78171832 6017 struct dm_plane_state *dm_plane_state;
695af5f9 6018 struct dc_scaling_info scaling_info;
8c44515b 6019 struct drm_crtc_state *new_crtc_state;
695af5f9 6020 int ret;
78171832
NK
6021
6022 dm_plane_state = to_dm_plane_state(state);
cbd19488 6023
3be5262e 6024 if (!dm_plane_state->dc_state)
9a3329b1 6025 return 0;
cbd19488 6026
8c44515b
AP
6027 new_crtc_state =
6028 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6029 if (!new_crtc_state)
6030 return -EINVAL;
6031
6032 ret = dm_plane_helper_check_state(state, new_crtc_state);
6033 if (ret)
6034 return ret;
6035
695af5f9
NK
6036 ret = fill_dc_scaling_info(state, &scaling_info);
6037 if (ret)
6038 return ret;
a05bcff1 6039
62c933f9 6040 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
6041 return 0;
6042
6043 return -EINVAL;
6044}
6045
674e78ac
NK
6046static int dm_plane_atomic_async_check(struct drm_plane *plane,
6047 struct drm_plane_state *new_plane_state)
6048{
6049 /* Only support async updates on cursor planes. */
6050 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6051 return -EINVAL;
6052
6053 return 0;
6054}
6055
6056static void dm_plane_atomic_async_update(struct drm_plane *plane,
6057 struct drm_plane_state *new_state)
6058{
6059 struct drm_plane_state *old_state =
6060 drm_atomic_get_old_plane_state(new_state->state, plane);
6061
332af874 6062 swap(plane->state->fb, new_state->fb);
674e78ac
NK
6063
6064 plane->state->src_x = new_state->src_x;
6065 plane->state->src_y = new_state->src_y;
6066 plane->state->src_w = new_state->src_w;
6067 plane->state->src_h = new_state->src_h;
6068 plane->state->crtc_x = new_state->crtc_x;
6069 plane->state->crtc_y = new_state->crtc_y;
6070 plane->state->crtc_w = new_state->crtc_w;
6071 plane->state->crtc_h = new_state->crtc_h;
6072
6073 handle_cursor_update(plane, old_state);
6074}
6075
e7b07cee
HW
6076static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6077 .prepare_fb = dm_plane_helper_prepare_fb,
6078 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 6079 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
6080 .atomic_async_check = dm_plane_atomic_async_check,
6081 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
6082};
6083
6084/*
6085 * TODO: these are currently initialized to rgb formats only.
6086 * For future use cases we should either initialize them dynamically based on
 6087 * plane capabilities, or initialize this array to all formats, so the
1f6010a9 6088 * internal DRM check will succeed, and let DC implement the proper check.
e7b07cee 6089 */
d90371b0 6090static const uint32_t rgb_formats[] = {
e7b07cee
HW
6091 DRM_FORMAT_XRGB8888,
6092 DRM_FORMAT_ARGB8888,
6093 DRM_FORMAT_RGBA8888,
6094 DRM_FORMAT_XRGB2101010,
6095 DRM_FORMAT_XBGR2101010,
6096 DRM_FORMAT_ARGB2101010,
6097 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
6098 DRM_FORMAT_XBGR8888,
6099 DRM_FORMAT_ABGR8888,
46dd9ff7 6100 DRM_FORMAT_RGB565,
e7b07cee
HW
6101};
6102
0d579c7e
NK
6103static const uint32_t overlay_formats[] = {
6104 DRM_FORMAT_XRGB8888,
6105 DRM_FORMAT_ARGB8888,
6106 DRM_FORMAT_RGBA8888,
6107 DRM_FORMAT_XBGR8888,
6108 DRM_FORMAT_ABGR8888,
7267a1a9 6109 DRM_FORMAT_RGB565
e7b07cee
HW
6110};
6111
6112static const u32 cursor_formats[] = {
6113 DRM_FORMAT_ARGB8888
6114};
6115
37c6a93b
NK
6116static int get_plane_formats(const struct drm_plane *plane,
6117 const struct dc_plane_cap *plane_cap,
6118 uint32_t *formats, int max_formats)
e7b07cee 6119{
37c6a93b
NK
6120 int i, num_formats = 0;
6121
6122 /*
6123 * TODO: Query support for each group of formats directly from
6124 * DC plane caps. This will require adding more formats to the
6125 * caps list.
6126 */
e7b07cee 6127
f180b4bc 6128 switch (plane->type) {
e7b07cee 6129 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
6130 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6131 if (num_formats >= max_formats)
6132 break;
6133
6134 formats[num_formats++] = rgb_formats[i];
6135 }
6136
ea36ad34 6137 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 6138 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
6139 if (plane_cap && plane_cap->pixel_format_support.p010)
6140 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
6141 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6142 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6143 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
6144 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6145 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 6146 }
e7b07cee 6147 break;
37c6a93b 6148
e7b07cee 6149 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
6150 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6151 if (num_formats >= max_formats)
6152 break;
6153
6154 formats[num_formats++] = overlay_formats[i];
6155 }
e7b07cee 6156 break;
37c6a93b 6157
e7b07cee 6158 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
6159 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6160 if (num_formats >= max_formats)
6161 break;
6162
6163 formats[num_formats++] = cursor_formats[i];
6164 }
e7b07cee
HW
6165 break;
6166 }
6167
37c6a93b
NK
6168 return num_formats;
6169}
6170
6171static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6172 struct drm_plane *plane,
6173 unsigned long possible_crtcs,
6174 const struct dc_plane_cap *plane_cap)
6175{
6176 uint32_t formats[32];
6177 int num_formats;
6178 int res = -EPERM;
ecc874a6 6179 unsigned int supported_rotations;
37c6a93b
NK
6180
6181 num_formats = get_plane_formats(plane, plane_cap, formats,
6182 ARRAY_SIZE(formats));
6183
4a580877 6184 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b
NK
6185 &dm_plane_funcs, formats, num_formats,
6186 NULL, plane->type, NULL);
6187 if (res)
6188 return res;
6189
cc1fec57
NK
6190 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6191 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
6192 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6193 BIT(DRM_MODE_BLEND_PREMULTI);
6194
6195 drm_plane_create_alpha_property(plane);
6196 drm_plane_create_blend_mode_property(plane, blend_caps);
6197 }
6198
fc8e5230 6199 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
6200 plane_cap &&
6201 (plane_cap->pixel_format_support.nv12 ||
6202 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
6203 /* This only affects YUV formats. */
6204 drm_plane_create_color_properties(
6205 plane,
6206 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
6207 BIT(DRM_COLOR_YCBCR_BT709) |
6208 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
6209 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6210 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6211 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6212 }
6213
ecc874a6
PLG
6214 supported_rotations =
6215 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6216 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6217
f784112f
MR
6218 if (dm->adev->asic_type >= CHIP_BONAIRE)
6219 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6220 supported_rotations);
ecc874a6 6221
f180b4bc 6222 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 6223
96719c54 6224 /* Create (reset) the plane state */
f180b4bc
HW
6225 if (plane->funcs->reset)
6226 plane->funcs->reset(plane);
96719c54 6227
37c6a93b 6228 return 0;
e7b07cee
HW
6229}
6230
7578ecda
AD
6231static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6232 struct drm_plane *plane,
6233 uint32_t crtc_index)
e7b07cee
HW
6234{
6235 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 6236 struct drm_plane *cursor_plane;
e7b07cee
HW
6237
6238 int res = -ENOMEM;
6239
6240 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6241 if (!cursor_plane)
6242 goto fail;
6243
f180b4bc 6244 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 6245 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
6246
6247 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6248 if (!acrtc)
6249 goto fail;
6250
6251 res = drm_crtc_init_with_planes(
6252 dm->ddev,
6253 &acrtc->base,
6254 plane,
f180b4bc 6255 cursor_plane,
e7b07cee
HW
6256 &amdgpu_dm_crtc_funcs, NULL);
6257
6258 if (res)
6259 goto fail;
6260
6261 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6262
96719c54
HW
6263 /* Create (reset) the plane state */
6264 if (acrtc->base.funcs->reset)
6265 acrtc->base.funcs->reset(&acrtc->base);
6266
e7b07cee
HW
6267 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6268 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6269
6270 acrtc->crtc_id = crtc_index;
6271 acrtc->base.enabled = false;
c37e2d29 6272 acrtc->otg_inst = -1;
e7b07cee
HW
6273
6274 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
6275 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6276 true, MAX_COLOR_LUT_ENTRIES);
086247a4 6277 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e7b07cee
HW
6278
6279 return 0;
6280
6281fail:
b830ebc9
HW
6282 kfree(acrtc);
6283 kfree(cursor_plane);
e7b07cee
HW
6284 return res;
6285}
6286
6287
6288static int to_drm_connector_type(enum signal_type st)
6289{
6290 switch (st) {
6291 case SIGNAL_TYPE_HDMI_TYPE_A:
6292 return DRM_MODE_CONNECTOR_HDMIA;
6293 case SIGNAL_TYPE_EDP:
6294 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
6295 case SIGNAL_TYPE_LVDS:
6296 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
6297 case SIGNAL_TYPE_RGB:
6298 return DRM_MODE_CONNECTOR_VGA;
6299 case SIGNAL_TYPE_DISPLAY_PORT:
6300 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6301 return DRM_MODE_CONNECTOR_DisplayPort;
6302 case SIGNAL_TYPE_DVI_DUAL_LINK:
6303 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6304 return DRM_MODE_CONNECTOR_DVID;
6305 case SIGNAL_TYPE_VIRTUAL:
6306 return DRM_MODE_CONNECTOR_VIRTUAL;
6307
6308 default:
6309 return DRM_MODE_CONNECTOR_Unknown;
6310 }
6311}
6312
2b4c1c05
DV
6313static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6314{
62afb4ad
JRS
6315 struct drm_encoder *encoder;
6316
6317 /* There is only one encoder per connector */
6318 drm_connector_for_each_possible_encoder(connector, encoder)
6319 return encoder;
6320
6321 return NULL;
2b4c1c05
DV
6322}
6323
e7b07cee
HW
6324static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6325{
e7b07cee
HW
6326 struct drm_encoder *encoder;
6327 struct amdgpu_encoder *amdgpu_encoder;
6328
2b4c1c05 6329 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
6330
6331 if (encoder == NULL)
6332 return;
6333
6334 amdgpu_encoder = to_amdgpu_encoder(encoder);
6335
6336 amdgpu_encoder->native_mode.clock = 0;
6337
6338 if (!list_empty(&connector->probed_modes)) {
6339 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 6340
e7b07cee 6341 list_for_each_entry(preferred_mode,
b830ebc9
HW
6342 &connector->probed_modes,
6343 head) {
6344 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6345 amdgpu_encoder->native_mode = *preferred_mode;
6346
e7b07cee
HW
6347 break;
6348 }
6349
6350 }
6351}
6352
3ee6b26b
AD
6353static struct drm_display_mode *
6354amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6355 char *name,
6356 int hdisplay, int vdisplay)
e7b07cee
HW
6357{
6358 struct drm_device *dev = encoder->dev;
6359 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6360 struct drm_display_mode *mode = NULL;
6361 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6362
6363 mode = drm_mode_duplicate(dev, native_mode);
6364
b830ebc9 6365 if (mode == NULL)
e7b07cee
HW
6366 return NULL;
6367
6368 mode->hdisplay = hdisplay;
6369 mode->vdisplay = vdisplay;
6370 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 6371 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
6372
6373 return mode;
6374
6375}
6376
6377static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 6378 struct drm_connector *connector)
e7b07cee
HW
6379{
6380 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6381 struct drm_display_mode *mode = NULL;
6382 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
6383 struct amdgpu_dm_connector *amdgpu_dm_connector =
6384 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6385 int i;
6386 int n;
6387 struct mode_size {
6388 char name[DRM_DISPLAY_MODE_LEN];
6389 int w;
6390 int h;
b830ebc9 6391 } common_modes[] = {
e7b07cee
HW
6392 { "640x480", 640, 480},
6393 { "800x600", 800, 600},
6394 { "1024x768", 1024, 768},
6395 { "1280x720", 1280, 720},
6396 { "1280x800", 1280, 800},
6397 {"1280x1024", 1280, 1024},
6398 { "1440x900", 1440, 900},
6399 {"1680x1050", 1680, 1050},
6400 {"1600x1200", 1600, 1200},
6401 {"1920x1080", 1920, 1080},
6402 {"1920x1200", 1920, 1200}
6403 };
6404
b830ebc9 6405 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
6406
6407 for (i = 0; i < n; i++) {
6408 struct drm_display_mode *curmode = NULL;
6409 bool mode_existed = false;
6410
6411 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
6412 common_modes[i].h > native_mode->vdisplay ||
6413 (common_modes[i].w == native_mode->hdisplay &&
6414 common_modes[i].h == native_mode->vdisplay))
6415 continue;
e7b07cee
HW
6416
6417 list_for_each_entry(curmode, &connector->probed_modes, head) {
6418 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 6419 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
6420 mode_existed = true;
6421 break;
6422 }
6423 }
6424
6425 if (mode_existed)
6426 continue;
6427
6428 mode = amdgpu_dm_create_common_mode(encoder,
6429 common_modes[i].name, common_modes[i].w,
6430 common_modes[i].h);
6431 drm_mode_probed_add(connector, mode);
c84dec2f 6432 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
6433 }
6434}
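
/*
 * The filter in the loop above, as a standalone predicate: a common mode
 * is added only if it fits inside the native panel mode and is not the
 * native mode itself (that one already comes from the EDID). Duplicates
 * already present in the probed list are skipped separately.
 */
static bool sketch_common_mode_usable(int w, int h, int native_w, int native_h)
{
	if (w > native_w || h > native_h)
		return false;		/* larger than native */
	if (w == native_w && h == native_h)
		return false;		/* same size as native */
	return true;
}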
6435
3ee6b26b
AD
6436static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6437 struct edid *edid)
e7b07cee 6438{
c84dec2f
HW
6439 struct amdgpu_dm_connector *amdgpu_dm_connector =
6440 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6441
6442 if (edid) {
6443 /* empty probed_modes */
6444 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 6445 amdgpu_dm_connector->num_modes =
e7b07cee
HW
6446 drm_add_edid_modes(connector, edid);
6447
f1e5e913
YMM
 6448 /* Sort the probed modes before calling
 6449 * amdgpu_dm_get_native_mode(), since an EDID can have
 6450 * more than one preferred mode. Modes that appear
 6451 * later in the probed mode list could have a higher
 6452 * preferred resolution: for example, a 3840x2160
 6453 * preferred timing in the base EDID and a 4096x2160
 6454 * preferred resolution in a later DID extension block.
6455 */
6456 drm_mode_sort(&connector->probed_modes);
e7b07cee 6457 amdgpu_dm_get_native_mode(connector);
a8d8d3dc 6458 } else {
c84dec2f 6459 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 6460 }
e7b07cee
HW
6461}
6462
7578ecda 6463static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 6464{
c84dec2f
HW
6465 struct amdgpu_dm_connector *amdgpu_dm_connector =
6466 to_amdgpu_dm_connector(connector);
e7b07cee 6467 struct drm_encoder *encoder;
c84dec2f 6468 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 6469
2b4c1c05 6470 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 6471
85ee15d6 6472 if (!edid || !drm_edid_is_valid(edid)) {
1b369d3c
ML
6473 amdgpu_dm_connector->num_modes =
6474 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
6475 } else {
6476 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6477 amdgpu_dm_connector_add_common_modes(encoder, connector);
6478 }
3e332d3a 6479 amdgpu_dm_fbc_init(connector);
5099114b 6480
c84dec2f 6481 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
6482}
6483
3ee6b26b
AD
6484void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6485 struct amdgpu_dm_connector *aconnector,
6486 int connector_type,
6487 struct dc_link *link,
6488 int link_index)
e7b07cee 6489{
1348969a 6490 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 6491
f04bee34
NK
6492 /*
6493 * Some of the properties below require access to state, like bpc.
6494 * Allocate some default initial connector state with our reset helper.
6495 */
6496 if (aconnector->base.funcs->reset)
6497 aconnector->base.funcs->reset(&aconnector->base);
6498
e7b07cee
HW
6499 aconnector->connector_id = link_index;
6500 aconnector->dc_link = link;
6501 aconnector->base.interlace_allowed = false;
6502 aconnector->base.doublescan_allowed = false;
6503 aconnector->base.stereo_allowed = false;
6504 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6505 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 6506 aconnector->audio_inst = -1;
e7b07cee
HW
6507 mutex_init(&aconnector->hpd_lock);
6508
1f6010a9
DF
6509 /*
 6510 * Configure HPD hot plug support. The connector->polled default value is 0,
b830ebc9
HW
 6511 * which means HPD hot plug is not supported.
6512 */
e7b07cee
HW
6513 switch (connector_type) {
6514 case DRM_MODE_CONNECTOR_HDMIA:
6515 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 6516 aconnector->base.ycbcr_420_allowed =
9ea59d5a 6517 link->link_enc->features.hdmi_ycbcr420_supported;
e7b07cee
HW
6518 break;
6519 case DRM_MODE_CONNECTOR_DisplayPort:
6520 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 6521 aconnector->base.ycbcr_420_allowed =
9ea59d5a 6522 link->link_enc->features.dp_ycbcr420_supported;
e7b07cee
HW
6523 break;
6524 case DRM_MODE_CONNECTOR_DVID:
6525 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6526 break;
6527 default:
6528 break;
6529 }
6530
6531 drm_object_attach_property(&aconnector->base.base,
6532 dm->ddev->mode_config.scaling_mode_property,
6533 DRM_MODE_SCALE_NONE);
6534
6535 drm_object_attach_property(&aconnector->base.base,
6536 adev->mode_info.underscan_property,
6537 UNDERSCAN_OFF);
6538 drm_object_attach_property(&aconnector->base.base,
6539 adev->mode_info.underscan_hborder_property,
6540 0);
6541 drm_object_attach_property(&aconnector->base.base,
6542 adev->mode_info.underscan_vborder_property,
6543 0);
1825fd34 6544
8c61b31e
JFZ
6545 if (!aconnector->mst_port)
6546 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 6547
4a8ca46b
RL
 6548 /* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
6549 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6550 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 6551
c1ee92f9 6552 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 6553 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
6554 drm_object_attach_property(&aconnector->base.base,
6555 adev->mode_info.abm_level_property, 0);
6556 }
bb47de73
NK
6557
6558 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
6559 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6560 connector_type == DRM_MODE_CONNECTOR_eDP) {
88694af9
NK
6561 drm_object_attach_property(
6562 &aconnector->base.base,
6563 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6564
8c61b31e
JFZ
6565 if (!aconnector->mst_port)
6566 drm_connector_attach_vrr_capable_property(&aconnector->base);
6567
0c8620d6 6568#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 6569 if (adev->dm.hdcp_workqueue)
53e108aa 6570 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 6571#endif
bb47de73 6572 }
e7b07cee
HW
6573}
6574
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

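/*
 * Allocate an i2c adapter backed by a DC DDC service. The caller is
 * responsible for registering it with i2c_add_adapter() and for freeing
 * it if registration fails.
 */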
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}

/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}

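/* Return a bitmask with one bit set for each available CRTC (at most six). */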
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/**
	 * This reads the current state for the IRQ and force-reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check that something is connected and enabled; otherwise we would
	 * start HDCP with nothing driven (hot-plug, headless S3, DPMS).
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
	    aconnector->dc_sink != NULL)
		return true;

	if (old_state->content_protection == state->content_protection)
		return false;

	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}

#endif
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}

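/*
 * Translate the DRM cursor plane position into a DC cursor position,
 * clamping negative coordinates to 0 and recording the offset as the
 * cursor hotspot so partially off-screen cursors render correctly.
 */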
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

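/*
 * Hand the pending DRM event off to the pageflip interrupt path: the
 * caller must hold event_lock, and the event is consumed from the CRTC
 * state so it can be sent once the flip completes.
 */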
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
			 acrtc->crtc_id);
}

static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

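/*
 * Snapshot the freesync configuration and VRR parameters into
 * acrtc->dm_irq_params under event_lock so the vblank/vupdate IRQ
 * handlers always see a consistent copy.
 */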
static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if the disable/reenable happened inside the
		 * display front porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}

static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}

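/*
 * Build a dc_surface_update bundle for every plane on this CRTC, wait
 * out fences and the target vblank, then program all of the updates
 * through dc_commit_updates_for_stream() in one pass.
 */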
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			dm_new_plane_state->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			dm_new_plane_state->tmz_surface, false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;

	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/**
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		/**
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),
						  acrtc_attach);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}

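/*
 * Notify the audio side about ELD changes: connectors that lost or
 * changed their CRTC are reported as removed first, then newly routed
 * streams are reported with their audio instance.
 */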
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock)
{
	/*
	 * Add check here for SoCs that support hardware cursor plane, to
	 * unset legacy_cursor_update
	 */

	return drm_atomic_helper_commit(dev, state, nonblock);

	/*TODO Handle EINTR, reenable IRQ*/
}

/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failures here imply a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;
	bool mode_set_reset_required = false;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, false);
			dc_stream_release(dm_old_crtc_state->stream);
		}
	}

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * This can happen because of issues with
				 * userspace notification delivery: userspace
				 * tries to set a mode on a display that is in
				 * fact disconnected (dc_sink is NULL here on
				 * the aconnector), and we expect a reset mode
				 * request to come soon.
				 *
				 * It can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In either case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
			mode_set_reset_required = true;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			mode_set_reset_required = true;
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
		if (mode_set_reset_required)
			amdgpu_dm_psr_disable_all(dm);

		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}

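	/* Record which OTG instance each enabled stream ended up on. */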
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
													 : false);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* For freesync config update on crtc state and params for irq */
		update_stream_irq_parameters(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/**
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. It was intentionally deferred until after the front end
	 * state was modified to wait until the OTG was on and so the IRQ
	 * handlers didn't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			dc_stream_retain(dm_new_crtc_state->stream);
			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
			manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
			/**
			 * Frontend may have changed so reapply the CRC capture
			 * settings for the stream.
			 */
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
				amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state,
					dm_new_crtc_state->crc_src);
			}
#endif
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc*/
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}

static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto err;

	/* Attach crtc to drm_atomic_state*/
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto err;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto err;


	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}

/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we can not rely on usermode call
	 * to turn on the display, so we do it here
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it the
	 * extra locks we are locking here will get released too
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}

static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz =
				aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
				aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}

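/*
 * Create and validate the DC stream backing this CRTC's new state, and
 * decide whether the requested change really requires a full modeset.
 */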
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
4b9674e5 8242 }
b830ebc9 8243
02d35a67 8244 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
8245 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8246 goto skip_modeset;
e7b07cee 8247
4b9674e5
LL
8248 DRM_DEBUG_DRIVER(
8249 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8250 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8251 "connectors_changed:%d\n",
8252 acrtc->crtc_id,
8253 new_crtc_state->enable,
8254 new_crtc_state->active,
8255 new_crtc_state->planes_changed,
8256 new_crtc_state->mode_changed,
8257 new_crtc_state->active_changed,
8258 new_crtc_state->connectors_changed);
62f55537 8259
4b9674e5
LL
8260 /* Remove stream for any changed/disabled CRTC */
8261 if (!enable) {
62f55537 8262
4b9674e5
LL
8263 if (!dm_old_crtc_state->stream)
8264 goto skip_modeset;
eb3dc897 8265
4b9674e5
LL
8266 ret = dm_atomic_get_state(state, &dm_state);
8267 if (ret)
8268 goto fail;
e7b07cee 8269
4b9674e5
LL
8270 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8271 crtc->base.id);
62f55537 8272
4b9674e5
LL
8273 /* i.e. reset mode */
8274 if (dc_remove_stream_from_ctx(
8275 dm->dc,
8276 dm_state->context,
8277 dm_old_crtc_state->stream) != DC_OK) {
8278 ret = -EINVAL;
8279 goto fail;
8280 }
62f55537 8281
4b9674e5
LL
8282 dc_stream_release(dm_old_crtc_state->stream);
8283 dm_new_crtc_state->stream = NULL;
bb47de73 8284
4b9674e5 8285 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 8286
4b9674e5 8287 *lock_and_validation_needed = true;
62f55537 8288
4b9674e5
LL
8289 } else {/* Add stream for any updated/enabled CRTC */
8290 /*
8291 * Quick fix to prevent NULL pointer on new_stream when
8292 * added MST connectors not found in existing crtc_state in the chained mode
8293 * TODO: need to dig out the root cause of that
8294 */
8295 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8296 goto skip_modeset;
62f55537 8297
4b9674e5
LL
8298 if (modereset_required(new_crtc_state))
8299 goto skip_modeset;
62f55537 8300
4b9674e5
LL
8301 if (modeset_required(new_crtc_state, new_stream,
8302 dm_old_crtc_state->stream)) {
62f55537 8303
4b9674e5 8304 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 8305
4b9674e5
LL
8306 ret = dm_atomic_get_state(state, &dm_state);
8307 if (ret)
8308 goto fail;
27b3f4fc 8309
4b9674e5 8310 dm_new_crtc_state->stream = new_stream;
62f55537 8311
4b9674e5 8312 dc_stream_retain(new_stream);
1dc90497 8313
4b9674e5
LL
8314 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8315 crtc->base.id);
1dc90497 8316
4b9674e5
LL
8317 if (dc_add_stream_to_ctx(
8318 dm->dc,
8319 dm_state->context,
8320 dm_new_crtc_state->stream) != DC_OK) {
8321 ret = -EINVAL;
8322 goto fail;
9b690ef3
BL
8323 }
8324
4b9674e5
LL
8325 *lock_and_validation_needed = true;
8326 }
8327 }
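
	/*
	 * create_validate_stream_for_sink() handed us a fresh stream
	 * reference above; it was either retained into the new CRTC state
	 * or is dropped below as the extra reference.
	 */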

skip_modeset:
	/* Release the extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
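
/*
 * Decide whether a plane has changed in a way that requires it to be
 * removed from the DC context and recreated, rather than updated in place.
 */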
static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
	 * the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* Src/dst size and scaling updates. */
		if (old_other_state->src_w != new_other_state->src_w ||
		    old_other_state->src_h != new_other_state->src_h ||
		    old_other_state->crtc_w != new_other_state->crtc_w ||
		    old_other_state->crtc_h != new_other_state->crtc_h)
			return true;

		/* Rotation / mirroring updates. */
		if (old_other_state->rotation != new_other_state->rotation)
			return true;

		/* Blending updates. */
		if (old_other_state->pixel_blend_mode !=
		    new_other_state->pixel_blend_mode)
			return true;

		/* Alpha updates. */
		if (old_other_state->alpha != new_other_state->alpha)
			return true;

		/* Colorspace changes. */
		if (old_other_state->color_range != new_other_state->color_range ||
		    old_other_state->color_encoding != new_other_state->color_encoding)
			return true;

		/* Framebuffer checks fall at the end. */
		if (!old_other_state->fb || !new_other_state->fb)
			continue;

		/* Pixel format changes can require bandwidth updates. */
		if (old_other_state->fb->format != new_other_state->fb->format)
			return true;

		old_dm_plane_state = to_dm_plane_state(old_other_state);
		new_dm_plane_state = to_dm_plane_state(new_other_state);

		/* Tiling and DCC changes also require bandwidth updates. */
		if (old_dm_plane_state->tiling_flags !=
		    new_dm_plane_state->tiling_flags)
			return true;
	}

	return false;
}

static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* TODO: Implement a better atomic check for the cursor plane */
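	/*
	 * The cursor is not part of the DC plane context; amdgpu_dm programs
	 * the hardware cursor separately through the dc_stream cursor helpers
	 * (dc_stream_set_cursor_attributes()/dc_stream_set_cursor_position())
	 * in the commit path, so only basic sanity checks are done here.
	 */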
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
		    (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
					 new_plane_state->crtc_w, new_plane_state->crtc_h);
			return -EINVAL;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context))
			return -EINVAL;

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}
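
/*
 * On ASICs that support DSC over DisplayPort MST, a mode change on one CRTC
 * can change the DSC configuration of every stream sharing the same MST
 * link, so those CRTCs must be added to the atomic state for revalidation.
 */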
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to validate
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another CRTC, acquiring the global lock guarantees that
 * any such full-update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, negative error code on validation failure.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;

	amdgpu_check_debugfs_connector_property_change(adev, state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}
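
		/*
		 * A change in the ABM (Adaptive Backlight Management) level
		 * alone does not mark the CRTC as changed, so flag its
		 * connectors as changed to route the commit through the
		 * path that reprograms the stream.
		 */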
		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Prepass for updating tiling flags on new planes. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);

		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
				  &new_dm_plane_state->tmz_surface);
		if (ret)
			goto fail;
	}
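
	/*
	 * DC wants removals before additions: first drop the planes and
	 * streams that are going away, then add streams and planes for the
	 * new configuration, so planes are only ever attached to or detached
	 * from a stream that exists in the context.
	 */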
	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper; check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into the context without causing a full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
						 UPDATE_TYPE_FULL :
						 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
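
/*
 * DPCD DOWN_STREAM_PORT_COUNT (0x005) carries the MSA_TIMING_PAR_IGNORED
 * bit; a sink that sets it can ignore the MSA timing parameters, which is
 * what allows the refresh rate to vary for FreeSync.
 */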
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;
	}

	return capable;
}

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-NULL, restrict FreeSync to DP and eDP sinks.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if the monitor has a continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;
			break;
		}
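
		/*
		 * Only advertise FreeSync when the EDID range gives a usable
		 * VRR window (more than 10 Hz between min and max refresh).
		 */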
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
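
	/*
	 * DP_PSR_SUPPORT (DPCD 0x070) reports the sink's PSR version;
	 * zero means the panel does not support PSR at all.
	 */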
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable the PSR firmware
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating the
	 * interrupt to enter PSR; fall back to two static frames if the
	 * vsync rate cannot be determined.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);
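
	/*
	 * vsync rate = pixel clock / (h_total * v_total); for example a
	 * 1080p60 CEA timing gives 148.5 MHz / (2200 * 1125) = 60 Hz.
	 */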

	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable the PSR firmware
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}

/*
 * amdgpu_dm_psr_disable_all() - disable PSR on every stream where it is
 * currently enabled
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}