drm/amd/display: System black screen hangs on driver load
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
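/*
 * Editorial sketch (not part of the original source): how dm_dmub_hw_init()
 * below slices up a signed DMUB firmware image, assuming the layout implied
 * by the offset math in that function:
 *
 *   ucode_array_offset_bytes
 *   v
 *   +------------+-----------------------------+------------+-----------+
 *   | PSP header |        inst_const           | PSP footer | bss/data  |
 *   |  (0x100)   | (inst_const_bytes minus     |  (0x100)   | (bss_data |
 *   |            |  PSP header and footer)     |            |  _bytes)  |
 *   +------------+-----------------------------+------------+-----------+
 */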

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
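
/*
 * Editorial sketch (not part of the original source): a typical atomic
 * commit flows through this file roughly as follows. The names below are
 * real symbols declared further down; the arrows are illustrative only.
 *
 *   userspace atomic ioctl
 *     -> amdgpu_dm_atomic_check()               (validate state with DC)
 *     -> amdgpu_dm_atomic_commit_tail()         (program DC streams/planes)
 *     -> DC/DMUB hardware programming
 *     -> dm_pflip_high_irq() / dm_crtc_high_irq() (completion back to DRM)
 */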

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
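
/*
 * Editorial note (not part of the original source): the reg-format packing
 * above puts the vertical component in the low 16 bits and the horizontal
 * component in the high 16 bits. For example, h_position = 100 (0x64) and
 * v_position = 200 (0xC8) yields *position = (0x64 << 16) | 0xC8 = 0x006400C8.
 */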

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif
#endif

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
				&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
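
/*
 * Editorial note (not part of the original source): max_size above is in
 * pixels and the allocation assumes 4 bytes per pixel. For a 3840x2160 mode
 * with a typical total raster of 4400x2250, that is 4400 * 2250 * 4 bytes,
 * i.e. roughly 39.6 MB of GTT reserved for the FBC compressor.
 */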

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
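
/*
 * Editorial note (not part of the original source): the shift amounts in
 * mmhub_read_system_context() encode register granularities. Addresses
 * shifted by 18 are in 256 KB units (system aperture low/high), by 24 in
 * 16 MB units (AGP base/bot/top), and by 12 in 4 KB pages (GART page table
 * start/end), which is why each value is shifted back before being stored
 * in pa_config.
 */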
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(
		dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vlank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction, Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

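/*
 * Editorial note (not part of the original source): the 16-entry ramp
 * computed in dm_late_init() is exactly linear because 0xFFFF is divisible
 * by 15: linear_lut[] = { 0x0000, 0x1111, 0x2222, ..., 0xFFFF }, i.e. a
 * step of 4369 between successive backlight LUT points.
 */
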
1632static void s3_handle_mst(struct drm_device *dev, bool suspend)
1633{
c84dec2f 1634 struct amdgpu_dm_connector *aconnector;
4562236b 1635 struct drm_connector *connector;
f8d2d39e 1636 struct drm_connector_list_iter iter;
fe7553be
LP
1637 struct drm_dp_mst_topology_mgr *mgr;
1638 int ret;
1639 bool need_hotplug = false;
4562236b 1640
f8d2d39e
LP
1641 drm_connector_list_iter_begin(dev, &iter);
1642 drm_for_each_connector_iter(connector, &iter) {
fe7553be
LP
1643 aconnector = to_amdgpu_dm_connector(connector);
1644 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1645 aconnector->mst_port)
1646 continue;
1647
1648 mgr = &aconnector->mst_mgr;
1649
1650 if (suspend) {
1651 drm_dp_mst_topology_mgr_suspend(mgr);
1652 } else {
6f85f738 1653 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
fe7553be
LP
1654 if (ret < 0) {
1655 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1656 need_hotplug = true;
1657 }
1658 }
4562236b 1659 }
f8d2d39e 1660 drm_connector_list_iter_end(&iter);
fe7553be
LP
1661
1662 if (need_hotplug)
1663 drm_kms_helper_hotplug_event(dev);
4562236b
HW
1664}
1665
9340dfd3
HW
1666static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1667{
1668 struct smu_context *smu = &adev->smu;
1669 int ret = 0;
1670
1671 if (!is_support_sw_smu(adev))
1672 return 0;
1673
1674 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1675 * on the Windows driver DC implementation.
1676 * For Navi1x, the clock settings of the DCN watermarks are fixed. The
1677 * settings should be passed to smu during boot up and resume from s3.
1678 * boot up: dc calculates the dcn watermark clock settings within dc_create,
1679 * dcn20_resource_construct
1680 * and then calls the pplib functions below to pass the settings to smu:
1681 * smu_set_watermarks_for_clock_ranges
1682 * smu_set_watermarks_table
1683 * navi10_set_watermarks_table
1684 * smu_write_watermarks_table
1685 *
1686 * For Renoir, the clock settings of the dcn watermarks are also fixed values.
1687 * dc has implemented a different flow for the Windows driver:
1688 * dc_hardware_init / dc_set_power_state
1689 * dcn10_init_hw
1690 * notify_wm_ranges
1691 * set_wm_ranges
1692 * -- Linux
1693 * smu_set_watermarks_for_clock_ranges
1694 * renoir_set_watermarks_table
1695 * smu_write_watermarks_table
1696 *
1697 * For Linux,
1698 * dc_hardware_init -> amdgpu_dm_init
1699 * dc_set_power_state --> dm_resume
1700 *
1701 * Therefore, this function applies to navi10/12/14 but not to Renoir.
1702 */
1704 switch (adev->asic_type) {
1705 case CHIP_NAVI10:
1706 case CHIP_NAVI14:
1707 case CHIP_NAVI12:
1708 break;
1709 default:
1710 return 0;
1711 }
1712
e7a95eea
EQ
1713 ret = smu_write_watermarks_table(smu);
1714 if (ret) {
1715 DRM_ERROR("Failed to update WMTABLE!\n");
1716 return ret;
9340dfd3
HW
1717 }
1718
9340dfd3
HW
1719 return 0;
1720}
1721
b8592b48
LL
1722/**
1723 * dm_hw_init() - Initialize DC device
28d687ea 1724 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1725 *
1726 * Initialize the &struct amdgpu_display_manager device. This involves calling
1727 * the initializers of each DM component, then populating the struct with them.
1728 *
1729 * Although the function implies hardware initialization, both hardware and
1730 * software are initialized here. Splitting them out to their relevant init
1731 * hooks is a future TODO item.
1732 *
1733 * Some notable things that are initialized here:
1734 *
1735 * - Display Core, both software and hardware
1736 * - DC modules that we need (freesync and color management)
1737 * - DRM software states
1738 * - Interrupt sources and handlers
1739 * - Vblank support
1740 * - Debug FS entries, if enabled
1741 */
4562236b
HW
1742static int dm_hw_init(void *handle)
1743{
1744 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1745 /* Create DAL display manager */
1746 amdgpu_dm_init(adev);
4562236b
HW
1747 amdgpu_dm_hpd_init(adev);
1748
4562236b
HW
1749 return 0;
1750}
1751
b8592b48
LL
1752/**
1753 * dm_hw_fini() - Teardown DC device
28d687ea 1754 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1755 *
1756 * Teardown components within &struct amdgpu_display_manager that require
1757 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1758 * were loaded. Also flush IRQ workqueues and disable them.
1759 */
4562236b
HW
1760static int dm_hw_fini(void *handle)
1761{
1762 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1763
1764 amdgpu_dm_hpd_fini(adev);
1765
1766 amdgpu_dm_irq_fini(adev);
21de3396 1767 amdgpu_dm_fini(adev);
4562236b
HW
1768 return 0;
1769}
1770
cdaae837
BL
1771
1772static int dm_enable_vblank(struct drm_crtc *crtc);
1773static void dm_disable_vblank(struct drm_crtc *crtc);
1774
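/* Helper used around GPU reset: for every stream in @state, enable or
 * disable the owning CRTC's pflip and vblank interrupts so that display
 * IRQs are quiesced before reset and restored afterwards.
 */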
1775static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1776 struct dc_state *state, bool enable)
1777{
1778 enum dc_irq_source irq_source;
1779 struct amdgpu_crtc *acrtc;
1780 int rc = -EBUSY;
1781 int i = 0;
1782
1783 for (i = 0; i < state->stream_count; i++) {
1784 acrtc = get_crtc_by_otg_inst(
1785 adev, state->stream_status[i].primary_otg_inst);
1786
1787 if (acrtc && state->stream_status[i].plane_count != 0) {
1788 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1789 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1790 DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1791 acrtc->crtc_id, enable ? "en" : "dis", rc);
1792 if (rc)
1793 DRM_WARN("Failed to %s pflip interrupts\n",
1794 enable ? "enable" : "disable");
1795
1796 if (enable) {
1797 rc = dm_enable_vblank(&acrtc->base);
1798 if (rc)
1799 DRM_WARN("Failed to enable vblank interrupts\n");
1800 } else {
1801 dm_disable_vblank(&acrtc->base);
1802 }
1803
1804 }
1805 }
1806
1807}
1808
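/* Commit a DC state with all streams (and their planes) removed. Used on
 * the GPU-reset suspend path to quiesce the display hardware while
 * dm->cached_dc_state keeps the topology around for restoration on resume.
 */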
dfd84d90 1809static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
1810{
1811 struct dc_state *context = NULL;
1812 enum dc_status res = DC_ERROR_UNEXPECTED;
1813 int i;
1814 struct dc_stream_state *del_streams[MAX_PIPES];
1815 int del_streams_count = 0;
1816
1817 memset(del_streams, 0, sizeof(del_streams));
1818
1819 context = dc_create_state(dc);
1820 if (context == NULL)
1821 goto context_alloc_fail;
1822
1823 dc_resource_state_copy_construct_current(dc, context);
1824
1825 /* First remove from context all streams */
1826 for (i = 0; i < context->stream_count; i++) {
1827 struct dc_stream_state *stream = context->streams[i];
1828
1829 del_streams[del_streams_count++] = stream;
1830 }
1831
1832 /* Remove all planes for removed streams and then remove the streams */
1833 for (i = 0; i < del_streams_count; i++) {
1834 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1835 res = DC_FAIL_DETACH_SURFACES;
1836 goto fail;
1837 }
1838
1839 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1840 if (res != DC_OK)
1841 goto fail;
1842 }
1843
1844
1845 res = dc_validate_global_state(dc, context, false);
1846
1847 if (res != DC_OK) {
1848 DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1849 goto fail;
1850 }
1851
1852 res = dc_commit_state(dc, context);
1853
1854fail:
1855 dc_release_state(context);
1856
1857context_alloc_fail:
1858 return res;
1859}
1860
4562236b
HW
1861static int dm_suspend(void *handle)
1862{
1863 struct amdgpu_device *adev = handle;
1864 struct amdgpu_display_manager *dm = &adev->dm;
1865 int ret = 0;
4562236b 1866
53b3f8f4 1867 if (amdgpu_in_reset(adev)) {
cdaae837 1868 mutex_lock(&dm->dc_lock);
98ab5f35
BL
1869
1870#if defined(CONFIG_DRM_AMD_DC_DCN)
1871 dc_allow_idle_optimizations(adev->dm.dc, false);
1872#endif
1873
cdaae837
BL
1874 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1875
1876 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1877
1878 amdgpu_dm_commit_zero_streams(dm->dc);
1879
1880 amdgpu_dm_irq_suspend(adev);
1881
1882 return ret;
1883 }
4562236b 1884
9a65df19
WL
1885#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1886 amdgpu_dm_crtc_secure_display_suspend(adev);
1887#endif
d2f0b53b 1888 WARN_ON(adev->dm.cached_state);
4a580877 1889 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 1890
4a580877 1891 s3_handle_mst(adev_to_drm(adev), true);
4562236b 1892
4562236b
HW
1893 amdgpu_dm_irq_suspend(adev);
1894
a3621485 1895
32f5062d 1896 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 1897
1c2075d4 1898 return 0;
4562236b
HW
1899}
1900
1daf8c63
AD
1901static struct amdgpu_dm_connector *
1902amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1903 struct drm_crtc *crtc)
4562236b
HW
1904{
1905 uint32_t i;
c2cea706 1906 struct drm_connector_state *new_con_state;
4562236b
HW
1907 struct drm_connector *connector;
1908 struct drm_crtc *crtc_from_state;
1909
c2cea706
LSL
1910 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1911 crtc_from_state = new_con_state->crtc;
4562236b
HW
1912
1913 if (crtc_from_state == crtc)
c84dec2f 1914 return to_amdgpu_dm_connector(connector);
4562236b
HW
1915 }
1916
1917 return NULL;
1918}
1919
fbbdadf2
BL
1920static void emulated_link_detect(struct dc_link *link)
1921{
1922 struct dc_sink_init_data sink_init_data = { 0 };
1923 struct display_sink_capability sink_caps = { 0 };
1924 enum dc_edid_status edid_status;
1925 struct dc_context *dc_ctx = link->ctx;
1926 struct dc_sink *sink = NULL;
1927 struct dc_sink *prev_sink = NULL;
1928
1929 link->type = dc_connection_none;
1930 prev_sink = link->local_sink;
1931
30164a16
VL
1932 if (prev_sink)
1933 dc_sink_release(prev_sink);
fbbdadf2
BL
1934
1935 switch (link->connector_signal) {
1936 case SIGNAL_TYPE_HDMI_TYPE_A: {
1937 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1938 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1939 break;
1940 }
1941
1942 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1943 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1944 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1945 break;
1946 }
1947
1948 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1949 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1950 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1951 break;
1952 }
1953
1954 case SIGNAL_TYPE_LVDS: {
1955 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1956 sink_caps.signal = SIGNAL_TYPE_LVDS;
1957 break;
1958 }
1959
1960 case SIGNAL_TYPE_EDP: {
1961 sink_caps.transaction_type =
1962 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1963 sink_caps.signal = SIGNAL_TYPE_EDP;
1964 break;
1965 }
1966
1967 case SIGNAL_TYPE_DISPLAY_PORT: {
1968 sink_caps.transaction_type =
1969 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1970 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1971 break;
1972 }
1973
1974 default:
1975 DC_ERROR("Invalid connector type! signal:%d\n",
1976 link->connector_signal);
1977 return;
1978 }
1979
1980 sink_init_data.link = link;
1981 sink_init_data.sink_signal = sink_caps.signal;
1982
1983 sink = dc_sink_create(&sink_init_data);
1984 if (!sink) {
1985 DC_ERROR("Failed to create sink!\n");
1986 return;
1987 }
1988
dcd5fb82 1989 /* dc_sink_create returns a new reference */
fbbdadf2
BL
1990 link->local_sink = sink;
1991
1992 edid_status = dm_helpers_read_local_edid(
1993 link->ctx,
1994 link,
1995 sink);
1996
1997 if (edid_status != EDID_OK)
1998 DC_ERROR("Failed to read EDID\n");
1999
2000}
2001
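/* Replay the cached DC state after a GPU reset. Each surface is flagged
 * with force_full_update so the per-stream re-commit reprograms the full
 * hardware state instead of only a delta.
 */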
cdaae837
BL
2002static void dm_gpureset_commit_state(struct dc_state *dc_state,
2003 struct amdgpu_display_manager *dm)
2004{
2005 struct {
2006 struct dc_surface_update surface_updates[MAX_SURFACES];
2007 struct dc_plane_info plane_infos[MAX_SURFACES];
2008 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2009 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2010 struct dc_stream_update stream_update;
2011 } *bundle;
2012 int k, m;
2013
2014 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2015
2016 if (!bundle) {
2017 dm_error("Failed to allocate update bundle\n");
2018 goto cleanup;
2019 }
2020
2021 for (k = 0; k < dc_state->stream_count; k++) {
2022 bundle->stream_update.stream = dc_state->streams[k];
2023
2024 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2025 bundle->surface_updates[m].surface =
2026 dc_state->stream_status[k].plane_states[m];
2027 bundle->surface_updates[m].surface->force_full_update =
2028 true;
2029 }
2030 dc_commit_updates_for_stream(
2031 dm->dc, bundle->surface_updates,
2032 dc_state->stream_status[k].plane_count,
263a4feb 2033 dc_state->streams[k], &bundle->stream_update, dc_state);
cdaae837
BL
2034 }
2035
2036cleanup:
2037 kfree(bundle);
2038
2039 return;
2040}
2041
3c4d55c9
AP
2042static void dm_set_dpms_off(struct dc_link *link)
2043{
2044 struct dc_stream_state *stream_state;
2045 struct amdgpu_dm_connector *aconnector = link->priv;
2046 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2047 struct dc_stream_update stream_update;
2048 bool dpms_off = true;
2049
2050 memset(&stream_update, 0, sizeof(stream_update));
2051 stream_update.dpms_off = &dpms_off;
2052
2053 mutex_lock(&adev->dm.dc_lock);
2054 stream_state = dc_stream_find_from_link(link);
2055
2056 if (stream_state == NULL) {
2057 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2058 mutex_unlock(&adev->dm.dc_lock);
2059 return;
2060 }
2061
2062 stream_update.stream = stream_state;
2063 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
263a4feb
AJ
2064 stream_state, &stream_update,
2065 stream_state->ctx->dc->current_state);
3c4d55c9
AP
2066 mutex_unlock(&adev->dm.dc_lock);
2067}
2068
4562236b
HW
2069static int dm_resume(void *handle)
2070{
2071 struct amdgpu_device *adev = handle;
4a580877 2072 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2073 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2074 struct amdgpu_dm_connector *aconnector;
4562236b 2075 struct drm_connector *connector;
f8d2d39e 2076 struct drm_connector_list_iter iter;
4562236b 2077 struct drm_crtc *crtc;
c2cea706 2078 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
2079 struct dm_crtc_state *dm_new_crtc_state;
2080 struct drm_plane *plane;
2081 struct drm_plane_state *new_plane_state;
2082 struct dm_plane_state *dm_new_plane_state;
113b7a01 2083 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2084 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
2085 struct dc_state *dc_state;
2086 int i, r, j;
4562236b 2087
53b3f8f4 2088 if (amdgpu_in_reset(adev)) {
cdaae837
BL
2089 dc_state = dm->cached_dc_state;
2090
2091 r = dm_dmub_hw_init(adev);
2092 if (r)
2093 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2094
2095 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2096 dc_resume(dm->dc);
2097
2098 amdgpu_dm_irq_resume_early(adev);
2099
2100 for (i = 0; i < dc_state->stream_count; i++) {
2101 dc_state->streams[i]->mode_changed = true;
2102 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2103 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2104 = 0xffffffff;
2105 }
2106 }
2107
2108 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2109
cdaae837
BL
2110 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2111
2112 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2113
2114 dc_release_state(dm->cached_dc_state);
2115 dm->cached_dc_state = NULL;
2116
2117 amdgpu_dm_irq_resume_late(adev);
2118
2119 mutex_unlock(&dm->dc_lock);
2120
2121 return 0;
2122 }
113b7a01
LL
2123 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2124 dc_release_state(dm_state->context);
2125 dm_state->context = dc_create_state(dm->dc);
2126 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2127 dc_resource_state_construct(dm->dc, dm_state->context);
2128
8c7aea40
NK
2129 /* Before powering on DC we need to re-initialize DMUB. */
2130 r = dm_dmub_hw_init(adev);
2131 if (r)
2132 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2133
a80aa93d
ML
2134 /* power on hardware */
2135 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2136
4562236b
HW
2137 /* program HPD filter */
2138 dc_resume(dm->dc);
2139
4562236b
HW
2140 /*
2141 * early enable HPD Rx IRQ, should be done before set mode as short
2142 * pulse interrupts are used for MST
2143 */
2144 amdgpu_dm_irq_resume_early(adev);
2145
d20ebea8 2146 /* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
2147 s3_handle_mst(ddev, false);
2148
4562236b 2149 /* Do detection */
f8d2d39e
LP
2150 drm_connector_list_iter_begin(ddev, &iter);
2151 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2152 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2153
2154 /*
2155 * This is the case of traversing through already created
2156 * MST connectors; they should be skipped.
2157 */
2158 if (aconnector->mst_port)
2159 continue;
2160
03ea364c 2161 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2162 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2163 DRM_ERROR("KMS: Failed to detect connector\n");
2164
2165 if (aconnector->base.force && new_connection_type == dc_connection_none)
2166 emulated_link_detect(aconnector->dc_link);
2167 else
2168 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2169
2170 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2171 aconnector->fake_enable = false;
2172
dcd5fb82
MF
2173 if (aconnector->dc_sink)
2174 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2175 aconnector->dc_sink = NULL;
2176 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2177 mutex_unlock(&aconnector->hpd_lock);
4562236b 2178 }
f8d2d39e 2179 drm_connector_list_iter_end(&iter);
4562236b 2180
1f6010a9 2181 /* Force mode set in atomic commit */
a80aa93d 2182 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2183 new_crtc_state->active_changed = true;
4f346e65 2184
fcb4019e
LSL
2185 /*
2186 * atomic_check is expected to create the dc states. We need to release
2187 * them here, since they were duplicated as part of the suspend
2188 * procedure.
2189 */
a80aa93d 2190 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2191 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2192 if (dm_new_crtc_state->stream) {
2193 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2194 dc_stream_release(dm_new_crtc_state->stream);
2195 dm_new_crtc_state->stream = NULL;
2196 }
2197 }
2198
a80aa93d 2199 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2200 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2201 if (dm_new_plane_state->dc_state) {
2202 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2203 dc_plane_state_release(dm_new_plane_state->dc_state);
2204 dm_new_plane_state->dc_state = NULL;
2205 }
2206 }
2207
2d1af6a1 2208 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2209
a80aa93d 2210 dm->cached_state = NULL;
0a214e2f 2211
9a65df19
WL
2212#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2213 amdgpu_dm_crtc_secure_display_resume(adev);
2214#endif
2215
9faa4237 2216 amdgpu_dm_irq_resume_late(adev);
4562236b 2217
9340dfd3
HW
2218 amdgpu_dm_smu_write_watermarks_table(adev);
2219
2d1af6a1 2220 return 0;
4562236b
HW
2221}
2222
b8592b48
LL
2223/**
2224 * DOC: DM Lifecycle
2225 *
2226 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2227 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2228 * the base driver's device list to be initialized and torn down accordingly.
2229 *
2230 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2231 */
2232
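/*
 * Illustrative sketch (an assumption about the base driver's call site, not
 * code from this file): amdgpu wires the DM IP block in during device init
 * roughly as follows, after which the amd_ip_funcs hooks below are invoked:
 *
 *	if (amdgpu_device_has_dc_support(adev))
 *		amdgpu_device_ip_block_add(adev, &dm_ip_block);
 */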
4562236b
HW
2233static const struct amd_ip_funcs amdgpu_dm_funcs = {
2234 .name = "dm",
2235 .early_init = dm_early_init,
7abcf6b5 2236 .late_init = dm_late_init,
4562236b
HW
2237 .sw_init = dm_sw_init,
2238 .sw_fini = dm_sw_fini,
2239 .hw_init = dm_hw_init,
2240 .hw_fini = dm_hw_fini,
2241 .suspend = dm_suspend,
2242 .resume = dm_resume,
2243 .is_idle = dm_is_idle,
2244 .wait_for_idle = dm_wait_for_idle,
2245 .check_soft_reset = dm_check_soft_reset,
2246 .soft_reset = dm_soft_reset,
2247 .set_clockgating_state = dm_set_clockgating_state,
2248 .set_powergating_state = dm_set_powergating_state,
2249};
2250
2251const struct amdgpu_ip_block_version dm_ip_block =
2252{
2253 .type = AMD_IP_BLOCK_TYPE_DCE,
2254 .major = 1,
2255 .minor = 0,
2256 .rev = 0,
2257 .funcs = &amdgpu_dm_funcs,
2258};
2259
ca3268c4 2260
b8592b48
LL
2261/**
2262 * DOC: atomic
2263 *
2264 * *WIP*
2265 */
0a323b84 2266
b3663f70 2267static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2268 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2269 .get_format_info = amd_get_format_info,
366c1baa 2270 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2271 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2272 .atomic_commit = drm_atomic_helper_commit,
54f5499a
AG
2273};
2274
2275static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2276 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2277};
2278
94562810
RS
2279static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2280{
2281 u32 max_cll, min_cll, max, min, q, r;
2282 struct amdgpu_dm_backlight_caps *caps;
2283 struct amdgpu_display_manager *dm;
2284 struct drm_connector *conn_base;
2285 struct amdgpu_device *adev;
ec11fe37 2286 struct dc_link *link = NULL;
94562810
RS
2287 static const u8 pre_computed_values[] = {
2288 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2289 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2290
2291 if (!aconnector || !aconnector->dc_link)
2292 return;
2293
ec11fe37 2294 link = aconnector->dc_link;
2295 if (link->connector_signal != SIGNAL_TYPE_EDP)
2296 return;
2297
94562810 2298 conn_base = &aconnector->base;
1348969a 2299 adev = drm_to_adev(conn_base->dev);
94562810
RS
2300 dm = &adev->dm;
2301 caps = &dm->backlight_caps;
2302 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2303 caps->aux_support = false;
2304 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2305 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2306
2307 if (caps->ext_caps->bits.oled == 1 ||
2308 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2309 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2310 caps->aux_support = true;
2311
7c209847
TI
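/* Explicit override via the amdgpu_backlight module parameter
 * (assumed semantics: 0 forces PWM, 1 forces AUX, and the default
 * of -1 keeps the capability-based choice made above). */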
2312 if (amdgpu_backlight == 0)
2313 caps->aux_support = false;
2314 else if (amdgpu_backlight == 1)
2315 caps->aux_support = true;
2316
94562810
RS
2317 /* From the specification (CTA-861-G), for calculating the maximum
2318 * luminance we need to use:
2319 * Luminance = 50*2**(CV/32)
2320 * where CV is a one-byte value.
2321 * Calculating this expression would require floating-point precision;
2322 * to avoid that complexity, we take advantage of the fact that CV is
2323 * divided by a constant. From Euclid's division algorithm, we know that
2324 * CV can be written as CV = 32*q + r. Next, we replace CV in the
2325 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we only
2326 * need to pre-compute the values of 2**(r/32). For pre-computing them
2327 * we used the following Ruby line:
2328 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2329 * The results of the above expression can be verified against
2330 * pre_computed_values.
2331 */
2332 q = max_cll >> 5;
2333 r = max_cll % 32;
2334 max = (1 << q) * pre_computed_values[r];
2335
2336 // min luminance: maxLum * (CV/255)^2 / 100
2337 q = DIV_ROUND_CLOSEST(min_cll, 255);
2338 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2339
2340 caps->aux_max_input_signal = max;
2341 caps->aux_min_input_signal = min;
2342}
2343
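/* Worked example (illustrative): for max_cll = 100, q = 100 >> 5 = 3 and
 * r = 100 % 32 = 4, so max = (1 << 3) * pre_computed_values[4] = 8 * 55 =
 * 440 nits, close to the exact 50 * 2^(100/32) ~= 436 nits.
 */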
97e51c16
HW
2344void amdgpu_dm_update_connector_after_detect(
2345 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2346{
2347 struct drm_connector *connector = &aconnector->base;
2348 struct drm_device *dev = connector->dev;
b73a22d3 2349 struct dc_sink *sink;
4562236b
HW
2350
2351 /* MST handled by drm_mst framework */
2352 if (aconnector->mst_mgr.mst_state == true)
2353 return;
2354
4562236b 2355 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2356 if (sink)
2357 dc_sink_retain(sink);
4562236b 2358
1f6010a9
DF
2359 /*
2360 * EDID mgmt connector gets its first update only in the mode_valid hook;
2361 * the connector sink is then set to either a fake or a physical sink,
2362 * depending on link status. Skip if already done during boot.
4562236b
HW
2363 */
2364 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2365 && aconnector->dc_em_sink) {
2366
1f6010a9
DF
2367 /*
2368 * For S3 resume with headless use eml_sink to fake stream
2369 * because on resume connector->sink is set to NULL
4562236b
HW
2370 */
2371 mutex_lock(&dev->mode_config.mutex);
2372
2373 if (sink) {
922aa1e1 2374 if (aconnector->dc_sink) {
98e6436d 2375 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2376 /*
2377 * The retain and release below are used to bump up the refcount for
2378 * the sink because the link doesn't point to it anymore after
2379 * disconnect; otherwise, on the next crtc-to-connector reshuffle by
2380 * the UMD we would get an unwanted dc_sink release.
2381 */
dcd5fb82 2382 dc_sink_release(aconnector->dc_sink);
922aa1e1 2383 }
4562236b 2384 aconnector->dc_sink = sink;
dcd5fb82 2385 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2386 amdgpu_dm_update_freesync_caps(connector,
2387 aconnector->edid);
4562236b 2388 } else {
98e6436d 2389 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2390 if (!aconnector->dc_sink) {
4562236b 2391 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2392 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2393 }
4562236b
HW
2394 }
2395
2396 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2397
2398 if (sink)
2399 dc_sink_release(sink);
4562236b
HW
2400 return;
2401 }
2402
2403 /*
2404 * TODO: temporary guard to look for proper fix
2405 * if this sink is MST sink, we should not do anything
2406 */
dcd5fb82
MF
2407 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2408 dc_sink_release(sink);
4562236b 2409 return;
dcd5fb82 2410 }
4562236b
HW
2411
2412 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2413 /*
2414 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2415 * Do nothing!!
2416 */
f1ad2f5e 2417 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2418 aconnector->connector_id);
dcd5fb82
MF
2419 if (sink)
2420 dc_sink_release(sink);
4562236b
HW
2421 return;
2422 }
2423
f1ad2f5e 2424 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2425 aconnector->connector_id, aconnector->dc_sink, sink);
2426
2427 mutex_lock(&dev->mode_config.mutex);
2428
1f6010a9
DF
2429 /*
2430 * 1. Update status of the drm connector
2431 * 2. Send an event and let userspace tell us what to do
2432 */
4562236b 2433 if (sink) {
1f6010a9
DF
2434 /*
2435 * TODO: check if we still need the S3 mode update workaround.
2436 * If yes, put it here.
2437 */
c64b0d6b 2438 if (aconnector->dc_sink) {
98e6436d 2439 amdgpu_dm_update_freesync_caps(connector, NULL);
c64b0d6b
VL
2440 dc_sink_release(aconnector->dc_sink);
2441 }
4562236b
HW
2442
2443 aconnector->dc_sink = sink;
dcd5fb82 2444 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2445 if (sink->dc_edid.length == 0) {
4562236b 2446 aconnector->edid = NULL;
e6142dd5
AP
2447 if (aconnector->dc_link->aux_mode) {
2448 drm_dp_cec_unset_edid(
2449 &aconnector->dm_dp_aux.aux);
2450 }
900b3cb1 2451 } else {
4562236b 2452 aconnector->edid =
e6142dd5 2453 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2454
c555f023 2455 drm_connector_update_edid_property(connector,
e6142dd5 2456 aconnector->edid);
e6142dd5
AP
2457 if (aconnector->dc_link->aux_mode)
2458 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2459 aconnector->edid);
4562236b 2460 }
e6142dd5 2461
98e6436d 2462 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2463 update_connector_ext_caps(aconnector);
4562236b 2464 } else {
e86e8947 2465 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2466 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2467 drm_connector_update_edid_property(connector, NULL);
4562236b 2468 aconnector->num_modes = 0;
dcd5fb82 2469 dc_sink_release(aconnector->dc_sink);
4562236b 2470 aconnector->dc_sink = NULL;
5326c452 2471 aconnector->edid = NULL;
0c8620d6
BL
2472#ifdef CONFIG_DRM_AMD_DC_HDCP
2473 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2474 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2475 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2476#endif
4562236b
HW
2477 }
2478
2479 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 2480
0f877894
OV
2481 update_subconnector_property(aconnector);
2482
dcd5fb82
MF
2483 if (sink)
2484 dc_sink_release(sink);
4562236b
HW
2485}
2486
2487static void handle_hpd_irq(void *param)
2488{
c84dec2f 2489 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2490 struct drm_connector *connector = &aconnector->base;
2491 struct drm_device *dev = connector->dev;
fbbdadf2 2492 enum dc_connection_type new_connection_type = dc_connection_none;
0c8620d6 2493#ifdef CONFIG_DRM_AMD_DC_HDCP
1348969a 2494 struct amdgpu_device *adev = drm_to_adev(dev);
97f6c917 2495 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 2496#endif
4562236b 2497
1f6010a9
DF
2498 /*
2499 * In case of failure, or for an MST connector, there is no need to update the
2500 * connector status or notify the OS, since (in the MST case) MST does this in its own context.
4562236b
HW
2501 */
2502 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2503
0c8620d6 2504#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 2505 if (adev->dm.hdcp_workqueue) {
96a3b32e 2506 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
97f6c917
BL
2507 dm_con_state->update_hdcp = true;
2508 }
0c8620d6 2509#endif
2e0ac3d6
HW
2510 if (aconnector->fake_enable)
2511 aconnector->fake_enable = false;
2512
fbbdadf2
BL
2513 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2514 DRM_ERROR("KMS: Failed to detect connector\n");
2515
2516 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2517 emulated_link_detect(aconnector->dc_link);
2518
2519
2520 drm_modeset_lock_all(dev);
2521 dm_restore_drm_connector_state(dev, connector);
2522 drm_modeset_unlock_all(dev);
2523
2524 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2525 drm_kms_helper_hotplug_event(dev);
2526
2527 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9
AP
2528 if (new_connection_type == dc_connection_none &&
2529 aconnector->dc_link->type == dc_connection_none)
2530 dm_set_dpms_off(aconnector->dc_link);
4562236b 2531
3c4d55c9 2532 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b
HW
2533
2534 drm_modeset_lock_all(dev);
2535 dm_restore_drm_connector_state(dev, connector);
2536 drm_modeset_unlock_all(dev);
2537
2538 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2539 drm_kms_helper_hotplug_event(dev);
2540 }
2541 mutex_unlock(&aconnector->hpd_lock);
2542
2543}
2544
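/* Service the DisplayPort MST Event Status Indicator (ESI) registers: read
 * the ESI bytes, let the MST topology manager handle the IRQ, write the
 * handled vector back as an ACK, and repeat until no new IRQ is raised (or
 * the iteration cap is hit).
 */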
c84dec2f 2545static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2546{
2547 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2548 uint8_t dret;
2549 bool new_irq_handled = false;
2550 int dpcd_addr;
2551 int dpcd_bytes_to_read;
2552
2553 const int max_process_count = 30;
2554 int process_count = 0;
2555
2556 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2557
2558 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2559 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2560 /* DPCD 0x200 - 0x201 for downstream IRQ */
2561 dpcd_addr = DP_SINK_COUNT;
2562 } else {
2563 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2564 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2565 dpcd_addr = DP_SINK_COUNT_ESI;
2566 }
2567
2568 dret = drm_dp_dpcd_read(
2569 &aconnector->dm_dp_aux.aux,
2570 dpcd_addr,
2571 esi,
2572 dpcd_bytes_to_read);
2573
2574 while (dret == dpcd_bytes_to_read &&
2575 process_count < max_process_count) {
2576 uint8_t retry;
2577 dret = 0;
2578
2579 process_count++;
2580
f1ad2f5e 2581 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
2582 /* handle HPD short pulse irq */
2583 if (aconnector->mst_mgr.mst_state)
2584 drm_dp_mst_hpd_irq(
2585 &aconnector->mst_mgr,
2586 esi,
2587 &new_irq_handled);
4562236b
HW
2588
2589 if (new_irq_handled) {
2590 /* ACK at DPCD to notify downstream */
2591 const int ack_dpcd_bytes_to_write =
2592 dpcd_bytes_to_read - 1;
2593
2594 for (retry = 0; retry < 3; retry++) {
2595 uint8_t wret;
2596
2597 wret = drm_dp_dpcd_write(
2598 &aconnector->dm_dp_aux.aux,
2599 dpcd_addr + 1,
2600 &esi[1],
2601 ack_dpcd_bytes_to_write);
2602 if (wret == ack_dpcd_bytes_to_write)
2603 break;
2604 }
2605
1f6010a9 2606 /* check if there is new irq to be handled */
4562236b
HW
2607 dret = drm_dp_dpcd_read(
2608 &aconnector->dm_dp_aux.aux,
2609 dpcd_addr,
2610 esi,
2611 dpcd_bytes_to_read);
2612
2613 new_irq_handled = false;
d4a6e8a9 2614 } else {
4562236b 2615 break;
d4a6e8a9 2616 }
4562236b
HW
2617 }
2618
2619 if (process_count == max_process_count)
f1ad2f5e 2620 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
2621}
2622
2623static void handle_hpd_rx_irq(void *param)
2624{
c84dec2f 2625 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2626 struct drm_connector *connector = &aconnector->base;
2627 struct drm_device *dev = connector->dev;
53cbf65c 2628 struct dc_link *dc_link = aconnector->dc_link;
4562236b 2629 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 2630 bool result = false;
fbbdadf2 2631 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 2632 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 2633 union hpd_irq_data hpd_irq_data;
2a0f9270
BL
2634
2635 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 2636
1f6010a9
DF
2637 /*
2638 * TODO: Temporarily add a mutex so the hpd interrupt does not have a gpio
2639 * conflict; after the i2c helper is implemented, this mutex should be
2640 * retired.
2641 */
53cbf65c 2642 if (dc_link->type != dc_connection_mst_branch)
4562236b
HW
2643 mutex_lock(&aconnector->hpd_lock);
2644
3083a984
QZ
2645 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2646
2647 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2648 (dc_link->type == dc_connection_mst_branch)) {
2649 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2650 result = true;
2651 dm_handle_hpd_rx_irq(aconnector);
2652 goto out;
2653 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2654 result = false;
2655 dm_handle_hpd_rx_irq(aconnector);
2656 goto out;
2657 }
2658 }
2659
c8ea79a8 2660 mutex_lock(&adev->dm.dc_lock);
2a0f9270 2661#ifdef CONFIG_DRM_AMD_DC_HDCP
c8ea79a8 2662 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2a0f9270 2663#else
c8ea79a8 2664 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2a0f9270 2665#endif
c8ea79a8
QZ
2666 mutex_unlock(&adev->dm.dc_lock);
2667
3083a984 2668out:
c8ea79a8 2669 if (result && !is_mst_root_connector) {
4562236b 2670 /* Downstream Port status changed. */
fbbdadf2
BL
2671 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2672 DRM_ERROR("KMS: Failed to detect connector\n");
2673
2674 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2675 emulated_link_detect(dc_link);
2676
2677 if (aconnector->fake_enable)
2678 aconnector->fake_enable = false;
2679
2680 amdgpu_dm_update_connector_after_detect(aconnector);
2681
2682
2683 drm_modeset_lock_all(dev);
2684 dm_restore_drm_connector_state(dev, connector);
2685 drm_modeset_unlock_all(dev);
2686
2687 drm_kms_helper_hotplug_event(dev);
2688 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
2689
2690 if (aconnector->fake_enable)
2691 aconnector->fake_enable = false;
2692
4562236b
HW
2693 amdgpu_dm_update_connector_after_detect(aconnector);
2694
2695
2696 drm_modeset_lock_all(dev);
2697 dm_restore_drm_connector_state(dev, connector);
2698 drm_modeset_unlock_all(dev);
2699
2700 drm_kms_helper_hotplug_event(dev);
2701 }
2702 }
2a0f9270 2703#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
2704 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2705 if (adev->dm.hdcp_workqueue)
2706 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2707 }
2a0f9270 2708#endif
4562236b 2709
e86e8947
HV
2710 if (dc_link->type != dc_connection_mst_branch) {
2711 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
4562236b 2712 mutex_unlock(&aconnector->hpd_lock);
e86e8947 2713 }
4562236b
HW
2714}
2715
2716static void register_hpd_handlers(struct amdgpu_device *adev)
2717{
4a580877 2718 struct drm_device *dev = adev_to_drm(adev);
4562236b 2719 struct drm_connector *connector;
c84dec2f 2720 struct amdgpu_dm_connector *aconnector;
4562236b
HW
2721 const struct dc_link *dc_link;
2722 struct dc_interrupt_params int_params = {0};
2723
2724 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2725 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2726
2727 list_for_each_entry(connector,
2728 &dev->mode_config.connector_list, head) {
2729
c84dec2f 2730 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2731 dc_link = aconnector->dc_link;
2732
2733 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2734 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2735 int_params.irq_source = dc_link->irq_source_hpd;
2736
2737 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2738 handle_hpd_irq,
2739 (void *) aconnector);
2740 }
2741
2742 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2743
2744 /* Also register for DP short pulse (hpd_rx). */
2745 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2746 int_params.irq_source = dc_link->irq_source_hpd_rx;
2747
2748 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2749 handle_hpd_rx_irq,
2750 (void *) aconnector);
2751 }
2752 }
2753}
2754
55e56389
MR
2755#if defined(CONFIG_DRM_AMD_DC_SI)
2756/* Register IRQ sources and initialize IRQ callbacks */
2757static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2758{
2759 struct dc *dc = adev->dm.dc;
2760 struct common_irq_params *c_irq_params;
2761 struct dc_interrupt_params int_params = {0};
2762 int r;
2763 int i;
2764 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2765
2766 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2767 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2768
2769 /*
2770 * Actions of amdgpu_irq_add_id():
2771 * 1. Register a set() function with base driver.
2772 * Base driver will call set() function to enable/disable an
2773 * interrupt in DC hardware.
2774 * 2. Register amdgpu_dm_irq_handler().
2775 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2776 * coming from DC hardware.
2777 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2778 * for acknowledging and handling. */
2779
2780 /* Use VBLANK interrupt */
2781 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2782 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2783 if (r) {
2784 DRM_ERROR("Failed to add crtc irq id!\n");
2785 return r;
2786 }
2787
2788 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2789 int_params.irq_source =
2790 dc_interrupt_to_irq_source(dc, i + 1, 0);
2791
2792 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2793
2794 c_irq_params->adev = adev;
2795 c_irq_params->irq_src = int_params.irq_source;
2796
2797 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2798 dm_crtc_high_irq, c_irq_params);
2799 }
2800
2801 /* Use GRPH_PFLIP interrupt */
2802 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2803 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2804 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2805 if (r) {
2806 DRM_ERROR("Failed to add page flip irq id!\n");
2807 return r;
2808 }
2809
2810 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2811 int_params.irq_source =
2812 dc_interrupt_to_irq_source(dc, i, 0);
2813
2814 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2815
2816 c_irq_params->adev = adev;
2817 c_irq_params->irq_src = int_params.irq_source;
2818
2819 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2820 dm_pflip_high_irq, c_irq_params);
2821
2822 }
2823
2824 /* HPD */
2825 r = amdgpu_irq_add_id(adev, client_id,
2826 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2827 if (r) {
2828 DRM_ERROR("Failed to add hpd irq id!\n");
2829 return r;
2830 }
2831
2832 register_hpd_handlers(adev);
2833
2834 return 0;
2835}
2836#endif
2837
4562236b
HW
2838/* Register IRQ sources and initialize IRQ callbacks */
2839static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2840{
2841 struct dc *dc = adev->dm.dc;
2842 struct common_irq_params *c_irq_params;
2843 struct dc_interrupt_params int_params = {0};
2844 int r;
2845 int i;
1ffdeca6 2846 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 2847
84374725 2848 if (adev->asic_type >= CHIP_VEGA10)
3760f76c 2849 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
2850
2851 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2852 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2853
1f6010a9
DF
2854 /*
2855 * Actions of amdgpu_irq_add_id():
4562236b
HW
2856 * 1. Register a set() function with base driver.
2857 * Base driver will call set() function to enable/disable an
2858 * interrupt in DC hardware.
2859 * 2. Register amdgpu_dm_irq_handler().
2860 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2861 * coming from DC hardware.
2862 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2863 * for acknowledging and handling. */
2864
b57de80a 2865 /* Use VBLANK interrupt */
e9029155 2866 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 2867 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
2868 if (r) {
2869 DRM_ERROR("Failed to add crtc irq id!\n");
2870 return r;
2871 }
2872
2873 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2874 int_params.irq_source =
3d761e79 2875 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 2876
b57de80a 2877 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
2878
2879 c_irq_params->adev = adev;
2880 c_irq_params->irq_src = int_params.irq_source;
2881
2882 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2883 dm_crtc_high_irq, c_irq_params);
2884 }
2885
d2574c33
MK
2886 /* Use VUPDATE interrupt */
2887 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2888 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2889 if (r) {
2890 DRM_ERROR("Failed to add vupdate irq id!\n");
2891 return r;
2892 }
2893
2894 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2895 int_params.irq_source =
2896 dc_interrupt_to_irq_source(dc, i, 0);
2897
2898 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2899
2900 c_irq_params->adev = adev;
2901 c_irq_params->irq_src = int_params.irq_source;
2902
2903 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2904 dm_vupdate_high_irq, c_irq_params);
2905 }
2906
3d761e79 2907 /* Use GRPH_PFLIP interrupt */
4562236b
HW
2908 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2909 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 2910 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
2911 if (r) {
2912 DRM_ERROR("Failed to add page flip irq id!\n");
2913 return r;
2914 }
2915
2916 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2917 int_params.irq_source =
2918 dc_interrupt_to_irq_source(dc, i, 0);
2919
2920 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2921
2922 c_irq_params->adev = adev;
2923 c_irq_params->irq_src = int_params.irq_source;
2924
2925 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2926 dm_pflip_high_irq, c_irq_params);
2927
2928 }
2929
2930 /* HPD */
2c8ad2d5
AD
2931 r = amdgpu_irq_add_id(adev, client_id,
2932 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
2933 if (r) {
2934 DRM_ERROR("Failed to add hpd irq id!\n");
2935 return r;
2936 }
2937
2938 register_hpd_handlers(adev);
2939
2940 return 0;
2941}
2942
b86a1aa3 2943#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
2944/* Register IRQ sources and initialize IRQ callbacks */
2945static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2946{
2947 struct dc *dc = adev->dm.dc;
2948 struct common_irq_params *c_irq_params;
2949 struct dc_interrupt_params int_params = {0};
2950 int r;
2951 int i;
2952
2953 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2954 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2955
1f6010a9
DF
2956 /*
2957 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
2958 * 1. Register a set() function with base driver.
2959 * Base driver will call set() function to enable/disable an
2960 * interrupt in DC hardware.
2961 * 2. Register amdgpu_dm_irq_handler().
2962 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2963 * coming from DC hardware.
2964 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2965 * for acknowledging and handling.
1f6010a9 2966 */
ff5ef992
AD
2967
2968 /* Use VSTARTUP interrupt */
2969 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2970 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2971 i++) {
3760f76c 2972 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
2973
2974 if (r) {
2975 DRM_ERROR("Failed to add crtc irq id!\n");
2976 return r;
2977 }
2978
2979 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2980 int_params.irq_source =
2981 dc_interrupt_to_irq_source(dc, i, 0);
2982
2983 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2984
2985 c_irq_params->adev = adev;
2986 c_irq_params->irq_src = int_params.irq_source;
2987
2346ef47
NK
2988 amdgpu_dm_irq_register_interrupt(
2989 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2990 }
2991
86bc2219
WL
2992 /* Use otg vertical line interrupt */
2993#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
2994 for (i = DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL;
2995 i <= DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL
2996 + adev->mode_info.num_crtc - 1;
2997 i++) {
2998 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vline0_irq);
2999
3000 if (r) {
3001 DRM_ERROR("Failed to add vline0 irq id!\n");
3002 return r;
3003 }
3004
3005 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3006 int_params.irq_source =
3007 dc_interrupt_to_irq_source(dc, i, 0);
3008
3009 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3010 - DC_IRQ_SOURCE_DC1_VLINE0];
3011
3012 c_irq_params->adev = adev;
3013 c_irq_params->irq_src = int_params.irq_source;
3014
3015 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3016 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3017 }
3018#endif
3019
2346ef47
NK
3020 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3021 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3022 * to trigger at end of each vblank, regardless of state of the lock,
3023 * matching DCE behaviour.
3024 */
3025 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3026 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3027 i++) {
3028 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3029
3030 if (r) {
3031 DRM_ERROR("Failed to add vupdate irq id!\n");
3032 return r;
3033 }
3034
3035 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3036 int_params.irq_source =
3037 dc_interrupt_to_irq_source(dc, i, 0);
3038
3039 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3040
3041 c_irq_params->adev = adev;
3042 c_irq_params->irq_src = int_params.irq_source;
3043
ff5ef992 3044 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 3045 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
3046 }
3047
ff5ef992
AD
3048 /* Use GRPH_PFLIP interrupt */
3049 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3050 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3051 i++) {
3760f76c 3052 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
3053 if (r) {
3054 DRM_ERROR("Failed to add page flip irq id!\n");
3055 return r;
3056 }
3057
3058 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3059 int_params.irq_source =
3060 dc_interrupt_to_irq_source(dc, i, 0);
3061
3062 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3063
3064 c_irq_params->adev = adev;
3065 c_irq_params->irq_src = int_params.irq_source;
3066
3067 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3068 dm_pflip_high_irq, c_irq_params);
3069
3070 }
3071
3072 /* HPD */
3760f76c 3073 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
ff5ef992
AD
3074 &adev->hpd_irq);
3075 if (r) {
3076 DRM_ERROR("Failed to add hpd irq id!\n");
3077 return r;
3078 }
3079
3080 register_hpd_handlers(adev);
3081
3082 return 0;
3083}
3084#endif
3085
eb3dc897
NK
3086/*
3087 * Acquires the lock for the atomic state object and returns
3088 * the new atomic state.
3089 *
3090 * This should only be called during atomic check.
3091 */
3092static int dm_atomic_get_state(struct drm_atomic_state *state,
3093 struct dm_atomic_state **dm_state)
3094{
3095 struct drm_device *dev = state->dev;
1348969a 3096 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3097 struct amdgpu_display_manager *dm = &adev->dm;
3098 struct drm_private_state *priv_state;
eb3dc897
NK
3099
3100 if (*dm_state)
3101 return 0;
3102
eb3dc897
NK
3103 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3104 if (IS_ERR(priv_state))
3105 return PTR_ERR(priv_state);
3106
3107 *dm_state = to_dm_atomic_state(priv_state);
3108
3109 return 0;
3110}
3111
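/*
 * Usage sketch (illustrative): a caller inside atomic check grabs the DM
 * private state like this; the helper is a no-op once *dm_state is set.
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *	// dm_state->context can now be inspected for this commit
 */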
dfd84d90 3112static struct dm_atomic_state *
eb3dc897
NK
3113dm_atomic_get_new_state(struct drm_atomic_state *state)
3114{
3115 struct drm_device *dev = state->dev;
1348969a 3116 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3117 struct amdgpu_display_manager *dm = &adev->dm;
3118 struct drm_private_obj *obj;
3119 struct drm_private_state *new_obj_state;
3120 int i;
3121
3122 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3123 if (obj->funcs == dm->atomic_obj.funcs)
3124 return to_dm_atomic_state(new_obj_state);
3125 }
3126
3127 return NULL;
3128}
3129
eb3dc897
NK
3130static struct drm_private_state *
3131dm_atomic_duplicate_state(struct drm_private_obj *obj)
3132{
3133 struct dm_atomic_state *old_state, *new_state;
3134
3135 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3136 if (!new_state)
3137 return NULL;
3138
3139 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3140
813d20dc
AW
3141 old_state = to_dm_atomic_state(obj->state);
3142
3143 if (old_state && old_state->context)
3144 new_state->context = dc_copy_state(old_state->context);
3145
eb3dc897
NK
3146 if (!new_state->context) {
3147 kfree(new_state);
3148 return NULL;
3149 }
3150
eb3dc897
NK
3151 return &new_state->base;
3152}
3153
3154static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3155 struct drm_private_state *state)
3156{
3157 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3158
3159 if (dm_state && dm_state->context)
3160 dc_release_state(dm_state->context);
3161
3162 kfree(dm_state);
3163}
3164
3165static struct drm_private_state_funcs dm_atomic_state_funcs = {
3166 .atomic_duplicate_state = dm_atomic_duplicate_state,
3167 .atomic_destroy_state = dm_atomic_destroy_state,
3168};
3169
4562236b
HW
3170static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3171{
eb3dc897 3172 struct dm_atomic_state *state;
4562236b
HW
3173 int r;
3174
3175 adev->mode_info.mode_config_initialized = true;
3176
4a580877
LT
3177 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3178 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3179
4a580877
LT
3180 adev_to_drm(adev)->mode_config.max_width = 16384;
3181 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3182
4a580877
LT
3183 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3184 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3185 /* indicates support for immediate flip */
4a580877 3186 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3187
4a580877 3188 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3189
eb3dc897
NK
3190 state = kzalloc(sizeof(*state), GFP_KERNEL);
3191 if (!state)
3192 return -ENOMEM;
3193
813d20dc 3194 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3195 if (!state->context) {
3196 kfree(state);
3197 return -ENOMEM;
3198 }
3199
3200 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3201
4a580877 3202 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3203 &adev->dm.atomic_obj,
eb3dc897
NK
3204 &state->base,
3205 &dm_atomic_state_funcs);
3206
3dc9b1ce 3207 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3208 if (r) {
3209 dc_release_state(state->context);
3210 kfree(state);
4562236b 3211 return r;
b67a468a 3212 }
4562236b 3213
6ce8f316 3214 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3215 if (r) {
3216 dc_release_state(state->context);
3217 kfree(state);
6ce8f316 3218 return r;
b67a468a 3219 }
6ce8f316 3220
4562236b
HW
3221 return 0;
3222}
3223
206bbafe
DF
3224#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3225#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3226#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3227
4562236b
HW
3228#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3229 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3230
206bbafe
DF
3231static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3232{
3233#if defined(CONFIG_ACPI)
3234 struct amdgpu_dm_backlight_caps caps;
3235
58965855
FS
3236 memset(&caps, 0, sizeof(caps));
3237
206bbafe
DF
3238 if (dm->backlight_caps.caps_valid)
3239 return;
3240
3241 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3242 if (caps.caps_valid) {
94562810
RS
3243 dm->backlight_caps.caps_valid = true;
3244 if (caps.aux_support)
3245 return;
206bbafe
DF
3246 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3247 dm->backlight_caps.max_input_signal = caps.max_input_signal;
206bbafe
DF
3248 } else {
3249 dm->backlight_caps.min_input_signal =
3250 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3251 dm->backlight_caps.max_input_signal =
3252 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3253 }
3254#else
94562810
RS
3255 if (dm->backlight_caps.aux_support)
3256 return;
3257
8bcbc9ef
DF
3258 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3259 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3260#endif
3261}
3262
69d9f427
AM
3263static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3264 unsigned *min, unsigned *max)
94562810 3265{
94562810 3266 if (!caps)
69d9f427 3267 return 0;
94562810 3268
69d9f427
AM
3269 if (caps->aux_support) {
3270 // Firmware limits are in nits, DC API wants millinits.
3271 *max = 1000 * caps->aux_max_input_signal;
3272 *min = 1000 * caps->aux_min_input_signal;
94562810 3273 } else {
69d9f427
AM
3274 // Firmware limits are 8-bit, PWM control is 16-bit.
3275 *max = 0x101 * caps->max_input_signal;
3276 *min = 0x101 * caps->min_input_signal;
94562810 3277 }
69d9f427
AM
3278 return 1;
3279}
94562810 3280
69d9f427
AM
3281static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3282 uint32_t brightness)
3283{
3284 unsigned min, max;
94562810 3285
69d9f427
AM
3286 if (!get_brightness_range(caps, &min, &max))
3287 return brightness;
3288
3289 // Rescale 0..255 to min..max
3290 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3291 AMDGPU_MAX_BL_LEVEL);
3292}
3293
3294static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3295 uint32_t brightness)
3296{
3297 unsigned min, max;
3298
3299 if (!get_brightness_range(caps, &min, &max))
3300 return brightness;
3301
3302 if (brightness < min)
3303 return 0;
3304 // Rescale min..max to 0..255
3305 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3306 max - min);
94562810
RS
3307}
3308
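/* Worked example (illustrative): with the default PWM caps of
 * min_input_signal = 12 and max_input_signal = 255, the 16-bit range is
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535; a user brightness
 * of 128 then maps to 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255)
 * = 3084 + 31349 = 34433.
 */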
4562236b
HW
3309static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3310{
3311 struct amdgpu_display_manager *dm = bl_get_data(bd);
206bbafe 3312 struct amdgpu_dm_backlight_caps caps;
94562810
RS
3313 struct dc_link *link = NULL;
3314 u32 brightness;
3315 bool rc;
4562236b 3316
206bbafe
DF
3317 amdgpu_dm_update_backlight_caps(dm);
3318 caps = dm->backlight_caps;
94562810
RS
3319
3320 link = (struct dc_link *)dm->backlight_link;
3321
69d9f427 3322 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
94562810
RS
3323 // Change brightness based on AUX property
3324 if (caps.aux_support)
3c8e99cc
AD
3325 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3326 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3327 else
3328 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
94562810
RS
3329
3330 return rc ? 0 : 1;
4562236b
HW
3331}
3332
3333static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3334{
620a0d27 3335 struct amdgpu_display_manager *dm = bl_get_data(bd);
f275e875
AD
3336 struct amdgpu_dm_backlight_caps caps;
3337
3338 amdgpu_dm_update_backlight_caps(dm);
3339 caps = dm->backlight_caps;
620a0d27 3340
f275e875
AD
3341 if (caps.aux_support) {
3342 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3343 u32 avg, peak;
3344 bool rc;
3345
3346 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3347 if (!rc)
3348 return bd->props.brightness;
3349 return convert_brightness_to_user(&caps, avg);
3350 } else {
3351 int ret = dc_link_get_backlight_level(dm->backlight_link);
3352
3353 if (ret == DC_ERROR_UNEXPECTED)
3354 return bd->props.brightness;
3355 return convert_brightness_to_user(&caps, ret);
3356 }
4562236b
HW
3357}
3358
3359static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 3360 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
3361 .get_brightness = amdgpu_dm_backlight_get_brightness,
3362 .update_status = amdgpu_dm_backlight_update_status,
3363};
3364
7578ecda
AD
3365static void
3366amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
3367{
3368 char bl_name[16];
3369 struct backlight_properties props = { 0 };
3370
206bbafe
DF
3371 amdgpu_dm_update_backlight_caps(dm);
3372
4562236b 3373 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 3374 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
3375 props.type = BACKLIGHT_RAW;
3376
3377 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4a580877 3378 adev_to_drm(dm->adev)->primary->index);
4562236b
HW
3379
3380 dm->backlight_dev = backlight_device_register(bl_name,
4a580877
LT
3381 adev_to_drm(dm->adev)->dev,
3382 dm,
3383 &amdgpu_dm_backlight_ops,
3384 &props);
4562236b 3385
74baea42 3386 if (IS_ERR(dm->backlight_dev))
4562236b
HW
3387 DRM_ERROR("DM: Backlight registration failed!\n");
3388 else
f1ad2f5e 3389 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
3390}
3391
3392#endif
3393
df534fff 3394static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 3395 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
3396 enum drm_plane_type plane_type,
3397 const struct dc_plane_cap *plane_cap)
df534fff 3398{
f180b4bc 3399 struct drm_plane *plane;
df534fff
S
3400 unsigned long possible_crtcs;
3401 int ret = 0;
3402
f180b4bc 3403 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
3404 if (!plane) {
3405 DRM_ERROR("KMS: Failed to allocate plane\n");
3406 return -ENOMEM;
3407 }
b2fddb13 3408 plane->type = plane_type;
df534fff
S
3409
3410 /*
b2fddb13
NK
3411 * HACK: IGT tests expect that the primary plane for a CRTC
3412 * can only have one possible CRTC. Only expose support for
3413 * all CRTCs on planes that are not going to be used as a
3414 * primary plane for a CRTC - i.e. overlay or underlay planes.
df534fff
S
3415 */
3416 possible_crtcs = 1 << plane_id;
3417 if (plane_id >= dm->dc->caps.max_streams)
3418 possible_crtcs = 0xff;
3419
cc1fec57 3420 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
3421
3422 if (ret) {
3423 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 3424 kfree(plane);
df534fff
S
3425 return ret;
3426 }
3427
54087768
NK
3428 if (mode_info)
3429 mode_info->planes[plane_id] = plane;
3430
df534fff
S
3431 return ret;
3432}
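/*
 * Illustration of the possible_crtcs mask above (assuming
 * dc->caps.max_streams == 4): primary planes 0..3 each get
 * 1 << plane_id, binding them to exactly one CRTC, while an overlay
 * plane initialized with plane_id >= 4 gets 0xff and may be placed
 * on any CRTC.
 */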
3433
89fc8d4e
HW
3434
3435static void register_backlight_device(struct amdgpu_display_manager *dm,
3436 struct dc_link *link)
3437{
3438#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3439 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3440
3441 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3442 link->type != dc_connection_none) {
1f6010a9
DF
3443 /*
3444 * Even if registration fails, we should continue with
89fc8d4e
HW
3445 * DM initialization because not having a backlight control
3446 * is better than a black screen.
3447 */
3448 amdgpu_dm_register_backlight_device(dm);
3449
3450 if (dm->backlight_dev)
3451 dm->backlight_link = link;
3452 }
3453#endif
3454}
3455
3456
1f6010a9
DF
3457/*
3458 * In this architecture, the association
4562236b
HW
3459 * connector -> encoder -> crtc
3460 * is not really required. The crtc and connector will hold the
3461 * display_index as an abstraction to use with the DAL component.
3462 *
3463 * Returns 0 on success
3464 */
7578ecda 3465static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
3466{
3467 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 3468 int32_t i;
c84dec2f 3469 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 3470 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 3471 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 3472 uint32_t link_cnt;
cc1fec57 3473 int32_t primary_planes;
fbbdadf2 3474 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 3475 const struct dc_plane_cap *plane;
4562236b 3476
d58159de
AD
3477 dm->display_indexes_num = dm->dc->caps.max_streams;
3478 /* Update the actual number of CRTCs in use */
3479 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3480
4562236b 3481 link_cnt = dm->dc->caps.max_links;
4562236b
HW
3482 if (amdgpu_dm_mode_config_init(dm->adev)) {
3483 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 3484 return -EINVAL;
4562236b
HW
3485 }
3486
b2fddb13
NK
3487 /* There is one primary plane per CRTC */
3488 primary_planes = dm->dc->caps.max_streams;
54087768 3489 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 3490
b2fddb13
NK
3491 /*
3492 * Initialize primary planes, implicit planes for legacy IOCTLS.
3493 * Order is reversed to match iteration order in atomic check.
3494 */
3495 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
3496 plane = &dm->dc->caps.planes[i];
3497
b2fddb13 3498 if (initialize_plane(dm, mode_info, i,
cc1fec57 3499 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 3500 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 3501 goto fail;
d4e13b0d 3502 }
df534fff 3503 }
92f3ac40 3504
0d579c7e
NK
3505 /*
3506 * Initialize overlay planes, index starting after primary planes.
3507 * These planes have a higher DRM index than the primary planes since
3508 * they should be considered as having a higher z-order.
3509 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
3510 *
3511 * Only support DCN for now, and only expose one so we don't encourage
3512 * userspace to use up all the pipes.
0d579c7e 3513 */
cc1fec57
NK
3514 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3515 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3516
3517 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3518 continue;
3519
3520 if (!plane->blends_with_above || !plane->blends_with_below)
3521 continue;
3522
ea36ad34 3523 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
3524 continue;
3525
54087768 3526 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 3527 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 3528 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 3529 goto fail;
d4e13b0d 3530 }
cc1fec57
NK
3531
3532 /* Only create one overlay plane. */
3533 break;
d4e13b0d 3534 }
4562236b 3535
d4e13b0d 3536 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 3537 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 3538 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 3539 goto fail;
4562236b 3540 }
4562236b 3541
4562236b
HW
3542 /* loops over all connectors on the board */
3543 for (i = 0; i < link_cnt; i++) {
89fc8d4e 3544 struct dc_link *link = NULL;
4562236b
HW
3545
3546 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3547 DRM_ERROR(
3548 "KMS: Cannot support more than %d display indexes\n",
3549 AMDGPU_DM_MAX_DISPLAY_INDEX);
3550 continue;
3551 }
3552
3553 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3554 if (!aconnector)
cd8a2ae8 3555 goto fail;
4562236b
HW
3556
3557 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 3558 if (!aencoder)
cd8a2ae8 3559 goto fail;
4562236b
HW
3560
3561 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3562 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 3563 goto fail;
4562236b
HW
3564 }
3565
3566 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3567 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 3568 goto fail;
4562236b
HW
3569 }
3570
89fc8d4e
HW
3571 link = dc_get_link_at_index(dm->dc, i);
3572
fbbdadf2
BL
3573 if (!dc_link_detect_sink(link, &new_connection_type))
3574 DRM_ERROR("KMS: Failed to detect connector\n");
3575
3576 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3577 emulated_link_detect(link);
3578 amdgpu_dm_update_connector_after_detect(aconnector);
3579
3580 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 3581 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 3582 register_backlight_device(dm, link);
397a9bc5
RL
3583 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3584 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
3585 }
3586
3587
4562236b
HW
3588 }
3589
3590 /* Software is initialized. Now we can register interrupt handlers. */
3591 switch (adev->asic_type) {
55e56389
MR
3592#if defined(CONFIG_DRM_AMD_DC_SI)
3593 case CHIP_TAHITI:
3594 case CHIP_PITCAIRN:
3595 case CHIP_VERDE:
3596 case CHIP_OLAND:
3597 if (dce60_register_irq_handlers(dm->adev)) {
3598 DRM_ERROR("DM: Failed to initialize IRQ\n");
3599 goto fail;
3600 }
3601 break;
3602#endif
4562236b
HW
3603 case CHIP_BONAIRE:
3604 case CHIP_HAWAII:
cd4b356f
AD
3605 case CHIP_KAVERI:
3606 case CHIP_KABINI:
3607 case CHIP_MULLINS:
4562236b
HW
3608 case CHIP_TONGA:
3609 case CHIP_FIJI:
3610 case CHIP_CARRIZO:
3611 case CHIP_STONEY:
3612 case CHIP_POLARIS11:
3613 case CHIP_POLARIS10:
b264d345 3614 case CHIP_POLARIS12:
7737de91 3615 case CHIP_VEGAM:
2c8ad2d5 3616 case CHIP_VEGA10:
2325ff30 3617 case CHIP_VEGA12:
1fe6bf2f 3618 case CHIP_VEGA20:
4562236b
HW
3619 if (dce110_register_irq_handlers(dm->adev)) {
3620 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3621 goto fail;
4562236b
HW
3622 }
3623 break;
b86a1aa3 3624#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3625 case CHIP_RAVEN:
fbd2afe5 3626 case CHIP_NAVI12:
476e955d 3627 case CHIP_NAVI10:
fce651e3 3628 case CHIP_NAVI14:
30221ad8 3629 case CHIP_RENOIR:
79037324 3630 case CHIP_SIENNA_CICHLID:
a6c5308f 3631 case CHIP_NAVY_FLOUNDER:
2a411205 3632 case CHIP_DIMGREY_CAVEFISH:
469989ca 3633 case CHIP_VANGOGH:
ff5ef992
AD
3634 if (dcn10_register_irq_handlers(dm->adev)) {
3635 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3636 goto fail;
ff5ef992
AD
3637 }
3638 break;
3639#endif
4562236b 3640 default:
e63f8673 3641 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 3642 goto fail;
4562236b
HW
3643 }
3644
4562236b 3645 return 0;
cd8a2ae8 3646fail:
4562236b 3647 kfree(aencoder);
4562236b 3648 kfree(aconnector);
54087768 3649
59d0f396 3650 return -EINVAL;
4562236b
HW
3651}
3652
7578ecda 3653static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b
HW
3654{
3655 drm_mode_config_cleanup(dm->ddev);
eb3dc897 3656 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
3657 return;
3658}
3659
3660/******************************************************************************
3661 * amdgpu_display_funcs functions
3662 *****************************************************************************/
3663
1f6010a9 3664/*
4562236b
HW
3665 * dm_bandwidth_update - program display watermarks
3666 *
3667 * @adev: amdgpu_device pointer
3668 *
3669 * Calculate and program the display watermarks and line buffer allocation.
3670 */
3671static void dm_bandwidth_update(struct amdgpu_device *adev)
3672{
49c07a99 3673 /* TODO: implement later */
4562236b
HW
3674}
3675
39cc5be2 3676static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
3677 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3678 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
3679 .backlight_set_level = NULL, /* never called for DC */
3680 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
3681 .hpd_sense = NULL,/* called unconditionally */
3682 .hpd_set_polarity = NULL, /* called unconditionally */
3683 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3684 .page_flip_get_scanoutpos =
3685 dm_crtc_get_scanoutpos,/* called unconditionally */
3686 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3687 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3688};
3689
3690#if defined(CONFIG_DEBUG_KERNEL_DC)
3691
3ee6b26b
AD
3692static ssize_t s3_debug_store(struct device *device,
3693 struct device_attribute *attr,
3694 const char *buf,
3695 size_t count)
4562236b
HW
3696{
3697 int ret;
3698 int s3_state;
ef1de361 3699 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 3700 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
3701
3702 ret = kstrtoint(buf, 0, &s3_state);
3703
3704 if (ret == 0) {
3705 if (s3_state) {
3706 dm_resume(adev);
4a580877 3707 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
3708 } else
3709 dm_suspend(adev);
3710 }
3711
3712 return ret == 0 ? count : 0;
3713}
3714
3715DEVICE_ATTR_WO(s3_debug);
3716
3717#endif
3718
3719static int dm_early_init(void *handle)
3720{
3721 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3722
4562236b 3723 switch (adev->asic_type) {
55e56389
MR
3724#if defined(CONFIG_DRM_AMD_DC_SI)
3725 case CHIP_TAHITI:
3726 case CHIP_PITCAIRN:
3727 case CHIP_VERDE:
3728 adev->mode_info.num_crtc = 6;
3729 adev->mode_info.num_hpd = 6;
3730 adev->mode_info.num_dig = 6;
3731 break;
3732 case CHIP_OLAND:
3733 adev->mode_info.num_crtc = 2;
3734 adev->mode_info.num_hpd = 2;
3735 adev->mode_info.num_dig = 2;
3736 break;
3737#endif
4562236b
HW
3738 case CHIP_BONAIRE:
3739 case CHIP_HAWAII:
3740 adev->mode_info.num_crtc = 6;
3741 adev->mode_info.num_hpd = 6;
3742 adev->mode_info.num_dig = 6;
4562236b 3743 break;
cd4b356f
AD
3744 case CHIP_KAVERI:
3745 adev->mode_info.num_crtc = 4;
3746 adev->mode_info.num_hpd = 6;
3747 adev->mode_info.num_dig = 7;
cd4b356f
AD
3748 break;
3749 case CHIP_KABINI:
3750 case CHIP_MULLINS:
3751 adev->mode_info.num_crtc = 2;
3752 adev->mode_info.num_hpd = 6;
3753 adev->mode_info.num_dig = 6;
cd4b356f 3754 break;
4562236b
HW
3755 case CHIP_FIJI:
3756 case CHIP_TONGA:
3757 adev->mode_info.num_crtc = 6;
3758 adev->mode_info.num_hpd = 6;
3759 adev->mode_info.num_dig = 7;
4562236b
HW
3760 break;
3761 case CHIP_CARRIZO:
3762 adev->mode_info.num_crtc = 3;
3763 adev->mode_info.num_hpd = 6;
3764 adev->mode_info.num_dig = 9;
4562236b
HW
3765 break;
3766 case CHIP_STONEY:
3767 adev->mode_info.num_crtc = 2;
3768 adev->mode_info.num_hpd = 6;
3769 adev->mode_info.num_dig = 9;
4562236b
HW
3770 break;
3771 case CHIP_POLARIS11:
b264d345 3772 case CHIP_POLARIS12:
4562236b
HW
3773 adev->mode_info.num_crtc = 5;
3774 adev->mode_info.num_hpd = 5;
3775 adev->mode_info.num_dig = 5;
4562236b
HW
3776 break;
3777 case CHIP_POLARIS10:
7737de91 3778 case CHIP_VEGAM:
4562236b
HW
3779 adev->mode_info.num_crtc = 6;
3780 adev->mode_info.num_hpd = 6;
3781 adev->mode_info.num_dig = 6;
4562236b 3782 break;
2c8ad2d5 3783 case CHIP_VEGA10:
2325ff30 3784 case CHIP_VEGA12:
1fe6bf2f 3785 case CHIP_VEGA20:
2c8ad2d5
AD
3786 adev->mode_info.num_crtc = 6;
3787 adev->mode_info.num_hpd = 6;
3788 adev->mode_info.num_dig = 6;
3789 break;
b86a1aa3 3790#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3791 case CHIP_RAVEN:
20f2ffe5
AD
3792 case CHIP_RENOIR:
3793 case CHIP_VANGOGH:
ff5ef992
AD
3794 adev->mode_info.num_crtc = 4;
3795 adev->mode_info.num_hpd = 4;
3796 adev->mode_info.num_dig = 4;
ff5ef992 3797 break;
476e955d 3798 case CHIP_NAVI10:
fbd2afe5 3799 case CHIP_NAVI12:
79037324 3800 case CHIP_SIENNA_CICHLID:
a6c5308f 3801 case CHIP_NAVY_FLOUNDER:
476e955d
HW
3802 adev->mode_info.num_crtc = 6;
3803 adev->mode_info.num_hpd = 6;
3804 adev->mode_info.num_dig = 6;
3805 break;
fce651e3 3806 case CHIP_NAVI14:
2a411205 3807 case CHIP_DIMGREY_CAVEFISH:
fce651e3
BL
3808 adev->mode_info.num_crtc = 5;
3809 adev->mode_info.num_hpd = 5;
3810 adev->mode_info.num_dig = 5;
3811 break;
20f2ffe5 3812#endif
4562236b 3813 default:
e63f8673 3814 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4562236b
HW
3815 return -EINVAL;
3816 }
3817
c8dd5715
MD
3818 amdgpu_dm_set_irq_funcs(adev);
3819
39cc5be2
AD
3820 if (adev->mode_info.funcs == NULL)
3821 adev->mode_info.funcs = &dm_display_funcs;
3822
1f6010a9
DF
3823 /*
3824 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 3825 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
3826 * amdgpu_device_init()
3827 */
4562236b
HW
3828#if defined(CONFIG_DEBUG_KERNEL_DC)
3829 device_create_file(
4a580877 3830 adev_to_drm(adev)->dev,
4562236b
HW
3831 &dev_attr_s3_debug);
3832#endif
3833
3834 return 0;
3835}
3836
9b690ef3 3837static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
3838 struct dc_stream_state *new_stream,
3839 struct dc_stream_state *old_stream)
9b690ef3 3840{
2afda735 3841 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3842}
3843
3844static bool modereset_required(struct drm_crtc_state *crtc_state)
3845{
2afda735 3846 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
3847}
3848
7578ecda 3849static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
3850{
3851 drm_encoder_cleanup(encoder);
3852 kfree(encoder);
3853}
3854
3855static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3856 .destroy = amdgpu_dm_encoder_destroy,
3857};
3858
e7b07cee 3859
6300b3bd
MK
3860static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3861 struct drm_framebuffer *fb,
3862 int *min_downscale, int *max_upscale)
3863{
3864 struct amdgpu_device *adev = drm_to_adev(dev);
3865 struct dc *dc = adev->dm.dc;
3866 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3867 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3868
3869 switch (fb->format->format) {
3870 case DRM_FORMAT_P010:
3871 case DRM_FORMAT_NV12:
3872 case DRM_FORMAT_NV21:
3873 *max_upscale = plane_cap->max_upscale_factor.nv12;
3874 *min_downscale = plane_cap->max_downscale_factor.nv12;
3875 break;
3876
3877 case DRM_FORMAT_XRGB16161616F:
3878 case DRM_FORMAT_ARGB16161616F:
3879 case DRM_FORMAT_XBGR16161616F:
3880 case DRM_FORMAT_ABGR16161616F:
3881 *max_upscale = plane_cap->max_upscale_factor.fp16;
3882 *min_downscale = plane_cap->max_downscale_factor.fp16;
3883 break;
3884
3885 default:
3886 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3887 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3888 break;
3889 }
3890
3891 /*
3892 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
3893 * scaling factor of 1.0 == 1000 units.
3894 */
3895 if (*max_upscale == 1)
3896 *max_upscale = 1000;
3897
3898 if (*min_downscale == 1)
3899 *min_downscale = 1000;
3900}
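/*
 * Worked example (illustrative, using the same values as the fallback
 * in fill_dc_scaling_info() below): an NV12 plane cap of
 * max_upscale_factor.nv12 = 16000 and max_downscale_factor.nv12 = 250
 * allows scaling between 0.25x and 16.0x, in units of 1/1000. A cap of
 * 1 ("no scaling") is normalized to 1000 so callers can compare it
 * against a 1.0x factor uniformly.
 */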
3901
3902
695af5f9
NK
3903static int fill_dc_scaling_info(const struct drm_plane_state *state,
3904 struct dc_scaling_info *scaling_info)
e7b07cee 3905{
6300b3bd 3906 int scale_w, scale_h, min_downscale, max_upscale;
e7b07cee 3907
695af5f9 3908 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 3909
695af5f9
NK
3910 /* Source is 16.16 fixed point, but we ignore the fractional part for now. */
3911 scaling_info->src_rect.x = state->src_x >> 16;
3912 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 3913
695af5f9
NK
3914 scaling_info->src_rect.width = state->src_w >> 16;
3915 if (scaling_info->src_rect.width == 0)
3916 return -EINVAL;
3917
3918 scaling_info->src_rect.height = state->src_h >> 16;
3919 if (scaling_info->src_rect.height == 0)
3920 return -EINVAL;
3921
3922 scaling_info->dst_rect.x = state->crtc_x;
3923 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
3924
3925 if (state->crtc_w == 0)
695af5f9 3926 return -EINVAL;
e7b07cee 3927
695af5f9 3928 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
3929
3930 if (state->crtc_h == 0)
695af5f9 3931 return -EINVAL;
e7b07cee 3932
695af5f9 3933 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 3934
695af5f9
NK
3935 /* DRM doesn't specify clipping on destination output. */
3936 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 3937
6300b3bd
MK
3938 /* Validate scaling per-format with DC plane caps */
3939 if (state->plane && state->plane->dev && state->fb) {
3940 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3941 &min_downscale, &max_upscale);
3942 } else {
3943 min_downscale = 250;
3944 max_upscale = 16000;
3945 }
3946
6491f0c0
NK
3947 scale_w = scaling_info->dst_rect.width * 1000 /
3948 scaling_info->src_rect.width;
e7b07cee 3949
6300b3bd 3950 if (scale_w < min_downscale || scale_w > max_upscale)
6491f0c0
NK
3951 return -EINVAL;
3952
3953 scale_h = scaling_info->dst_rect.height * 1000 /
3954 scaling_info->src_rect.height;
3955
6300b3bd 3956 if (scale_h < min_downscale || scale_h > max_upscale)
6491f0c0
NK
3957 return -EINVAL;
3958
695af5f9
NK
3959 /*
3960 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3961 * assume reasonable defaults based on the format.
3962 */
e7b07cee 3963
695af5f9 3964 return 0;
4562236b 3965}
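/*
 * Worked example (illustrative): a src_x of 0x00018000 in 16.16 fixed
 * point is 1.5 pixels; the >> 16 above keeps only the integer part.
 * Scaling a 960x540 source to a 1920x1080 destination gives
 * scale_w = 1920 * 1000 / 960 = 2000 (2.0x), which is accepted as
 * long as it lies within [min_downscale, max_upscale].
 */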
695af5f9 3966
a3241991
BN
3967static void
3968fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3969 uint64_t tiling_flags)
e7b07cee 3970{
a3241991
BN
3971 /* Fill GFX8 params */
3972 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3973 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 3974
a3241991
BN
3975 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3976 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3977 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3978 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3979 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 3980
a3241991
BN
3981 /* XXX fix me for VI */
3982 tiling_info->gfx8.num_banks = num_banks;
3983 tiling_info->gfx8.array_mode =
3984 DC_ARRAY_2D_TILED_THIN1;
3985 tiling_info->gfx8.tile_split = tile_split;
3986 tiling_info->gfx8.bank_width = bankw;
3987 tiling_info->gfx8.bank_height = bankh;
3988 tiling_info->gfx8.tile_aspect = mtaspect;
3989 tiling_info->gfx8.tile_mode =
3990 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3991 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3992 == DC_ARRAY_1D_TILED_THIN1) {
3993 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
3994 }
3995
a3241991
BN
3996 tiling_info->gfx8.pipe_config =
3997 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
3998}
3999
a3241991
BN
4000static void
4001fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4002 union dc_tiling_info *tiling_info)
4003{
4004 tiling_info->gfx9.num_pipes =
4005 adev->gfx.config.gb_addr_config_fields.num_pipes;
4006 tiling_info->gfx9.num_banks =
4007 adev->gfx.config.gb_addr_config_fields.num_banks;
4008 tiling_info->gfx9.pipe_interleave =
4009 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4010 tiling_info->gfx9.num_shader_engines =
4011 adev->gfx.config.gb_addr_config_fields.num_se;
4012 tiling_info->gfx9.max_compressed_frags =
4013 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4014 tiling_info->gfx9.num_rb_per_se =
4015 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4016 tiling_info->gfx9.shaderEnable = 1;
a3241991
BN
4017 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4018 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4019 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4020 adev->asic_type == CHIP_VANGOGH)
4021 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
7df7e505
NK
4022}
4023
695af5f9 4024static int
a3241991
BN
4025validate_dcc(struct amdgpu_device *adev,
4026 const enum surface_pixel_format format,
4027 const enum dc_rotation_angle rotation,
4028 const union dc_tiling_info *tiling_info,
4029 const struct dc_plane_dcc_param *dcc,
4030 const struct dc_plane_address *address,
4031 const struct plane_size *plane_size)
7df7e505
NK
4032{
4033 struct dc *dc = adev->dm.dc;
8daa1218
NC
4034 struct dc_dcc_surface_param input;
4035 struct dc_surface_dcc_cap output;
7df7e505 4036
8daa1218
NC
4037 memset(&input, 0, sizeof(input));
4038 memset(&output, 0, sizeof(output));
4039
a3241991 4040 if (!dcc->enable)
87b7ebc2
RS
4041 return 0;
4042
a3241991
BN
4043 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4044 !dc->cap_funcs.get_dcc_compression_cap)
09e5665a 4045 return -EINVAL;
7df7e505 4046
695af5f9 4047 input.format = format;
12e2b2d4
DL
4048 input.surface_size.width = plane_size->surface_size.width;
4049 input.surface_size.height = plane_size->surface_size.height;
695af5f9 4050 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 4051
695af5f9 4052 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 4053 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 4054 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
4055 input.scan = SCAN_DIRECTION_VERTICAL;
4056
4057 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 4058 return -EINVAL;
7df7e505
NK
4059
4060 if (!output.capable)
09e5665a 4061 return -EINVAL;
7df7e505 4062
a3241991
BN
4063 if (dcc->independent_64b_blks == 0 &&
4064 output.grph.rgb.independent_64b_blks != 0)
09e5665a 4065 return -EINVAL;
7df7e505 4066
a3241991
BN
4067 return 0;
4068}
4069
37384b3f
BN
4070static bool
4071modifier_has_dcc(uint64_t modifier)
4072{
4073 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4074}
4075
4076static unsigned
4077modifier_gfx9_swizzle_mode(uint64_t modifier)
4078{
4079 if (modifier == DRM_FORMAT_MOD_LINEAR)
4080 return 0;
4081
4082 return AMD_FMT_MOD_GET(TILE, modifier);
4083}
4084
dfbbfe3c
BN
4085static const struct drm_format_info *
4086amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4087{
816853f9 4088 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
4089}
4090
37384b3f
BN
4091static void
4092fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4093 union dc_tiling_info *tiling_info,
4094 uint64_t modifier)
4095{
4096 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4097 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4098 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4099 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4100
4101 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4102
4103 if (!IS_AMD_FMT_MOD(modifier))
4104 return;
4105
4106 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4107 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4108
4109 if (adev->family >= AMDGPU_FAMILY_NV) {
4110 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4111 } else {
4112 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4113
4114 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4115 }
4116}
4117
faa37f54
BN
4118enum dm_micro_swizzle {
4119 MICRO_SWIZZLE_Z = 0,
4120 MICRO_SWIZZLE_S = 1,
4121 MICRO_SWIZZLE_D = 2,
4122 MICRO_SWIZZLE_R = 3
4123};
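/*
 * The low two bits of the modifier's TILE field encode the micro-tile
 * order, which is what the "& 3" in dm_plane_format_mod_supported()
 * below extracts. On current drm_fourcc.h definitions (an assumption,
 * not stated here), AMD_FMT_MOD_TILE_GFX9_64K_S_X & 3 == MICRO_SWIZZLE_S
 * and AMD_FMT_MOD_TILE_GFX9_64K_D_X & 3 == MICRO_SWIZZLE_D.
 */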
4124
4125static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4126 uint32_t format,
4127 uint64_t modifier)
4128{
4129 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4130 const struct drm_format_info *info = drm_format_info(format);
4131
4132 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4133
4134 if (!info)
4135 return false;
4136
4137 /*
4138 * We always have to allow this modifier, because core DRM still
4139 * checks LINEAR support if userspace does not provide modifiers.
4140 */
4141 if (modifier == DRM_FORMAT_MOD_LINEAR)
4142 return true;
4143
4144 /*
4145 * The arbitrary tiling support for multiplane formats has not been hooked
4146 * up.
4147 */
4148 if (info->num_planes > 1)
4149 return false;
4150
4151 /*
4152 * For D swizzle the canonical modifier depends on the bpp, so check
4153 * it here.
4154 */
4155 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4156 adev->family >= AMDGPU_FAMILY_NV) {
4157 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4158 return false;
4159 }
4160
4161 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4162 info->cpp[0] < 8)
4163 return false;
4164
4165 if (modifier_has_dcc(modifier)) {
4166 /* Per radeonsi comments 16/64 bpp are more complicated. */
4167 if (info->cpp[0] != 4)
4168 return false;
4169 }
4170
4171 return true;
4172}
4173
4174static void
4175add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4176{
4177 if (!*mods)
4178 return;
4179
4180 if (*cap - *size < 1) {
4181 uint64_t new_cap = *cap * 2;
4182 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4183
4184 if (!new_mods) {
4185 kfree(*mods);
4186 *mods = NULL;
4187 return;
4188 }
4189
4190 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4191 kfree(*mods);
4192 *mods = new_mods;
4193 *cap = new_cap;
4194 }
4195
4196 (*mods)[*size] = mod;
4197 *size += 1;
4198}
4199
4200static void
4201add_gfx9_modifiers(const struct amdgpu_device *adev,
4202 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4203{
4204 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4205 int pipe_xor_bits = min(8, pipes +
4206 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4207 int bank_xor_bits = min(8 - pipe_xor_bits,
4208 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4209 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4210 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4211
4212
4213 if (adev->family == AMDGPU_FAMILY_RV) {
4214 /* Raven2 and later */
4215 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4216
4217 /*
4218 * No _D DCC swizzles yet because we only allow 32bpp, which
4219 * doesn't support _D on DCN
4220 */
4221
4222 if (has_constant_encode) {
4223 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4224 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4225 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4226 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4227 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4228 AMD_FMT_MOD_SET(DCC, 1) |
4229 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4230 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4231 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4232 }
4233
4234 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4235 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4236 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4237 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4238 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4239 AMD_FMT_MOD_SET(DCC, 1) |
4240 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4241 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4242 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4243
4244 if (has_constant_encode) {
4245 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4246 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4247 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4248 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4249 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4250 AMD_FMT_MOD_SET(DCC, 1) |
4251 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4252 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4253 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4254
4255 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4256 AMD_FMT_MOD_SET(RB, rb) |
4257 AMD_FMT_MOD_SET(PIPE, pipes));
4258 }
4259
4260 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4261 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4262 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4263 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4264 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4265 AMD_FMT_MOD_SET(DCC, 1) |
4266 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4267 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4268 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4269 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4270 AMD_FMT_MOD_SET(RB, rb) |
4271 AMD_FMT_MOD_SET(PIPE, pipes));
4272 }
4273
4274 /*
4275 * Only supported for 64bpp on Raven, will be filtered on format in
4276 * dm_plane_format_mod_supported.
4277 */
4278 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4279 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4280 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4281 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4282 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4283
4284 if (adev->family == AMDGPU_FAMILY_RV) {
4285 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4286 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4287 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4288 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4289 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4290 }
4291
4292 /*
4293 * Only supported for 64bpp on Raven, will be filtered on format in
4294 * dm_plane_format_mod_supported.
4295 */
4296 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4297 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4298 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4299
4300 if (adev->family == AMDGPU_FAMILY_RV) {
4301 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4302 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4303 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4304 }
4305}
4306
4307static void
4308add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4309 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4310{
4311 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4312
4313 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4314 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4315 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4316 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4317 AMD_FMT_MOD_SET(DCC, 1) |
4318 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4319 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4320 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4321
4322 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4323 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4324 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4325 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4326 AMD_FMT_MOD_SET(DCC, 1) |
4327 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4328 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4329 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4330 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4331
4332 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4333 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4334 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4335 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4336
4337 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4338 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4339 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4340 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4341
4342
4343 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4344 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4345 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4346 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4347
4348 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4349 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4350 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4351}
4352
4353static void
4354add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4355 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4356{
4357 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4358 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4359
4360 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4361 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4362 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4363 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4364 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4365 AMD_FMT_MOD_SET(DCC, 1) |
4366 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4367 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4368 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4369 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4370
4371 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4372 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4373 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4374 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4375 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4376 AMD_FMT_MOD_SET(DCC, 1) |
4377 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4378 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4379 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4380 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4381 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4382
4383 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4384 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4385 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4386 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4387 AMD_FMT_MOD_SET(PACKERS, pkrs));
4388
4389 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4390 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4391 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4392 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4393 AMD_FMT_MOD_SET(PACKERS, pkrs));
4394
4395 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4396 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4397 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4398 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4399
4400 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4401 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4402 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4403}
4404
4405static int
4406get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4407{
4408 uint64_t size = 0, capacity = 128;
4409 *mods = NULL;
4410
4411 /* We have not hooked up any pre-GFX9 modifiers. */
4412 if (adev->family < AMDGPU_FAMILY_AI)
4413 return 0;
4414
4415 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4416
4417 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4418 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4419 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4420 return *mods ? 0 : -ENOMEM;
4421 }
4422
4423 switch (adev->family) {
4424 case AMDGPU_FAMILY_AI:
4425 case AMDGPU_FAMILY_RV:
4426 add_gfx9_modifiers(adev, mods, &size, &capacity);
4427 break;
4428 case AMDGPU_FAMILY_NV:
4429 case AMDGPU_FAMILY_VGH:
4430 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4431 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4432 else
4433 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4434 break;
4435 }
4436
4437 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4438
4439 /* INVALID marks the end of the list. */
4440 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4441
4442 if (!*mods)
4443 return -ENOMEM;
4444
4445 return 0;
4446}
4447
37384b3f
BN
4448static int
4449fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4450 const struct amdgpu_framebuffer *afb,
4451 const enum surface_pixel_format format,
4452 const enum dc_rotation_angle rotation,
4453 const struct plane_size *plane_size,
4454 union dc_tiling_info *tiling_info,
4455 struct dc_plane_dcc_param *dcc,
4456 struct dc_plane_address *address,
4457 const bool force_disable_dcc)
4458{
4459 const uint64_t modifier = afb->base.modifier;
4460 int ret;
4461
4462 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4463 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4464
4465 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4466 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4467
4468 dcc->enable = 1;
4469 dcc->meta_pitch = afb->base.pitches[1];
4470 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4471
4472 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4473 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4474 }
4475
4476 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4477 if (ret)
4478 return ret;
7df7e505 4479
09e5665a
NK
4480 return 0;
4481}
4482
4483static int
320932bf 4484fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 4485 const struct amdgpu_framebuffer *afb,
695af5f9
NK
4486 const enum surface_pixel_format format,
4487 const enum dc_rotation_angle rotation,
4488 const uint64_t tiling_flags,
09e5665a 4489 union dc_tiling_info *tiling_info,
12e2b2d4 4490 struct plane_size *plane_size,
09e5665a 4491 struct dc_plane_dcc_param *dcc,
87b7ebc2 4492 struct dc_plane_address *address,
5888f07a 4493 bool tmz_surface,
87b7ebc2 4494 bool force_disable_dcc)
09e5665a 4495{
320932bf 4496 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
4497 int ret;
4498
4499 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 4500 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 4501 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
4502 memset(address, 0, sizeof(*address));
4503
5888f07a
HW
4504 address->tmz_surface = tmz_surface;
4505
695af5f9 4506 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
4507 uint64_t addr = afb->address + fb->offsets[0];
4508
12e2b2d4
DL
4509 plane_size->surface_size.x = 0;
4510 plane_size->surface_size.y = 0;
4511 plane_size->surface_size.width = fb->width;
4512 plane_size->surface_size.height = fb->height;
4513 plane_size->surface_pitch =
320932bf
NK
4514 fb->pitches[0] / fb->format->cpp[0];
4515
e0634e8d 4516 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
4517 address->grph.addr.low_part = lower_32_bits(addr);
4518 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 4519 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 4520 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 4521 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 4522
12e2b2d4
DL
4523 plane_size->surface_size.x = 0;
4524 plane_size->surface_size.y = 0;
4525 plane_size->surface_size.width = fb->width;
4526 plane_size->surface_size.height = fb->height;
4527 plane_size->surface_pitch =
320932bf
NK
4528 fb->pitches[0] / fb->format->cpp[0];
4529
12e2b2d4
DL
4530 plane_size->chroma_size.x = 0;
4531 plane_size->chroma_size.y = 0;
320932bf 4532 /* TODO: set these based on surface format */
12e2b2d4
DL
4533 plane_size->chroma_size.width = fb->width / 2;
4534 plane_size->chroma_size.height = fb->height / 2;
320932bf 4535
12e2b2d4 4536 plane_size->chroma_pitch =
320932bf
NK
4537 fb->pitches[1] / fb->format->cpp[1];
4538
e0634e8d
NK
4539 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4540 address->video_progressive.luma_addr.low_part =
be7b9b32 4541 lower_32_bits(luma_addr);
e0634e8d 4542 address->video_progressive.luma_addr.high_part =
be7b9b32 4543 upper_32_bits(luma_addr);
e0634e8d
NK
4544 address->video_progressive.chroma_addr.low_part =
4545 lower_32_bits(chroma_addr);
4546 address->video_progressive.chroma_addr.high_part =
4547 upper_32_bits(chroma_addr);
4548 }
09e5665a 4549
a3241991 4550 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
4551 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4552 rotation, plane_size,
4553 tiling_info, dcc,
4554 address,
4555 force_disable_dcc);
09e5665a
NK
4556 if (ret)
4557 return ret;
a3241991
BN
4558 } else {
4559 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
4560 }
4561
4562 return 0;
7df7e505
NK
4563}
4564
d74004b6 4565static void
695af5f9 4566fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
4567 bool *per_pixel_alpha, bool *global_alpha,
4568 int *global_alpha_value)
4569{
4570 *per_pixel_alpha = false;
4571 *global_alpha = false;
4572 *global_alpha_value = 0xff;
4573
4574 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4575 return;
4576
4577 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4578 static const uint32_t alpha_formats[] = {
4579 DRM_FORMAT_ARGB8888,
4580 DRM_FORMAT_RGBA8888,
4581 DRM_FORMAT_ABGR8888,
4582 };
4583 uint32_t format = plane_state->fb->format->format;
4584 unsigned int i;
4585
4586 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4587 if (format == alpha_formats[i]) {
4588 *per_pixel_alpha = true;
4589 break;
4590 }
4591 }
4592 }
4593
4594 if (plane_state->alpha < 0xffff) {
4595 *global_alpha = true;
4596 *global_alpha_value = plane_state->alpha >> 8;
4597 }
4598}
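/*
 * Worked example (illustrative): an overlay plane with an ARGB8888
 * framebuffer and pixel_blend_mode == DRM_MODE_BLEND_PREMULTI yields
 * per_pixel_alpha = true. If its 16-bit DRM alpha property is 0x8080
 * (roughly half opaque), global_alpha = true and
 * global_alpha_value = 0x8080 >> 8 = 0x80.
 */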
4599
004fefa3
NK
4600static int
4601fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 4602 const enum surface_pixel_format format,
004fefa3
NK
4603 enum dc_color_space *color_space)
4604{
4605 bool full_range;
4606
4607 *color_space = COLOR_SPACE_SRGB;
4608
4609 /* DRM color properties only affect non-RGB formats. */
695af5f9 4610 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
4611 return 0;
4612
4613 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4614
4615 switch (plane_state->color_encoding) {
4616 case DRM_COLOR_YCBCR_BT601:
4617 if (full_range)
4618 *color_space = COLOR_SPACE_YCBCR601;
4619 else
4620 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4621 break;
4622
4623 case DRM_COLOR_YCBCR_BT709:
4624 if (full_range)
4625 *color_space = COLOR_SPACE_YCBCR709;
4626 else
4627 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4628 break;
4629
4630 case DRM_COLOR_YCBCR_BT2020:
4631 if (full_range)
4632 *color_space = COLOR_SPACE_2020_YCBCR;
4633 else
4634 return -EINVAL;
4635 break;
4636
4637 default:
4638 return -EINVAL;
4639 }
4640
4641 return 0;
4642}
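/*
 * Example mapping (illustrative): an NV12 plane with
 * DRM_COLOR_YCBCR_BT709 encoding and limited range resolves to
 * COLOR_SPACE_YCBCR709_LIMITED, while any RGB format returns early
 * with COLOR_SPACE_SRGB regardless of the DRM color properties.
 */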
4643
695af5f9
NK
4644static int
4645fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4646 const struct drm_plane_state *plane_state,
4647 const uint64_t tiling_flags,
4648 struct dc_plane_info *plane_info,
87b7ebc2 4649 struct dc_plane_address *address,
5888f07a 4650 bool tmz_surface,
87b7ebc2 4651 bool force_disable_dcc)
695af5f9
NK
4652{
4653 const struct drm_framebuffer *fb = plane_state->fb;
4654 const struct amdgpu_framebuffer *afb =
4655 to_amdgpu_framebuffer(plane_state->fb);
4656 struct drm_format_name_buf format_name;
4657 int ret;
4658
4659 memset(plane_info, 0, sizeof(*plane_info));
4660
4661 switch (fb->format->format) {
4662 case DRM_FORMAT_C8:
4663 plane_info->format =
4664 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4665 break;
4666 case DRM_FORMAT_RGB565:
4667 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4668 break;
4669 case DRM_FORMAT_XRGB8888:
4670 case DRM_FORMAT_ARGB8888:
4671 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4672 break;
4673 case DRM_FORMAT_XRGB2101010:
4674 case DRM_FORMAT_ARGB2101010:
4675 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4676 break;
4677 case DRM_FORMAT_XBGR2101010:
4678 case DRM_FORMAT_ABGR2101010:
4679 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4680 break;
4681 case DRM_FORMAT_XBGR8888:
4682 case DRM_FORMAT_ABGR8888:
4683 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4684 break;
4685 case DRM_FORMAT_NV21:
4686 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4687 break;
4688 case DRM_FORMAT_NV12:
4689 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4690 break;
cbec6477
SW
4691 case DRM_FORMAT_P010:
4692 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4693 break;
492548dc
SW
4694 case DRM_FORMAT_XRGB16161616F:
4695 case DRM_FORMAT_ARGB16161616F:
4696 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4697 break;
2a5195dc
MK
4698 case DRM_FORMAT_XBGR16161616F:
4699 case DRM_FORMAT_ABGR16161616F:
4700 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4701 break;
695af5f9
NK
4702 default:
4703 DRM_ERROR(
4704 "Unsupported screen format %s\n",
4705 drm_get_format_name(fb->format->format, &format_name));
4706 return -EINVAL;
4707 }
4708
4709 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4710 case DRM_MODE_ROTATE_0:
4711 plane_info->rotation = ROTATION_ANGLE_0;
4712 break;
4713 case DRM_MODE_ROTATE_90:
4714 plane_info->rotation = ROTATION_ANGLE_90;
4715 break;
4716 case DRM_MODE_ROTATE_180:
4717 plane_info->rotation = ROTATION_ANGLE_180;
4718 break;
4719 case DRM_MODE_ROTATE_270:
4720 plane_info->rotation = ROTATION_ANGLE_270;
4721 break;
4722 default:
4723 plane_info->rotation = ROTATION_ANGLE_0;
4724 break;
4725 }
4726
4727 plane_info->visible = true;
4728 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4729
6d83a32d
MS
4730 plane_info->layer_index = 0;
4731
695af5f9
NK
4732 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4733 &plane_info->color_space);
4734 if (ret)
4735 return ret;
4736
4737 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4738 plane_info->rotation, tiling_flags,
4739 &plane_info->tiling_info,
4740 &plane_info->plane_size,
5888f07a 4741 &plane_info->dcc, address, tmz_surface,
87b7ebc2 4742 force_disable_dcc);
695af5f9
NK
4743 if (ret)
4744 return ret;
4745
4746 fill_blending_from_plane_state(
4747 plane_state, &plane_info->per_pixel_alpha,
4748 &plane_info->global_alpha, &plane_info->global_alpha_value);
4749
4750 return 0;
4751}
4752
4753static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4754 struct dc_plane_state *dc_plane_state,
4755 struct drm_plane_state *plane_state,
4756 struct drm_crtc_state *crtc_state)
e7b07cee 4757{
cf020d49 4758 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 4759 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
4760 struct dc_scaling_info scaling_info;
4761 struct dc_plane_info plane_info;
695af5f9 4762 int ret;
87b7ebc2 4763 bool force_disable_dcc = false;
e7b07cee 4764
695af5f9
NK
4765 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4766 if (ret)
4767 return ret;
e7b07cee 4768
695af5f9
NK
4769 dc_plane_state->src_rect = scaling_info.src_rect;
4770 dc_plane_state->dst_rect = scaling_info.dst_rect;
4771 dc_plane_state->clip_rect = scaling_info.clip_rect;
4772 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 4773
87b7ebc2 4774 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 4775 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 4776 afb->tiling_flags,
695af5f9 4777 &plane_info,
87b7ebc2 4778 &dc_plane_state->address,
6eed95b0 4779 afb->tmz_surface,
87b7ebc2 4780 force_disable_dcc);
004fefa3
NK
4781 if (ret)
4782 return ret;
4783
695af5f9
NK
4784 dc_plane_state->format = plane_info.format;
4785 dc_plane_state->color_space = plane_info.color_space;
4787 dc_plane_state->plane_size = plane_info.plane_size;
4788 dc_plane_state->rotation = plane_info.rotation;
4789 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4790 dc_plane_state->stereo_format = plane_info.stereo_format;
4791 dc_plane_state->tiling_info = plane_info.tiling_info;
4792 dc_plane_state->visible = plane_info.visible;
4793 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4794 dc_plane_state->global_alpha = plane_info.global_alpha;
4795 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4796 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 4797 dc_plane_state->layer_index = plane_info.layer_index; // always 0 for now
0c66824b 4798 dc_plane_state->flip_int_enabled = true;
695af5f9 4799
e277adc5
LSL
4800 /*
4801 * Always set input transfer function, since plane state is refreshed
4802 * every time.
4803 */
cf020d49
NK
4804 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4805 if (ret)
4806 return ret;
e7b07cee 4807
cf020d49 4808 return 0;
e7b07cee
HW
4809}
4810
3ee6b26b
AD
4811static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4812 const struct dm_connector_state *dm_state,
4813 struct dc_stream_state *stream)
e7b07cee
HW
4814{
4815 enum amdgpu_rmx_type rmx_type;
4816
4817 struct rect src = { 0 }; /* viewport in composition space */
4818 struct rect dst = { 0 }; /* stream addressable area */
4819
4820 /* no mode. nothing to be done */
4821 if (!mode)
4822 return;
4823
4824 /* Full screen scaling by default */
4825 src.width = mode->hdisplay;
4826 src.height = mode->vdisplay;
4827 dst.width = stream->timing.h_addressable;
4828 dst.height = stream->timing.v_addressable;
4829
f4791779
HW
4830 if (dm_state) {
4831 rmx_type = dm_state->scaling;
4832 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4833 if (src.width * dst.height <
4834 src.height * dst.width) {
4835 /* height needs less upscaling/more downscaling */
4836 dst.width = src.width *
4837 dst.height / src.height;
4838 } else {
4839 /* width needs less upscaling/more downscaling */
4840 dst.height = src.height *
4841 dst.width / src.width;
4842 }
4843 } else if (rmx_type == RMX_CENTER) {
4844 dst = src;
e7b07cee 4845 }
e7b07cee 4846
f4791779
HW
4847 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4848 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 4849
f4791779
HW
4850 if (dm_state->underscan_enable) {
4851 dst.x += dm_state->underscan_hborder / 2;
4852 dst.y += dm_state->underscan_vborder / 2;
4853 dst.width -= dm_state->underscan_hborder;
4854 dst.height -= dm_state->underscan_vborder;
4855 }
e7b07cee
HW
4856 }
4857
4858 stream->src = src;
4859 stream->dst = dst;
4860
f1ad2f5e 4861 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
e7b07cee
HW
4862 dst.x, dst.y, dst.width, dst.height);
4863
4864}
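/*
 * Worked example (illustrative): a 1280x1024 mode on a 1920x1080
 * stream with RMX_ASPECT takes the "height needs less upscaling"
 * branch (1280 * 1080 < 1024 * 1920), so dst.width becomes
 * 1280 * 1080 / 1024 = 1350 while dst.height stays 1080; centering
 * then gives dst.x = (1920 - 1350) / 2 = 285 and dst.y = 0.
 */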
4865
3ee6b26b 4866static enum dc_color_depth
42ba01fc 4867convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 4868 bool is_y420, int requested_bpc)
e7b07cee 4869{
1bc22f20 4870 uint8_t bpc;
01c22997 4871
1bc22f20
SW
4872 if (is_y420) {
4873 bpc = 8;
4874
4875 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4876 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4877 bpc = 16;
4878 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4879 bpc = 12;
4880 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4881 bpc = 10;
4882 } else {
4883 bpc = (uint8_t)connector->display_info.bpc;
4884 /* Assume 8 bpc by default if no bpc is specified. */
4885 bpc = bpc ? bpc : 8;
4886 }
e7b07cee 4887
cbd14ae7 4888 if (requested_bpc > 0) {
01c22997
NK
4889 /*
4890 * Cap display bpc based on the user requested value.
4891 *
4892 * The value for state->max_bpc may not be correctly updated
4893 * depending on when the connector gets added to the state
4894 * or if this was called outside of atomic check, so it
4895 * can't be used directly.
4896 */
cbd14ae7 4897 bpc = min_t(u8, bpc, requested_bpc);
01c22997 4898
1825fd34
NK
4899 /* Round down to the nearest even number. */
4900 bpc = bpc - (bpc & 1);
4901 }
07e3a1cf 4902
e7b07cee
HW
4903 switch (bpc) {
4904 case 0:
1f6010a9
DF
4905 /*
4906 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
4907 * EDID revisions before 1.4.
4908 * TODO: Fix edid parsing
4909 */
4910 return COLOR_DEPTH_888;
4911 case 6:
4912 return COLOR_DEPTH_666;
4913 case 8:
4914 return COLOR_DEPTH_888;
4915 case 10:
4916 return COLOR_DEPTH_101010;
4917 case 12:
4918 return COLOR_DEPTH_121212;
4919 case 14:
4920 return COLOR_DEPTH_141414;
4921 case 16:
4922 return COLOR_DEPTH_161616;
4923 default:
4924 return COLOR_DEPTH_UNDEFINED;
4925 }
4926}
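/*
 * Worked example (illustrative): a non-4:2:0 sink reporting 12 bpc in
 * its EDID with requested_bpc = 10 yields min(12, 10) = 10, which is
 * already even, so COLOR_DEPTH_101010 is returned. A 4:2:0 mode on a
 * sink advertising DRM_EDID_YCBCR420_DC_30 starts from 10 bpc instead
 * of the EDID bpc.
 */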
4927
3ee6b26b
AD
4928static enum dc_aspect_ratio
4929get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 4930{
e11d4147
LSL
4931 /* 1-1 mapping, since both enums follow the HDMI spec. */
4932 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
4933}
4934
3ee6b26b
AD
4935static enum dc_color_space
4936get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
4937{
4938 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4939
4940 switch (dc_crtc_timing->pixel_encoding) {
4941 case PIXEL_ENCODING_YCBCR422:
4942 case PIXEL_ENCODING_YCBCR444:
4943 case PIXEL_ENCODING_YCBCR420:
4944 {
4945 /*
4946 * 27.03 MHz (27030 kHz) is the separation point between HDTV and
4947 * SDTV according to the HDMI spec; we use YCbCr709 and YCbCr601,
4948 * respectively.
4949 */
380604e2 4950 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
4951 if (dc_crtc_timing->flags.Y_ONLY)
4952 color_space =
4953 COLOR_SPACE_YCBCR709_LIMITED;
4954 else
4955 color_space = COLOR_SPACE_YCBCR709;
4956 } else {
4957 if (dc_crtc_timing->flags.Y_ONLY)
4958 color_space =
4959 COLOR_SPACE_YCBCR601_LIMITED;
4960 else
4961 color_space = COLOR_SPACE_YCBCR601;
4962 }
4963
4964 }
4965 break;
4966 case PIXEL_ENCODING_RGB:
4967 color_space = COLOR_SPACE_SRGB;
4968 break;
4969
4970 default:
4971 WARN_ON(1);
4972 break;
4973 }
4974
4975 return color_space;
4976}
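/*
 * Worked example (standard CEA timings, for illustration): 480p60
 * runs at 27.000 MHz, i.e. pix_clk_100hz = 270000, which is not above
 * the 270300 threshold, so it gets COLOR_SPACE_YCBCR601; 720p60 at
 * 74.25 MHz (742500) exceeds it and gets COLOR_SPACE_YCBCR709.
 */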
4977
ea117312
TA
4978static bool adjust_colour_depth_from_display_info(
4979 struct dc_crtc_timing *timing_out,
4980 const struct drm_display_info *info)
400443e8 4981{
ea117312 4982 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 4983 int normalized_clk;
400443e8 4984 do {
380604e2 4985 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
4986 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4987 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4988 normalized_clk /= 2;
 4989 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
ea117312
TA
4990 switch (depth) {
4991 case COLOR_DEPTH_888:
4992 break;
400443e8
ML
4993 case COLOR_DEPTH_101010:
4994 normalized_clk = (normalized_clk * 30) / 24;
4995 break;
4996 case COLOR_DEPTH_121212:
4997 normalized_clk = (normalized_clk * 36) / 24;
4998 break;
4999 case COLOR_DEPTH_161616:
5000 normalized_clk = (normalized_clk * 48) / 24;
5001 break;
5002 default:
ea117312
TA
5003 /* The above depths are the only ones valid for HDMI. */
5004 return false;
400443e8 5005 }
ea117312
TA
5006 if (normalized_clk <= info->max_tmds_clock) {
5007 timing_out->display_color_depth = depth;
5008 return true;
5009 }
5010 } while (--depth > COLOR_DEPTH_666);
5011 return false;
400443e8 5012}
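/*
 * Worked example (assumed HDMI 2.0 sink, for illustration): 4k60 RGB
 * has pix_clk_100hz = 5940000, so normalized_clk starts at 594000 kHz.
 * At 10 bpc this becomes 594000 * 30 / 24 = 742500 kHz, above a typical
 * max_tmds_clock of 600000 kHz, so the loop steps down to 8 bpc, which
 * fits at 594000 kHz and is kept.
 */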
e7b07cee 5013
42ba01fc
NK
5014static void fill_stream_properties_from_drm_display_mode(
5015 struct dc_stream_state *stream,
5016 const struct drm_display_mode *mode_in,
5017 const struct drm_connector *connector,
5018 const struct drm_connector_state *connector_state,
cbd14ae7
SW
5019 const struct dc_stream_state *old_stream,
5020 int requested_bpc)
e7b07cee
HW
5021{
5022 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 5023 const struct drm_display_info *info = &connector->display_info;
d4252eee 5024 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
5025 struct hdmi_vendor_infoframe hv_frame;
5026 struct hdmi_avi_infoframe avi_frame;
e7b07cee 5027
acf83f86
WL
5028 memset(&hv_frame, 0, sizeof(hv_frame));
5029 memset(&avi_frame, 0, sizeof(avi_frame));
5030
e7b07cee
HW
5031 timing_out->h_border_left = 0;
5032 timing_out->h_border_right = 0;
5033 timing_out->v_border_top = 0;
5034 timing_out->v_border_bottom = 0;
5035 /* TODO: un-hardcode */
fe61a2f1 5036 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 5037 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 5038 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
5039 else if (drm_mode_is_420_also(info, mode_in)
5040 && aconnector->force_yuv420_output)
5041 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 5042 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 5043 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
5044 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5045 else
5046 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5047
5048 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5049 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
5050 connector,
5051 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5052 requested_bpc);
e7b07cee
HW
5053 timing_out->scan_type = SCANNING_TYPE_NODATA;
5054 timing_out->hdmi_vic = 0;
b333730d
BL
5055
 5056 if (old_stream) {
5057 timing_out->vic = old_stream->timing.vic;
5058 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5059 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5060 } else {
5061 timing_out->vic = drm_match_cea_mode(mode_in);
5062 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5063 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5064 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5065 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5066 }
e7b07cee 5067
1cb1d477
WL
5068 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5069 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5070 timing_out->vic = avi_frame.video_code;
5071 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5072 timing_out->hdmi_vic = hv_frame.vic;
5073 }
5074
c0ea73a4
AP
5075 timing_out->h_addressable = mode_in->crtc_hdisplay;
5076 timing_out->h_total = mode_in->crtc_htotal;
5077 timing_out->h_sync_width =
5078 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5079 timing_out->h_front_porch =
5080 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5081 timing_out->v_total = mode_in->crtc_vtotal;
5082 timing_out->v_addressable = mode_in->crtc_vdisplay;
5083 timing_out->v_front_porch =
5084 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5085 timing_out->v_sync_width =
5086 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5087 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
e7b07cee 5088 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
5089
5090 stream->output_color_space = get_output_color_space(timing_out);
5091
e43a432c
AK
5092 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5093 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
5094 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5095 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5096 drm_mode_is_420_also(info, mode_in) &&
5097 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5098 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5099 adjust_colour_depth_from_display_info(timing_out, info);
5100 }
5101 }
e7b07cee
HW
5102}
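/*
 * Worked example (CEA-861 1080p60, for illustration): with
 * crtc_hdisplay = 1920, crtc_hsync_start = 2008, crtc_hsync_end = 2052
 * and crtc_htotal = 2200, the arithmetic above yields h_front_porch =
 * 2008 - 1920 = 88 and h_sync_width = 2052 - 2008 = 44; a crtc_clock
 * of 148500 kHz becomes pix_clk_100hz = 1485000.
 */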
5103
3ee6b26b
AD
5104static void fill_audio_info(struct audio_info *audio_info,
5105 const struct drm_connector *drm_connector,
5106 const struct dc_sink *dc_sink)
e7b07cee
HW
5107{
5108 int i = 0;
5109 int cea_revision = 0;
5110 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5111
5112 audio_info->manufacture_id = edid_caps->manufacturer_id;
5113 audio_info->product_id = edid_caps->product_id;
5114
5115 cea_revision = drm_connector->display_info.cea_rev;
5116
090afc1e 5117 strscpy(audio_info->display_name,
d2b2562c 5118 edid_caps->display_name,
090afc1e 5119 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 5120
b830ebc9 5121 if (cea_revision >= 3) {
e7b07cee
HW
5122 audio_info->mode_count = edid_caps->audio_mode_count;
5123
5124 for (i = 0; i < audio_info->mode_count; ++i) {
5125 audio_info->modes[i].format_code =
5126 (enum audio_format_code)
5127 (edid_caps->audio_modes[i].format_code);
5128 audio_info->modes[i].channel_count =
5129 edid_caps->audio_modes[i].channel_count;
5130 audio_info->modes[i].sample_rates.all =
5131 edid_caps->audio_modes[i].sample_rate;
5132 audio_info->modes[i].sample_size =
5133 edid_caps->audio_modes[i].sample_size;
5134 }
5135 }
5136
5137 audio_info->flags.all = edid_caps->speaker_flags;
5138
5139 /* TODO: We only check for the progressive mode, check for interlace mode too */
b830ebc9 5140 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5141 audio_info->video_latency = drm_connector->video_latency[0];
5142 audio_info->audio_latency = drm_connector->audio_latency[0];
5143 }
5144
5145 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5146
5147}
5148
3ee6b26b
AD
5149static void
5150copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5151 struct drm_display_mode *dst_mode)
e7b07cee
HW
5152{
5153 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5154 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5155 dst_mode->crtc_clock = src_mode->crtc_clock;
5156 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5157 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5158 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
5159 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5160 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5161 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5162 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5163 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5164 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5165 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5166 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5167}
5168
3ee6b26b
AD
5169static void
5170decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5171 const struct drm_display_mode *native_mode,
5172 bool scale_enabled)
e7b07cee
HW
5173{
5174 if (scale_enabled) {
5175 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5176 } else if (native_mode->clock == drm_mode->clock &&
5177 native_mode->htotal == drm_mode->htotal &&
5178 native_mode->vtotal == drm_mode->vtotal) {
5179 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5180 } else {
 5181 /* neither scaling nor an amdgpu-inserted mode: nothing to patch */
5182 }
5183}
5184
aed15309
ML
5185static struct dc_sink *
5186create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5187{
2e0ac3d6 5188 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5189 struct dc_sink *sink = NULL;
2e0ac3d6
HW
5190 sink_init_data.link = aconnector->dc_link;
5191 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5192
5193 sink = dc_sink_create(&sink_init_data);
423788c7 5194 if (!sink) {
2e0ac3d6 5195 DRM_ERROR("Failed to create sink!\n");
aed15309 5196 return NULL;
423788c7 5197 }
2e0ac3d6 5198 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5199
aed15309 5200 return sink;
2e0ac3d6
HW
5201}
5202
fa2123db
ML
5203static void set_multisync_trigger_params(
5204 struct dc_stream_state *stream)
5205{
5206 if (stream->triggered_crtc_reset.enabled) {
5207 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5208 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5209 }
5210}
5211
5212static void set_master_stream(struct dc_stream_state *stream_set[],
5213 int stream_count)
5214{
5215 int j, highest_rfr = 0, master_stream = 0;
5216
5217 for (j = 0; j < stream_count; j++) {
5218 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5219 int refresh_rate = 0;
5220
380604e2 5221 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
5222 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5223 if (refresh_rate > highest_rfr) {
5224 highest_rfr = refresh_rate;
5225 master_stream = j;
5226 }
5227 }
5228 }
5229 for (j = 0; j < stream_count; j++) {
03736f4c 5230 if (stream_set[j])
fa2123db
ML
5231 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5232 }
5233}
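/*
 * Worked example (CEA 1080p60 timing, for illustration): with
 * pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125, the
 * refresh_rate computed above is 1485000 * 100 / (2200 * 1125) = 60,
 * so a 60 Hz stream wins mastership over a 30 Hz one.
 */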
5234
5235static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5236{
5237 int i = 0;
5238
5239 if (context->stream_count < 2)
5240 return;
5241 for (i = 0; i < context->stream_count ; i++) {
5242 if (!context->streams[i])
5243 continue;
1f6010a9
DF
5244 /*
5245 * TODO: add a function to read AMD VSDB bits and set
fa2123db 5246 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 5247 * For now it's set to false
fa2123db
ML
5248 */
5249 set_multisync_trigger_params(context->streams[i]);
5250 }
5251 set_master_stream(context->streams, context->stream_count);
5252}
5253
3ee6b26b
AD
5254static struct dc_stream_state *
5255create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5256 const struct drm_display_mode *drm_mode,
b333730d 5257 const struct dm_connector_state *dm_state,
cbd14ae7
SW
5258 const struct dc_stream_state *old_stream,
5259 int requested_bpc)
e7b07cee
HW
5260{
5261 struct drm_display_mode *preferred_mode = NULL;
391ef035 5262 struct drm_connector *drm_connector;
42ba01fc
NK
5263 const struct drm_connector_state *con_state =
5264 dm_state ? &dm_state->base : NULL;
0971c40e 5265 struct dc_stream_state *stream = NULL;
e7b07cee
HW
5266 struct drm_display_mode mode = *drm_mode;
5267 bool native_mode_found = false;
c0ea73a4 5268 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
b333730d 5269 int mode_refresh;
58124bf8 5270 int preferred_refresh = 0;
defeb878 5271#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015
DF
5272 struct dsc_dec_dpcd_caps dsc_caps;
5273 uint32_t link_bandwidth_kbps;
7c431455 5274#endif
aed15309 5275 struct dc_sink *sink = NULL;
b830ebc9 5276 if (aconnector == NULL) {
e7b07cee 5277 DRM_ERROR("aconnector is NULL!\n");
64245fa7 5278 return stream;
e7b07cee
HW
5279 }
5280
e7b07cee 5281 drm_connector = &aconnector->base;
2e0ac3d6 5282
f4ac176e 5283 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
5284 sink = create_fake_sink(aconnector);
5285 if (!sink)
5286 return stream;
aed15309
ML
5287 } else {
5288 sink = aconnector->dc_sink;
dcd5fb82 5289 dc_sink_retain(sink);
f4ac176e 5290 }
2e0ac3d6 5291
aed15309 5292 stream = dc_create_stream_for_sink(sink);
4562236b 5293
b830ebc9 5294 if (stream == NULL) {
e7b07cee 5295 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 5296 goto finish;
e7b07cee
HW
5297 }
5298
ceb3dbb4
JL
5299 stream->dm_stream_context = aconnector;
5300
4a36fcba
WL
5301 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5302 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5303
e7b07cee
HW
5304 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5305 /* Search for preferred mode */
5306 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5307 native_mode_found = true;
5308 break;
5309 }
5310 }
5311 if (!native_mode_found)
5312 preferred_mode = list_first_entry_or_null(
5313 &aconnector->base.modes,
5314 struct drm_display_mode,
5315 head);
5316
b333730d
BL
5317 mode_refresh = drm_mode_vrefresh(&mode);
5318
b830ebc9 5319 if (preferred_mode == NULL) {
1f6010a9
DF
5320 /*
 5321 * This may not be an error; the use case is when we have no
e7b07cee
HW
5322 * usermode calls to reset and set mode upon hotplug. In this
5323 * case, we call set mode ourselves to restore the previous mode
 5324 * and the mode list may not have been filled in yet.
5325 */
f1ad2f5e 5326 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee 5327 } else {
c0ea73a4 5328 decide_crtc_timing_for_drm_display_mode(
e7b07cee 5329 &mode, preferred_mode,
f4791779 5330 dm_state ? (dm_state->scaling != RMX_OFF) : false);
58124bf8 5331 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
5332 }
5333
c0ea73a4 5334 if (!dm_state)
f783577c
JFZ
5335 drm_mode_set_crtcinfo(&mode, 0);
5336
c0ea73a4 5337 /*
b333730d
BL
5338 * If scaling is enabled and refresh rate didn't change
5339 * we copy the vic and polarities of the old timings
5340 */
c0ea73a4
AP
5341 if (!scale || mode_refresh != preferred_refresh)
5342 fill_stream_properties_from_drm_display_mode(stream,
5343 &mode, &aconnector->base, con_state, NULL, requested_bpc);
b333730d 5344 else
c0ea73a4
AP
5345 fill_stream_properties_from_drm_display_mode(stream,
5346 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
b333730d 5347
df2f1015
DF
5348 stream->timing.flags.DSC = 0;
5349
5350 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 5351#if defined(CONFIG_DRM_AMD_DC_DCN)
2af0f378
NC
5352 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5353 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6d824ed5 5354 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
df2f1015
DF
5355 &dsc_caps);
5356 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5357 dc_link_get_link_cap(aconnector->dc_link));
5358
0749ddeb 5359 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
bcc6aa61 5360 /* Set DSC policy according to dsc_clock_en */
0749ddeb
EB
5361 dc_dsc_policy_set_enable_dsc_when_not_needed(
5362 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
bcc6aa61 5363
0417df16 5364 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 5365 &dsc_caps,
0417df16 5366 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
9abdf392 5367 0,
df2f1015
DF
5368 link_bandwidth_kbps,
5369 &stream->timing,
5370 &stream->timing.dsc_cfg))
5371 stream->timing.flags.DSC = 1;
27e84dd7 5372 /* Overwrite the stream flag if DSC is enabled through debugfs */
0749ddeb 5373 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
097e6d98 5374 stream->timing.flags.DSC = 1;
734e4c97 5375
28b2f656
EB
5376 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5377 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
734e4c97 5378
28b2f656
EB
5379 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5380 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5268bf13
EB
5381
5382 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5383 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
097e6d98 5384 }
39a4eb85 5385#endif
df2f1015 5386 }
39a4eb85 5387
e7b07cee
HW
5388 update_stream_scaling_settings(&mode, dm_state, stream);
5389
5390 fill_audio_info(
5391 &stream->audio_info,
5392 drm_connector,
aed15309 5393 sink);
e7b07cee 5394
ceb3dbb4 5395 update_stream_signal(stream, sink);
9182b4cb 5396
d832fc3b 5397 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
5398 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5399
8a488f5d
RL
5400 if (stream->link->psr_settings.psr_feature_enabled) {
5401 //
 5402 // Decide whether the stream supports VSC SDP colorimetry
 5403 // before building the VSC info packet.
5404 //
5405 stream->use_vsc_sdp_for_colorimetry = false;
5406 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5407 stream->use_vsc_sdp_for_colorimetry =
5408 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5409 } else {
5410 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5411 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 5412 }
8a488f5d 5413 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309 5414 }
aed15309 5415finish:
dcd5fb82 5416 dc_sink_release(sink);
9e3efe3e 5417
e7b07cee
HW
5418 return stream;
5419}
5420
7578ecda 5421static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
5422{
5423 drm_crtc_cleanup(crtc);
5424 kfree(crtc);
5425}
5426
5427static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 5428 struct drm_crtc_state *state)
e7b07cee
HW
5429{
5430 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5431
 5432 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5433 if (cur->stream)
5434 dc_stream_release(cur->stream);
5435
5436
5437 __drm_atomic_helper_crtc_destroy_state(state);
5438
5439
5440 kfree(state);
5441}
5442
5443static void dm_crtc_reset_state(struct drm_crtc *crtc)
5444{
5445 struct dm_crtc_state *state;
5446
5447 if (crtc->state)
5448 dm_crtc_destroy_state(crtc, crtc->state);
5449
5450 state = kzalloc(sizeof(*state), GFP_KERNEL);
5451 if (WARN_ON(!state))
5452 return;
5453
1f8a52ec 5454 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
5455}
5456
5457static struct drm_crtc_state *
5458dm_crtc_duplicate_state(struct drm_crtc *crtc)
5459{
5460 struct dm_crtc_state *state, *cur;
5461
5462 cur = to_dm_crtc_state(crtc->state);
5463
5464 if (WARN_ON(!crtc->state))
5465 return NULL;
5466
2004f45e 5467 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
5468 if (!state)
5469 return NULL;
e7b07cee
HW
5470
5471 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5472
5473 if (cur->stream) {
5474 state->stream = cur->stream;
5475 dc_stream_retain(state->stream);
5476 }
5477
d6ef9b41 5478 state->active_planes = cur->active_planes;
98e6436d 5479 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 5480 state->abm_level = cur->abm_level;
bb47de73
NK
5481 state->vrr_supported = cur->vrr_supported;
5482 state->freesync_config = cur->freesync_config;
cf020d49
NK
5483 state->cm_has_degamma = cur->cm_has_degamma;
5484 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
e7b07cee
HW
 5485 /* TODO: Duplicate dc_stream once the stream object is flattened */
5486
5487 return &state->base;
5488}
5489
86bc2219
WL
5490#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5491int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5492{
5493 crtc_debugfs_init(crtc);
5494
5495 return 0;
5496}
5497#endif
5498
d2574c33
MK
5499static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5500{
5501 enum dc_irq_source irq_source;
5502 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5503 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
5504 int rc;
5505
5506 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5507
5508 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5509
5510 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5511 acrtc->crtc_id, enable ? "en" : "dis", rc);
5512 return rc;
5513}
589d2739
HW
5514
5515static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5516{
5517 enum dc_irq_source irq_source;
5518 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5519 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 5520 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
d7faf6f5 5521#if defined(CONFIG_DRM_AMD_DC_DCN)
71338cb4 5522 struct amdgpu_display_manager *dm = &adev->dm;
d7faf6f5
QZ
5523 unsigned long flags;
5524#endif
d2574c33
MK
5525 int rc = 0;
5526
5527 if (enable) {
5528 /* vblank irq on -> Only need vupdate irq in vrr mode */
5529 if (amdgpu_dm_vrr_active(acrtc_state))
5530 rc = dm_set_vupdate_irq(crtc, true);
5531 } else {
5532 /* vblank irq off -> vupdate irq off */
5533 rc = dm_set_vupdate_irq(crtc, false);
5534 }
5535
5536 if (rc)
5537 return rc;
589d2739
HW
5538
5539 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
5540
5541 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5542 return -EBUSY;
5543
98ab5f35
BL
5544 if (amdgpu_in_reset(adev))
5545 return 0;
5546
4928b480 5547#if defined(CONFIG_DRM_AMD_DC_DCN)
d7faf6f5
QZ
5548 spin_lock_irqsave(&dm->vblank_lock, flags);
5549 dm->vblank_workqueue->dm = dm;
5550 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5551 dm->vblank_workqueue->enable = enable;
5552 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5553 schedule_work(&dm->vblank_workqueue->mall_work);
4928b480 5554#endif
71338cb4 5555
71338cb4 5556 return 0;
589d2739
HW
5557}
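/*
 * Illustrative sequencing (assumed VRR-active state, not from a
 * trace): enabling vblank on a VRR crtc first arms the VUPDATE
 * interrupt, then the VBLANK source; disabling reverses that. On DCN
 * the remaining work is deferred to the vblank workqueue's mall_work.
 */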
5558
5559static int dm_enable_vblank(struct drm_crtc *crtc)
5560{
5561 return dm_set_vblank(crtc, true);
5562}
5563
5564static void dm_disable_vblank(struct drm_crtc *crtc)
5565{
5566 dm_set_vblank(crtc, false);
5567}
5568
e7b07cee
HW
 5569 /* Only the options currently available to the driver are implemented */
5570static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5571 .reset = dm_crtc_reset_state,
5572 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
5573 .set_config = drm_atomic_helper_set_config,
5574 .page_flip = drm_atomic_helper_page_flip,
5575 .atomic_duplicate_state = dm_crtc_duplicate_state,
5576 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 5577 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 5578 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 5579 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 5580 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
5581 .enable_vblank = dm_enable_vblank,
5582 .disable_vblank = dm_disable_vblank,
e3eff4b5 5583 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
86bc2219
WL
5584#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5585 .late_register = amdgpu_dm_crtc_late_register,
5586#endif
e7b07cee
HW
5587};
5588
5589static enum drm_connector_status
5590amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5591{
5592 bool connected;
c84dec2f 5593 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5594
1f6010a9
DF
5595 /*
5596 * Notes:
e7b07cee
HW
5597 * 1. This interface is NOT called in context of HPD irq.
 5598 * 2. This interface *is called* in the context of a user-mode ioctl, which
1f6010a9
DF
5599 * makes it a bad place for *any* MST-related activity.
5600 */
e7b07cee 5601
8580d60b
HW
5602 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5603 !aconnector->fake_enable)
e7b07cee
HW
5604 connected = (aconnector->dc_sink != NULL);
5605 else
5606 connected = (aconnector->base.force == DRM_FORCE_ON);
5607
0f877894
OV
5608 update_subconnector_property(aconnector);
5609
e7b07cee
HW
5610 return (connected ? connector_status_connected :
5611 connector_status_disconnected);
5612}
5613
3ee6b26b
AD
5614int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5615 struct drm_connector_state *connector_state,
5616 struct drm_property *property,
5617 uint64_t val)
e7b07cee
HW
5618{
5619 struct drm_device *dev = connector->dev;
1348969a 5620 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5621 struct dm_connector_state *dm_old_state =
5622 to_dm_connector_state(connector->state);
5623 struct dm_connector_state *dm_new_state =
5624 to_dm_connector_state(connector_state);
5625
5626 int ret = -EINVAL;
5627
5628 if (property == dev->mode_config.scaling_mode_property) {
5629 enum amdgpu_rmx_type rmx_type;
5630
5631 switch (val) {
5632 case DRM_MODE_SCALE_CENTER:
5633 rmx_type = RMX_CENTER;
5634 break;
5635 case DRM_MODE_SCALE_ASPECT:
5636 rmx_type = RMX_ASPECT;
5637 break;
5638 case DRM_MODE_SCALE_FULLSCREEN:
5639 rmx_type = RMX_FULL;
5640 break;
5641 case DRM_MODE_SCALE_NONE:
5642 default:
5643 rmx_type = RMX_OFF;
5644 break;
5645 }
5646
5647 if (dm_old_state->scaling == rmx_type)
5648 return 0;
5649
5650 dm_new_state->scaling = rmx_type;
5651 ret = 0;
5652 } else if (property == adev->mode_info.underscan_hborder_property) {
5653 dm_new_state->underscan_hborder = val;
5654 ret = 0;
5655 } else if (property == adev->mode_info.underscan_vborder_property) {
5656 dm_new_state->underscan_vborder = val;
5657 ret = 0;
5658 } else if (property == adev->mode_info.underscan_property) {
5659 dm_new_state->underscan_enable = val;
5660 ret = 0;
c1ee92f9
DF
5661 } else if (property == adev->mode_info.abm_level_property) {
5662 dm_new_state->abm_level = val;
5663 ret = 0;
e7b07cee
HW
5664 }
5665
5666 return ret;
5667}
5668
3ee6b26b
AD
5669int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5670 const struct drm_connector_state *state,
5671 struct drm_property *property,
5672 uint64_t *val)
e7b07cee
HW
5673{
5674 struct drm_device *dev = connector->dev;
1348969a 5675 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5676 struct dm_connector_state *dm_state =
5677 to_dm_connector_state(state);
5678 int ret = -EINVAL;
5679
5680 if (property == dev->mode_config.scaling_mode_property) {
5681 switch (dm_state->scaling) {
5682 case RMX_CENTER:
5683 *val = DRM_MODE_SCALE_CENTER;
5684 break;
5685 case RMX_ASPECT:
5686 *val = DRM_MODE_SCALE_ASPECT;
5687 break;
5688 case RMX_FULL:
5689 *val = DRM_MODE_SCALE_FULLSCREEN;
5690 break;
5691 case RMX_OFF:
5692 default:
5693 *val = DRM_MODE_SCALE_NONE;
5694 break;
5695 }
5696 ret = 0;
5697 } else if (property == adev->mode_info.underscan_hborder_property) {
5698 *val = dm_state->underscan_hborder;
5699 ret = 0;
5700 } else if (property == adev->mode_info.underscan_vborder_property) {
5701 *val = dm_state->underscan_vborder;
5702 ret = 0;
5703 } else if (property == adev->mode_info.underscan_property) {
5704 *val = dm_state->underscan_enable;
5705 ret = 0;
c1ee92f9
DF
5706 } else if (property == adev->mode_info.abm_level_property) {
5707 *val = dm_state->abm_level;
5708 ret = 0;
e7b07cee 5709 }
c1ee92f9 5710
e7b07cee
HW
5711 return ret;
5712}
5713
526c654a
ED
5714static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5715{
5716 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5717
5718 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5719}
5720
7578ecda 5721static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 5722{
c84dec2f 5723 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5724 const struct dc_link *link = aconnector->dc_link;
1348969a 5725 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 5726 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 5727
5dff80bd
AG
5728 /*
 5729 * Call only if mst_mgr was initialized before, since it's not done
5730 * for all connector types.
5731 */
5732 if (aconnector->mst_mgr.dev)
5733 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5734
e7b07cee
HW
5735#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5736 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5737
89fc8d4e 5738 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
5739 link->type != dc_connection_none &&
5740 dm->backlight_dev) {
5741 backlight_device_unregister(dm->backlight_dev);
5742 dm->backlight_dev = NULL;
e7b07cee
HW
5743 }
5744#endif
dcd5fb82
MF
5745
5746 if (aconnector->dc_em_sink)
5747 dc_sink_release(aconnector->dc_em_sink);
5748 aconnector->dc_em_sink = NULL;
5749 if (aconnector->dc_sink)
5750 dc_sink_release(aconnector->dc_sink);
5751 aconnector->dc_sink = NULL;
5752
e86e8947 5753 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
5754 drm_connector_unregister(connector);
5755 drm_connector_cleanup(connector);
526c654a
ED
5756 if (aconnector->i2c) {
5757 i2c_del_adapter(&aconnector->i2c->base);
5758 kfree(aconnector->i2c);
5759 }
7daec99f 5760 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 5761
e7b07cee
HW
5762 kfree(connector);
5763}
5764
5765void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5766{
5767 struct dm_connector_state *state =
5768 to_dm_connector_state(connector->state);
5769
df099b9b
LSL
5770 if (connector->state)
5771 __drm_atomic_helper_connector_destroy_state(connector->state);
5772
e7b07cee
HW
5773 kfree(state);
5774
5775 state = kzalloc(sizeof(*state), GFP_KERNEL);
5776
5777 if (state) {
5778 state->scaling = RMX_OFF;
5779 state->underscan_enable = false;
5780 state->underscan_hborder = 0;
5781 state->underscan_vborder = 0;
01933ba4 5782 state->base.max_requested_bpc = 8;
3261e013
ML
5783 state->vcpi_slots = 0;
5784 state->pbn = 0;
c3e50f89
NK
5785 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5786 state->abm_level = amdgpu_dm_abm_level;
5787
df099b9b 5788 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
5789 }
5790}
5791
3ee6b26b
AD
5792struct drm_connector_state *
5793amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
5794{
5795 struct dm_connector_state *state =
5796 to_dm_connector_state(connector->state);
5797
5798 struct dm_connector_state *new_state =
5799 kmemdup(state, sizeof(*state), GFP_KERNEL);
5800
98e6436d
AK
5801 if (!new_state)
5802 return NULL;
e7b07cee 5803
98e6436d
AK
5804 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5805
5806 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 5807 new_state->abm_level = state->abm_level;
922454c2
NK
5808 new_state->scaling = state->scaling;
5809 new_state->underscan_enable = state->underscan_enable;
5810 new_state->underscan_hborder = state->underscan_hborder;
5811 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
5812 new_state->vcpi_slots = state->vcpi_slots;
5813 new_state->pbn = state->pbn;
98e6436d 5814 return &new_state->base;
e7b07cee
HW
5815}
5816
14f04fa4
AD
5817static int
5818amdgpu_dm_connector_late_register(struct drm_connector *connector)
5819{
5820 struct amdgpu_dm_connector *amdgpu_dm_connector =
5821 to_amdgpu_dm_connector(connector);
00a8037e 5822 int r;
14f04fa4 5823
00a8037e
AD
5824 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5825 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5826 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5827 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5828 if (r)
5829 return r;
5830 }
5831
5832#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
5833 connector_debugfs_init(amdgpu_dm_connector);
5834#endif
5835
5836 return 0;
5837}
5838
e7b07cee
HW
5839static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5840 .reset = amdgpu_dm_connector_funcs_reset,
5841 .detect = amdgpu_dm_connector_detect,
5842 .fill_modes = drm_helper_probe_single_connector_modes,
5843 .destroy = amdgpu_dm_connector_destroy,
5844 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5845 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5846 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 5847 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 5848 .late_register = amdgpu_dm_connector_late_register,
526c654a 5849 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
5850};
5851
e7b07cee
HW
5852static int get_modes(struct drm_connector *connector)
5853{
5854 return amdgpu_dm_connector_get_modes(connector);
5855}
5856
c84dec2f 5857static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5858{
5859 struct dc_sink_init_data init_params = {
5860 .link = aconnector->dc_link,
5861 .sink_signal = SIGNAL_TYPE_VIRTUAL
5862 };
70e8ffc5 5863 struct edid *edid;
e7b07cee 5864
a89ff457 5865 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
5866 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5867 aconnector->base.name);
5868
5869 aconnector->base.force = DRM_FORCE_OFF;
5870 aconnector->base.override_edid = false;
5871 return;
5872 }
5873
70e8ffc5
HW
5874 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5875
e7b07cee
HW
5876 aconnector->edid = edid;
5877
5878 aconnector->dc_em_sink = dc_link_add_remote_sink(
5879 aconnector->dc_link,
5880 (uint8_t *)edid,
5881 (edid->extensions + 1) * EDID_LENGTH,
5882 &init_params);
5883
dcd5fb82 5884 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
5885 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5886 aconnector->dc_link->local_sink :
5887 aconnector->dc_em_sink;
dcd5fb82
MF
5888 dc_sink_retain(aconnector->dc_sink);
5889 }
e7b07cee
HW
5890}
5891
c84dec2f 5892static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
5893{
5894 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5895
1f6010a9
DF
5896 /*
 5897 * In the case of a headless boot with force-on for a DP managed connector,
e7b07cee
HW
 5898 * these settings have to be != 0 to get an initial modeset.
5899 */
5900 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5901 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5902 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5903 }
5904
5905
5906 aconnector->base.override_edid = true;
5907 create_eml_sink(aconnector);
5908}
5909
cbd14ae7
SW
5910static struct dc_stream_state *
5911create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5912 const struct drm_display_mode *drm_mode,
5913 const struct dm_connector_state *dm_state,
5914 const struct dc_stream_state *old_stream)
5915{
5916 struct drm_connector *connector = &aconnector->base;
1348969a 5917 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 5918 struct dc_stream_state *stream;
4b7da34b
SW
5919 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5920 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
5921 enum dc_status dc_result = DC_OK;
5922
5923 do {
5924 stream = create_stream_for_sink(aconnector, drm_mode,
5925 dm_state, old_stream,
5926 requested_bpc);
5927 if (stream == NULL) {
5928 DRM_ERROR("Failed to create stream for sink!\n");
5929 break;
5930 }
5931
5932 dc_result = dc_validate_stream(adev->dm.dc, stream);
5933
5934 if (dc_result != DC_OK) {
74a16675 5935 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
5936 drm_mode->hdisplay,
5937 drm_mode->vdisplay,
5938 drm_mode->clock,
74a16675
RS
5939 dc_result,
5940 dc_status_to_str(dc_result));
cbd14ae7
SW
5941
5942 dc_stream_release(stream);
5943 stream = NULL;
5944 requested_bpc -= 2; /* lower bpc to retry validation */
5945 }
5946
5947 } while (stream == NULL && requested_bpc >= 6);
5948
5949 return stream;
5950}
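/*
 * Illustrative flow (assumed sink, not from a trace): with
 * max_requested_bpc = 10, the first pass builds a 10 bpc stream; if
 * dc_validate_stream() rejects it (e.g. for bandwidth), the loop
 * retries at 8 bpc, then 6 bpc, before giving up and returning NULL.
 */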
5951
ba9ca088 5952enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 5953 struct drm_display_mode *mode)
e7b07cee
HW
5954{
5955 int result = MODE_ERROR;
5956 struct dc_sink *dc_sink;
e7b07cee 5957 /* TODO: Unhardcode stream count */
0971c40e 5958 struct dc_stream_state *stream;
c84dec2f 5959 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
5960
5961 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5962 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5963 return result;
5964
1f6010a9
DF
5965 /*
 5966 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
5967 * EDID mgmt
5968 */
5969 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5970 !aconnector->dc_em_sink)
5971 handle_edid_mgmt(aconnector);
5972
c84dec2f 5973 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 5974
ad975f44
VL
5975 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5976 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
5977 DRM_ERROR("dc_sink is NULL!\n");
5978 goto fail;
5979 }
5980
cbd14ae7
SW
5981 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5982 if (stream) {
5983 dc_stream_release(stream);
e7b07cee 5984 result = MODE_OK;
cbd14ae7 5985 }
e7b07cee
HW
5986
5987fail:
 5988 /* TODO: error handling */
5989 return result;
5990}
5991
88694af9
NK
5992static int fill_hdr_info_packet(const struct drm_connector_state *state,
5993 struct dc_info_packet *out)
5994{
5995 struct hdmi_drm_infoframe frame;
5996 unsigned char buf[30]; /* 26 + 4 */
5997 ssize_t len;
5998 int ret, i;
5999
6000 memset(out, 0, sizeof(*out));
6001
6002 if (!state->hdr_output_metadata)
6003 return 0;
6004
6005 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6006 if (ret)
6007 return ret;
6008
6009 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6010 if (len < 0)
6011 return (int)len;
6012
6013 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6014 if (len != 30)
6015 return -EINVAL;
6016
6017 /* Prepare the infopacket for DC. */
6018 switch (state->connector->connector_type) {
6019 case DRM_MODE_CONNECTOR_HDMIA:
6020 out->hb0 = 0x87; /* type */
6021 out->hb1 = 0x01; /* version */
6022 out->hb2 = 0x1A; /* length */
6023 out->sb[0] = buf[3]; /* checksum */
6024 i = 1;
6025 break;
6026
6027 case DRM_MODE_CONNECTOR_DisplayPort:
6028 case DRM_MODE_CONNECTOR_eDP:
6029 out->hb0 = 0x00; /* sdp id, zero */
6030 out->hb1 = 0x87; /* type */
6031 out->hb2 = 0x1D; /* payload len - 1 */
6032 out->hb3 = (0x13 << 2); /* sdp version */
6033 out->sb[0] = 0x01; /* version */
6034 out->sb[1] = 0x1A; /* length */
6035 i = 2;
6036 break;
6037
6038 default:
6039 return -EINVAL;
6040 }
6041
6042 memcpy(&out->sb[i], &buf[4], 26);
6043 out->valid = true;
6044
6045 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6046 sizeof(out->sb), false);
6047
6048 return 0;
6049}
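/*
 * Buffer layout recap (derived from the packing above, for
 * illustration): buf[0..2] hold the packed infoframe header (type
 * 0x87, version 0x01, length 0x1A), buf[3] is the checksum, and
 * buf[4..29] carry the 26 bytes of static metadata that are copied
 * into out->sb starting at index i.
 */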
6050
6051static bool
6052is_hdr_metadata_different(const struct drm_connector_state *old_state,
6053 const struct drm_connector_state *new_state)
6054{
6055 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6056 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6057
6058 if (old_blob != new_blob) {
6059 if (old_blob && new_blob &&
6060 old_blob->length == new_blob->length)
6061 return memcmp(old_blob->data, new_blob->data,
6062 old_blob->length);
6063
6064 return true;
6065 }
6066
6067 return false;
6068}
6069
6070static int
6071amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 6072 struct drm_atomic_state *state)
88694af9 6073{
51e857af
SP
6074 struct drm_connector_state *new_con_state =
6075 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
6076 struct drm_connector_state *old_con_state =
6077 drm_atomic_get_old_connector_state(state, conn);
6078 struct drm_crtc *crtc = new_con_state->crtc;
6079 struct drm_crtc_state *new_crtc_state;
6080 int ret;
6081
e8a98235
RS
6082 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6083
88694af9
NK
6084 if (!crtc)
6085 return 0;
6086
6087 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6088 struct dc_info_packet hdr_infopacket;
6089
6090 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6091 if (ret)
6092 return ret;
6093
6094 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6095 if (IS_ERR(new_crtc_state))
6096 return PTR_ERR(new_crtc_state);
6097
6098 /*
6099 * DC considers the stream backends changed if the
6100 * static metadata changes. Forcing the modeset also
6101 * gives a simple way for userspace to switch from
b232d4ed
NK
6102 * 8bpc to 10bpc when setting the metadata to enter
6103 * or exit HDR.
6104 *
6105 * Changing the static metadata after it's been
6106 * set is permissible, however. So only force a
6107 * modeset if we're entering or exiting HDR.
88694af9 6108 */
b232d4ed
NK
6109 new_crtc_state->mode_changed =
6110 !old_con_state->hdr_output_metadata ||
6111 !new_con_state->hdr_output_metadata;
88694af9
NK
6112 }
6113
6114 return 0;
6115}
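/*
 * Illustrative cases for the mode_changed rule above (assumed
 * states): entering HDR (old metadata NULL, new non-NULL) or leaving
 * it forces a modeset; editing metadata while already in HDR (both
 * blobs non-NULL) only refreshes the infopacket without a modeset.
 */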
6116
e7b07cee
HW
6117static const struct drm_connector_helper_funcs
6118amdgpu_dm_connector_helper_funcs = {
6119 /*
1f6010a9 6120 * If a second, bigger display is hotplugged in FB console mode, its higher
b830ebc9 6121 * resolution modes will be filtered out by drm_mode_validate_size() and
1f6010a9 6122 * will be missing after the user starts lightdm. So we need to renew the
b830ebc9
HW
 6123 * mode list in the get_modes callback, not just return the mode count.
6124 */
e7b07cee
HW
6125 .get_modes = get_modes,
6126 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 6127 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
6128};
6129
6130static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6131{
6132}
6133
d6ef9b41 6134static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
6135{
6136 struct drm_atomic_state *state = new_crtc_state->state;
6137 struct drm_plane *plane;
6138 int num_active = 0;
6139
6140 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6141 struct drm_plane_state *new_plane_state;
6142
6143 /* Cursor planes are "fake". */
6144 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6145 continue;
6146
6147 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6148
6149 if (!new_plane_state) {
6150 /*
 6151 * The plane is enabled on the CRTC and hasn't changed
6152 * state. This means that it previously passed
6153 * validation and is therefore enabled.
6154 */
6155 num_active += 1;
6156 continue;
6157 }
6158
6159 /* We need a framebuffer to be considered enabled. */
6160 num_active += (new_plane_state->fb != NULL);
6161 }
6162
d6ef9b41
NK
6163 return num_active;
6164}
6165
8fe684e9
NK
6166static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6167 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
6168{
6169 struct dm_crtc_state *dm_new_crtc_state =
6170 to_dm_crtc_state(new_crtc_state);
6171
6172 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
6173
6174 if (!dm_new_crtc_state->stream)
6175 return;
6176
6177 dm_new_crtc_state->active_planes =
6178 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
6179}
6180
3ee6b26b 6181static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 6182 struct drm_atomic_state *state)
e7b07cee 6183{
29b77ad7
MR
6184 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6185 crtc);
1348969a 6186 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 6187 struct dc *dc = adev->dm.dc;
29b77ad7 6188 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
6189 int ret = -EINVAL;
6190
5b8c5969 6191 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 6192
29b77ad7 6193 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 6194
9b690ef3 6195 if (unlikely(!dm_crtc_state->stream &&
29b77ad7 6196 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
6197 WARN_ON(1);
6198 return ret;
6199 }
6200
bc92c065 6201 /*
b836a274
MD
6202 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6203 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6204 * planes are disabled, which is not supported by the hardware. And there is legacy
6205 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 6206 */
29b77ad7 6207 if (crtc_state->enable &&
ea9522f5
SS
6208 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6209 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 6210 return -EINVAL;
ea9522f5 6211 }
c14a005c 6212
b836a274
MD
6213 /* In some use cases, like reset, no stream is attached */
6214 if (!dm_crtc_state->stream)
6215 return 0;
6216
62c933f9 6217 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
6218 return 0;
6219
ea9522f5 6220 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
6221 return ret;
6222}
6223
3ee6b26b
AD
6224static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6225 const struct drm_display_mode *mode,
6226 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
6227{
6228 return true;
6229}
6230
6231static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6232 .disable = dm_crtc_helper_disable,
6233 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
6234 .mode_fixup = dm_crtc_helper_mode_fixup,
6235 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
6236};
6237
6238static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6239{
6240
6241}
6242
3261e013
ML
6243static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6244{
6245 switch (display_color_depth) {
6246 case COLOR_DEPTH_666:
6247 return 6;
6248 case COLOR_DEPTH_888:
6249 return 8;
6250 case COLOR_DEPTH_101010:
6251 return 10;
6252 case COLOR_DEPTH_121212:
6253 return 12;
6254 case COLOR_DEPTH_141414:
6255 return 14;
6256 case COLOR_DEPTH_161616:
6257 return 16;
6258 default:
6259 break;
6260 }
6261 return 0;
6262}
6263
3ee6b26b
AD
6264static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6265 struct drm_crtc_state *crtc_state,
6266 struct drm_connector_state *conn_state)
e7b07cee 6267{
3261e013
ML
6268 struct drm_atomic_state *state = crtc_state->state;
6269 struct drm_connector *connector = conn_state->connector;
6270 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6271 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6272 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6273 struct drm_dp_mst_topology_mgr *mst_mgr;
6274 struct drm_dp_mst_port *mst_port;
6275 enum dc_color_depth color_depth;
6276 int clock, bpp = 0;
1bc22f20 6277 bool is_y420 = false;
3261e013
ML
6278
6279 if (!aconnector->port || !aconnector->dc_sink)
6280 return 0;
6281
6282 mst_port = aconnector->port;
6283 mst_mgr = &aconnector->mst_port->mst_mgr;
6284
6285 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6286 return 0;
6287
6288 if (!state->duplicated) {
cbd14ae7 6289 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
6290 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6291 aconnector->force_yuv420_output;
cbd14ae7
SW
6292 color_depth = convert_color_depth_from_display_info(connector,
6293 is_y420,
6294 max_bpc);
3261e013
ML
6295 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6296 clock = adjusted_mode->clock;
dc48529f 6297 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
6298 }
6299 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6300 mst_mgr,
6301 mst_port,
1c6c1cb5 6302 dm_new_connector_state->pbn,
03ca9600 6303 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
6304 if (dm_new_connector_state->vcpi_slots < 0) {
6305 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6306 return dm_new_connector_state->vcpi_slots;
6307 }
e7b07cee
HW
6308 return 0;
6309}
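/*
 * Worked example (assumed mode, for illustration): an 8 bpc stream
 * gives bpp = convert_dc_color_depth_into_bpc(COLOR_DEPTH_888) * 3 =
 * 24, and 1080p60 gives clock = 148500 kHz; drm_dp_calc_pbn_mode()
 * then translates that bandwidth into the PBN used for VCPI slot
 * allocation on the MST link.
 */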
6310
6311const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6312 .disable = dm_encoder_helper_disable,
6313 .atomic_check = dm_encoder_helper_atomic_check
6314};
6315
d9fe1a4c 6316#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
6317static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6318 struct dc_state *dc_state)
6319{
6320 struct dc_stream_state *stream = NULL;
6321 struct drm_connector *connector;
6322 struct drm_connector_state *new_con_state, *old_con_state;
6323 struct amdgpu_dm_connector *aconnector;
6324 struct dm_connector_state *dm_conn_state;
6325 int i, j, clock, bpp;
6326 int vcpi, pbn_div, pbn = 0;
6327
6328 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6329
6330 aconnector = to_amdgpu_dm_connector(connector);
6331
6332 if (!aconnector->port)
6333 continue;
6334
6335 if (!new_con_state || !new_con_state->crtc)
6336 continue;
6337
6338 dm_conn_state = to_dm_connector_state(new_con_state);
6339
6340 for (j = 0; j < dc_state->stream_count; j++) {
6341 stream = dc_state->streams[j];
6342 if (!stream)
6343 continue;
6344
6345 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6346 break;
6347
6348 stream = NULL;
6349 }
6350
6351 if (!stream)
6352 continue;
6353
6354 if (stream->timing.flags.DSC != 1) {
6355 drm_dp_mst_atomic_enable_dsc(state,
6356 aconnector->port,
6357 dm_conn_state->pbn,
6358 0,
6359 false);
6360 continue;
6361 }
6362
6363 pbn_div = dm_mst_get_pbn_divider(stream->link);
6364 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6365 clock = stream->timing.pix_clk_100hz / 10;
6366 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6367 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6368 aconnector->port,
6369 pbn, pbn_div,
6370 true);
6371 if (vcpi < 0)
6372 return vcpi;
6373
6374 dm_conn_state->pbn = pbn;
6375 dm_conn_state->vcpi_slots = vcpi;
6376 }
6377 return 0;
6378}
d9fe1a4c 6379#endif
29b9ba74 6380
e7b07cee
HW
6381static void dm_drm_plane_reset(struct drm_plane *plane)
6382{
6383 struct dm_plane_state *amdgpu_state = NULL;
6384
6385 if (plane->state)
6386 plane->funcs->atomic_destroy_state(plane, plane->state);
6387
6388 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 6389 WARN_ON(amdgpu_state == NULL);
1f6010a9 6390
7ddaef96
NK
6391 if (amdgpu_state)
6392 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
6393}
6394
6395static struct drm_plane_state *
6396dm_drm_plane_duplicate_state(struct drm_plane *plane)
6397{
6398 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6399
6400 old_dm_plane_state = to_dm_plane_state(plane->state);
6401 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6402 if (!dm_plane_state)
6403 return NULL;
6404
6405 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6406
3be5262e
HW
6407 if (old_dm_plane_state->dc_state) {
6408 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6409 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
6410 }
6411
6412 return &dm_plane_state->base;
6413}
6414
dfd84d90 6415static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 6416 struct drm_plane_state *state)
e7b07cee
HW
6417{
6418 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6419
3be5262e
HW
6420 if (dm_plane_state->dc_state)
6421 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 6422
0627bbd3 6423 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
6424}
6425
6426static const struct drm_plane_funcs dm_plane_funcs = {
6427 .update_plane = drm_atomic_helper_update_plane,
6428 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 6429 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
6430 .reset = dm_drm_plane_reset,
6431 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6432 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 6433 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
6434};
6435
3ee6b26b
AD
6436static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6437 struct drm_plane_state *new_state)
e7b07cee
HW
6438{
6439 struct amdgpu_framebuffer *afb;
6440 struct drm_gem_object *obj;
5d43be0c 6441 struct amdgpu_device *adev;
e7b07cee 6442 struct amdgpu_bo *rbo;
e7b07cee 6443 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
6444 struct list_head list;
6445 struct ttm_validate_buffer tv;
6446 struct ww_acquire_ctx ticket;
5d43be0c
CK
6447 uint32_t domain;
6448 int r;
e7b07cee
HW
6449
6450 if (!new_state->fb) {
f1ad2f5e 6451 DRM_DEBUG_DRIVER("No FB bound\n");
e7b07cee
HW
6452 return 0;
6453 }
6454
6455 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 6456 obj = new_state->fb->obj[0];
e7b07cee 6457 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 6458 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
6459 INIT_LIST_HEAD(&list);
6460
6461 tv.bo = &rbo->tbo;
6462 tv.num_shared = 1;
6463 list_add(&tv.head, &list);
6464
9165fb87 6465 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
6466 if (r) {
 6467 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
e7b07cee 6468 return r;
0f257b09 6469 }
e7b07cee 6470
5d43be0c 6471 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 6472 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
6473 else
6474 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 6475
7b7c6c81 6476 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 6477 if (unlikely(r != 0)) {
30b7c614
HW
6478 if (r != -ERESTARTSYS)
6479 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 6480 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
6481 return r;
6482 }
6483
bb812f1e
JZ
6484 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6485 if (unlikely(r != 0)) {
6486 amdgpu_bo_unpin(rbo);
0f257b09 6487 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6488 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
6489 return r;
6490 }
7df7e505 6491
0f257b09 6492 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6493
7b7c6c81 6494 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
6495
6496 amdgpu_bo_ref(rbo);
6497
cf322b49
NK
6498 /**
6499 * We don't do surface updates on planes that have been newly created,
6500 * but we also don't have the afb->address during atomic check.
6501 *
6502 * Fill in buffer attributes depending on the address here, but only on
6503 * newly created planes since they're not being used by DC yet and this
6504 * won't modify global state.
6505 */
6506 dm_plane_state_old = to_dm_plane_state(plane->state);
6507 dm_plane_state_new = to_dm_plane_state(new_state);
6508
3be5262e 6509 if (dm_plane_state_new->dc_state &&
cf322b49
NK
6510 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6511 struct dc_plane_state *plane_state =
6512 dm_plane_state_new->dc_state;
6513 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 6514
320932bf 6515 fill_plane_buffer_attributes(
695af5f9 6516 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 6517 afb->tiling_flags,
cf322b49
NK
6518 &plane_state->tiling_info, &plane_state->plane_size,
6519 &plane_state->dcc, &plane_state->address,
6eed95b0 6520 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
6521 }
6522
e7b07cee
HW
6523 return 0;
6524}
6525
3ee6b26b
AD
6526static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6527 struct drm_plane_state *old_state)
e7b07cee
HW
6528{
6529 struct amdgpu_bo *rbo;
e7b07cee
HW
6530 int r;
6531
6532 if (!old_state->fb)
6533 return;
6534
e68d14dd 6535 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
6536 r = amdgpu_bo_reserve(rbo, false);
6537 if (unlikely(r)) {
6538 DRM_ERROR("failed to reserve rbo before unpin\n");
6539 return;
b830ebc9
HW
6540 }
6541
6542 amdgpu_bo_unpin(rbo);
6543 amdgpu_bo_unreserve(rbo);
6544 amdgpu_bo_unref(&rbo);
e7b07cee
HW
6545}
6546
8c44515b
AP
6547static int dm_plane_helper_check_state(struct drm_plane_state *state,
6548 struct drm_crtc_state *new_crtc_state)
6549{
6300b3bd
MK
6550 struct drm_framebuffer *fb = state->fb;
6551 int min_downscale, max_upscale;
6552 int min_scale = 0;
6553 int max_scale = INT_MAX;
6554
40d916a2 6555 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 6556 if (fb && state->crtc) {
40d916a2
NC
6557 /* Validate viewport to cover the case when only the position changes */
6558 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6559 int viewport_width = state->crtc_w;
6560 int viewport_height = state->crtc_h;
6561
6562 if (state->crtc_x < 0)
6563 viewport_width += state->crtc_x;
6564 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6565 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6566
6567 if (state->crtc_y < 0)
6568 viewport_height += state->crtc_y;
6569 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6570 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6571
4abdb72b
NC
6572 if (viewport_width < 0 || viewport_height < 0) {
6573 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6574 return -EINVAL;
6575 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6576 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6577 return -EINVAL;
6578 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6579 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 6580 return -EINVAL;
4abdb72b
NC
6581 }
6582
40d916a2
NC
6583 }
6584
6585 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
6586 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6587 &min_downscale, &max_upscale);
6588 /*
6589 * Convert to drm convention: 16.16 fixed point, instead of dc's
6590 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6591 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6592 */
6593 min_scale = (1000 << 16) / max_upscale;
6594 max_scale = (1000 << 16) / min_downscale;
6595 }
8c44515b 6596
8c44515b 6597 return drm_atomic_helper_check_plane_state(
6300b3bd 6598 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
6599}
6600
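/*
 * Atomic check for a plane: run the generic state/scaling checks above,
 * fill the DC scaling info, and let DC validate whether the resulting
 * plane state is supported by the hardware.
 */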
7578ecda
AD
6601static int dm_plane_atomic_check(struct drm_plane *plane,
6602 struct drm_plane_state *state)
cbd19488 6603{
1348969a 6604 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 6605 struct dc *dc = adev->dm.dc;
78171832 6606 struct dm_plane_state *dm_plane_state;
695af5f9 6607 struct dc_scaling_info scaling_info;
8c44515b 6608 struct drm_crtc_state *new_crtc_state;
695af5f9 6609 int ret;
78171832 6610
e8a98235
RS
6611 trace_amdgpu_dm_plane_atomic_check(state);
6612
78171832 6613 dm_plane_state = to_dm_plane_state(state);
cbd19488 6614
3be5262e 6615 if (!dm_plane_state->dc_state)
9a3329b1 6616 return 0;
cbd19488 6617
8c44515b
AP
6618 new_crtc_state =
6619 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6620 if (!new_crtc_state)
6621 return -EINVAL;
6622
6623 ret = dm_plane_helper_check_state(state, new_crtc_state);
6624 if (ret)
6625 return ret;
6626
695af5f9
NK
6627 ret = fill_dc_scaling_info(state, &scaling_info);
6628 if (ret)
6629 return ret;
a05bcff1 6630
62c933f9 6631 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
6632 return 0;
6633
6634 return -EINVAL;
6635}
6636
674e78ac
NK
6637static int dm_plane_atomic_async_check(struct drm_plane *plane,
6638 struct drm_plane_state *new_plane_state)
6639{
6640 /* Only support async updates on cursor planes. */
6641 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6642 return -EINVAL;
6643
6644 return 0;
6645}
6646
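/*
 * Async update path (cursor planes only, per the check above): swap in
 * the new framebuffer, copy the new src/crtc rectangles into the
 * committed plane state, and program the cursor immediately, bypassing
 * a full atomic commit.
 */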
6647static void dm_plane_atomic_async_update(struct drm_plane *plane,
6648 struct drm_plane_state *new_state)
6649{
6650 struct drm_plane_state *old_state =
6651 drm_atomic_get_old_plane_state(new_state->state, plane);
6652
e8a98235
RS
6653 trace_amdgpu_dm_atomic_update_cursor(new_state);
6654
332af874 6655 swap(plane->state->fb, new_state->fb);
674e78ac
NK
6656
6657 plane->state->src_x = new_state->src_x;
6658 plane->state->src_y = new_state->src_y;
6659 plane->state->src_w = new_state->src_w;
6660 plane->state->src_h = new_state->src_h;
6661 plane->state->crtc_x = new_state->crtc_x;
6662 plane->state->crtc_y = new_state->crtc_y;
6663 plane->state->crtc_w = new_state->crtc_w;
6664 plane->state->crtc_h = new_state->crtc_h;
6665
6666 handle_cursor_update(plane, old_state);
6667}
6668
e7b07cee
HW
6669static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6670 .prepare_fb = dm_plane_helper_prepare_fb,
6671 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 6672 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
6673 .atomic_async_check = dm_plane_atomic_async_check,
6674 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
6675};
6676
6677/*
 6678 * TODO: these are currently initialized to RGB formats only.
 6679 * For future use cases we should either initialize them dynamically based on
 6680 * plane capabilities, or initialize this array to all formats, so the internal
1f6010a9 6681 * DRM check will succeed, and let DC implement the proper check.
e7b07cee 6682 */
d90371b0 6683static const uint32_t rgb_formats[] = {
e7b07cee
HW
6684 DRM_FORMAT_XRGB8888,
6685 DRM_FORMAT_ARGB8888,
6686 DRM_FORMAT_RGBA8888,
6687 DRM_FORMAT_XRGB2101010,
6688 DRM_FORMAT_XBGR2101010,
6689 DRM_FORMAT_ARGB2101010,
6690 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
6691 DRM_FORMAT_XBGR8888,
6692 DRM_FORMAT_ABGR8888,
46dd9ff7 6693 DRM_FORMAT_RGB565,
e7b07cee
HW
6694};
6695
0d579c7e
NK
6696static const uint32_t overlay_formats[] = {
6697 DRM_FORMAT_XRGB8888,
6698 DRM_FORMAT_ARGB8888,
6699 DRM_FORMAT_RGBA8888,
6700 DRM_FORMAT_XBGR8888,
6701 DRM_FORMAT_ABGR8888,
7267a1a9 6702 DRM_FORMAT_RGB565
e7b07cee
HW
6703};
6704
6705static const u32 cursor_formats[] = {
6706 DRM_FORMAT_ARGB8888
6707};
6708
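/*
 * Fill @formats with up to @max_formats pixel formats supported by
 * @plane and return how many were written. YUV (NV12/P010) and FP16
 * formats are only advertised when the DC plane caps report support.
 */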
37c6a93b
NK
6709static int get_plane_formats(const struct drm_plane *plane,
6710 const struct dc_plane_cap *plane_cap,
6711 uint32_t *formats, int max_formats)
e7b07cee 6712{
37c6a93b
NK
6713 int i, num_formats = 0;
6714
6715 /*
6716 * TODO: Query support for each group of formats directly from
6717 * DC plane caps. This will require adding more formats to the
6718 * caps list.
6719 */
e7b07cee 6720
f180b4bc 6721 switch (plane->type) {
e7b07cee 6722 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
6723 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6724 if (num_formats >= max_formats)
6725 break;
6726
6727 formats[num_formats++] = rgb_formats[i];
6728 }
6729
ea36ad34 6730 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 6731 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
6732 if (plane_cap && plane_cap->pixel_format_support.p010)
6733 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
6734 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6735 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6736 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
6737 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6738 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 6739 }
e7b07cee 6740 break;
37c6a93b 6741
e7b07cee 6742 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
6743 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6744 if (num_formats >= max_formats)
6745 break;
6746
6747 formats[num_formats++] = overlay_formats[i];
6748 }
e7b07cee 6749 break;
37c6a93b 6750
e7b07cee 6751 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
6752 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6753 if (num_formats >= max_formats)
6754 break;
6755
6756 formats[num_formats++] = cursor_formats[i];
6757 }
e7b07cee
HW
6758 break;
6759 }
6760
37c6a93b
NK
6761 return num_formats;
6762}
6763
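/*
 * Register a universal DRM plane: query the supported formats and
 * modifiers, initialize the plane, and attach blending, YUV color
 * encoding/range and rotation properties where the plane caps and ASIC
 * allow them.
 */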
6764static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6765 struct drm_plane *plane,
6766 unsigned long possible_crtcs,
6767 const struct dc_plane_cap *plane_cap)
6768{
6769 uint32_t formats[32];
6770 int num_formats;
6771 int res = -EPERM;
ecc874a6 6772 unsigned int supported_rotations;
faa37f54 6773 uint64_t *modifiers = NULL;
37c6a93b
NK
6774
6775 num_formats = get_plane_formats(plane, plane_cap, formats,
6776 ARRAY_SIZE(formats));
6777
faa37f54
BN
6778 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6779 if (res)
6780 return res;
6781
4a580877 6782 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 6783 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
6784 modifiers, plane->type, NULL);
6785 kfree(modifiers);
37c6a93b
NK
6786 if (res)
6787 return res;
6788
cc1fec57
NK
6789 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6790 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
6791 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6792 BIT(DRM_MODE_BLEND_PREMULTI);
6793
6794 drm_plane_create_alpha_property(plane);
6795 drm_plane_create_blend_mode_property(plane, blend_caps);
6796 }
6797
fc8e5230 6798 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
6799 plane_cap &&
6800 (plane_cap->pixel_format_support.nv12 ||
6801 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
6802 /* This only affects YUV formats. */
6803 drm_plane_create_color_properties(
6804 plane,
6805 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
6806 BIT(DRM_COLOR_YCBCR_BT709) |
6807 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
6808 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6809 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6810 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6811 }
6812
ecc874a6
PLG
6813 supported_rotations =
6814 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6815 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6816
1347385f
SS
6817 if (dm->adev->asic_type >= CHIP_BONAIRE &&
6818 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
6819 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6820 supported_rotations);
ecc874a6 6821
f180b4bc 6822 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 6823
96719c54 6824 /* Create (reset) the plane state */
f180b4bc
HW
6825 if (plane->funcs->reset)
6826 plane->funcs->reset(plane);
96719c54 6827
37c6a93b 6828 return 0;
e7b07cee
HW
6829}
6830
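/*
 * Create a CRTC with a dedicated cursor plane on top of the given
 * primary plane, enable color management on it, and record the maximum
 * cursor dimensions reported by DC.
 */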
7578ecda
AD
6831static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6832 struct drm_plane *plane,
6833 uint32_t crtc_index)
e7b07cee
HW
6834{
6835 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 6836 struct drm_plane *cursor_plane;
e7b07cee
HW
6837
6838 int res = -ENOMEM;
6839
6840 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6841 if (!cursor_plane)
6842 goto fail;
6843
f180b4bc 6844 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 6845 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
6846
6847 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6848 if (!acrtc)
6849 goto fail;
6850
6851 res = drm_crtc_init_with_planes(
6852 dm->ddev,
6853 &acrtc->base,
6854 plane,
f180b4bc 6855 cursor_plane,
e7b07cee
HW
6856 &amdgpu_dm_crtc_funcs, NULL);
6857
6858 if (res)
6859 goto fail;
6860
6861 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6862
96719c54
HW
 6863 /* Create (reset) the crtc state */
6864 if (acrtc->base.funcs->reset)
6865 acrtc->base.funcs->reset(&acrtc->base);
6866
e7b07cee
HW
6867 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6868 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6869
6870 acrtc->crtc_id = crtc_index;
6871 acrtc->base.enabled = false;
c37e2d29 6872 acrtc->otg_inst = -1;
e7b07cee
HW
6873
6874 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
6875 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6876 true, MAX_COLOR_LUT_ENTRIES);
086247a4 6877 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 6878
e7b07cee
HW
6879 return 0;
6880
6881fail:
b830ebc9
HW
6882 kfree(acrtc);
6883 kfree(cursor_plane);
e7b07cee
HW
6884 return res;
6885}
6886
6887
6888static int to_drm_connector_type(enum signal_type st)
6889{
6890 switch (st) {
6891 case SIGNAL_TYPE_HDMI_TYPE_A:
6892 return DRM_MODE_CONNECTOR_HDMIA;
6893 case SIGNAL_TYPE_EDP:
6894 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
6895 case SIGNAL_TYPE_LVDS:
6896 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
6897 case SIGNAL_TYPE_RGB:
6898 return DRM_MODE_CONNECTOR_VGA;
6899 case SIGNAL_TYPE_DISPLAY_PORT:
6900 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6901 return DRM_MODE_CONNECTOR_DisplayPort;
6902 case SIGNAL_TYPE_DVI_DUAL_LINK:
6903 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6904 return DRM_MODE_CONNECTOR_DVID;
6905 case SIGNAL_TYPE_VIRTUAL:
6906 return DRM_MODE_CONNECTOR_VIRTUAL;
6907
6908 default:
6909 return DRM_MODE_CONNECTOR_Unknown;
6910 }
6911}
6912
2b4c1c05
DV
6913static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6914{
62afb4ad
JRS
6915 struct drm_encoder *encoder;
6916
6917 /* There is only one encoder per connector */
6918 drm_connector_for_each_possible_encoder(connector, encoder)
6919 return encoder;
6920
6921 return NULL;
2b4c1c05
DV
6922}
6923
e7b07cee
HW
6924static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6925{
e7b07cee
HW
6926 struct drm_encoder *encoder;
6927 struct amdgpu_encoder *amdgpu_encoder;
6928
2b4c1c05 6929 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
6930
6931 if (encoder == NULL)
6932 return;
6933
6934 amdgpu_encoder = to_amdgpu_encoder(encoder);
6935
6936 amdgpu_encoder->native_mode.clock = 0;
6937
6938 if (!list_empty(&connector->probed_modes)) {
6939 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 6940
e7b07cee 6941 list_for_each_entry(preferred_mode,
b830ebc9
HW
6942 &connector->probed_modes,
6943 head) {
6944 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6945 amdgpu_encoder->native_mode = *preferred_mode;
6946
e7b07cee
HW
6947 break;
6948 }
6949
6950 }
6951}
6952
3ee6b26b
AD
6953static struct drm_display_mode *
6954amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6955 char *name,
6956 int hdisplay, int vdisplay)
e7b07cee
HW
6957{
6958 struct drm_device *dev = encoder->dev;
6959 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6960 struct drm_display_mode *mode = NULL;
6961 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6962
6963 mode = drm_mode_duplicate(dev, native_mode);
6964
b830ebc9 6965 if (mode == NULL)
e7b07cee
HW
6966 return NULL;
6967
6968 mode->hdisplay = hdisplay;
6969 mode->vdisplay = vdisplay;
6970 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 6971 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
6972
6973 return mode;
6974
6975}
6976
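/*
 * Add a list of common fixed modes (640x480 through 1920x1200) that are
 * smaller than the native mode and not already in the probed mode list;
 * useful e.g. for panels that only report their single native mode.
 */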
6977static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 6978 struct drm_connector *connector)
e7b07cee
HW
6979{
6980 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6981 struct drm_display_mode *mode = NULL;
6982 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
6983 struct amdgpu_dm_connector *amdgpu_dm_connector =
6984 to_amdgpu_dm_connector(connector);
e7b07cee
HW
6985 int i;
6986 int n;
6987 struct mode_size {
6988 char name[DRM_DISPLAY_MODE_LEN];
6989 int w;
6990 int h;
b830ebc9 6991 } common_modes[] = {
e7b07cee
HW
6992 { "640x480", 640, 480},
6993 { "800x600", 800, 600},
6994 { "1024x768", 1024, 768},
6995 { "1280x720", 1280, 720},
6996 { "1280x800", 1280, 800},
6997 {"1280x1024", 1280, 1024},
6998 { "1440x900", 1440, 900},
6999 {"1680x1050", 1680, 1050},
7000 {"1600x1200", 1600, 1200},
7001 {"1920x1080", 1920, 1080},
7002 {"1920x1200", 1920, 1200}
7003 };
7004
b830ebc9 7005 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
7006
7007 for (i = 0; i < n; i++) {
7008 struct drm_display_mode *curmode = NULL;
7009 bool mode_existed = false;
7010
7011 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
7012 common_modes[i].h > native_mode->vdisplay ||
7013 (common_modes[i].w == native_mode->hdisplay &&
7014 common_modes[i].h == native_mode->vdisplay))
7015 continue;
e7b07cee
HW
7016
7017 list_for_each_entry(curmode, &connector->probed_modes, head) {
7018 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 7019 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
7020 mode_existed = true;
7021 break;
7022 }
7023 }
7024
7025 if (mode_existed)
7026 continue;
7027
7028 mode = amdgpu_dm_create_common_mode(encoder,
7029 common_modes[i].name, common_modes[i].w,
7030 common_modes[i].h);
7031 drm_mode_probed_add(connector, mode);
c84dec2f 7032 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
7033 }
7034}
7035
3ee6b26b
AD
7036static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7037 struct edid *edid)
e7b07cee 7038{
c84dec2f
HW
7039 struct amdgpu_dm_connector *amdgpu_dm_connector =
7040 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7041
7042 if (edid) {
7043 /* empty probed_modes */
7044 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 7045 amdgpu_dm_connector->num_modes =
e7b07cee
HW
7046 drm_add_edid_modes(connector, edid);
7047
f1e5e913
YMM
 7048 /* Sort the probed modes before calling
 7049 * amdgpu_dm_get_native_mode(), since an EDID can have
 7050 * more than one preferred mode. Modes that appear
 7051 * later in the probed mode list could be of higher,
 7052 * preferred resolution. For example, a 3840x2160
 7053 * mode in the base EDID preferred timing and a 4096x2160
 7054 * preferred mode in a DID extension block later.
 7055 */
7056 drm_mode_sort(&connector->probed_modes);
e7b07cee 7057 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
7058
7059 /* Freesync capabilities are reset by calling
7060 * drm_add_edid_modes() and need to be
7061 * restored here.
7062 */
7063 amdgpu_dm_update_freesync_caps(connector, edid);
a8d8d3dc 7064 } else {
c84dec2f 7065 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 7066 }
e7b07cee
HW
7067}
7068
7578ecda 7069static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 7070{
c84dec2f
HW
7071 struct amdgpu_dm_connector *amdgpu_dm_connector =
7072 to_amdgpu_dm_connector(connector);
e7b07cee 7073 struct drm_encoder *encoder;
c84dec2f 7074 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 7075
2b4c1c05 7076 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 7077
5c0e6840 7078 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
7079 amdgpu_dm_connector->num_modes =
7080 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
7081 } else {
7082 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7083 amdgpu_dm_connector_add_common_modes(encoder, connector);
7084 }
3e332d3a 7085 amdgpu_dm_fbc_init(connector);
5099114b 7086
c84dec2f 7087 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
7088}
7089
3ee6b26b
AD
7090void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7091 struct amdgpu_dm_connector *aconnector,
7092 int connector_type,
7093 struct dc_link *link,
7094 int link_index)
e7b07cee 7095{
1348969a 7096 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 7097
f04bee34
NK
7098 /*
7099 * Some of the properties below require access to state, like bpc.
7100 * Allocate some default initial connector state with our reset helper.
7101 */
7102 if (aconnector->base.funcs->reset)
7103 aconnector->base.funcs->reset(&aconnector->base);
7104
e7b07cee
HW
7105 aconnector->connector_id = link_index;
7106 aconnector->dc_link = link;
7107 aconnector->base.interlace_allowed = false;
7108 aconnector->base.doublescan_allowed = false;
7109 aconnector->base.stereo_allowed = false;
7110 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7111 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 7112 aconnector->audio_inst = -1;
e7b07cee
HW
7113 mutex_init(&aconnector->hpd_lock);
7114
1f6010a9
DF
7115 /*
 7116 * Configure HPD hot-plug support: connector->polled defaults to 0,
b830ebc9
HW
 7117 * which means HPD hot plug is not supported.
7118 */
e7b07cee
HW
7119 switch (connector_type) {
7120 case DRM_MODE_CONNECTOR_HDMIA:
7121 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7122 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7123 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
7124 break;
7125 case DRM_MODE_CONNECTOR_DisplayPort:
7126 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7127 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7128 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
7129 break;
7130 case DRM_MODE_CONNECTOR_DVID:
7131 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7132 break;
7133 default:
7134 break;
7135 }
7136
7137 drm_object_attach_property(&aconnector->base.base,
7138 dm->ddev->mode_config.scaling_mode_property,
7139 DRM_MODE_SCALE_NONE);
7140
7141 drm_object_attach_property(&aconnector->base.base,
7142 adev->mode_info.underscan_property,
7143 UNDERSCAN_OFF);
7144 drm_object_attach_property(&aconnector->base.base,
7145 adev->mode_info.underscan_hborder_property,
7146 0);
7147 drm_object_attach_property(&aconnector->base.base,
7148 adev->mode_info.underscan_vborder_property,
7149 0);
1825fd34 7150
8c61b31e
JFZ
7151 if (!aconnector->mst_port)
7152 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 7153
4a8ca46b
RL
 7154 /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
7155 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7156 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 7157
c1ee92f9 7158 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 7159 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
7160 drm_object_attach_property(&aconnector->base.base,
7161 adev->mode_info.abm_level_property, 0);
7162 }
bb47de73
NK
7163
7164 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
7165 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7166 connector_type == DRM_MODE_CONNECTOR_eDP) {
88694af9
NK
7167 drm_object_attach_property(
7168 &aconnector->base.base,
7169 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7170
8c61b31e
JFZ
7171 if (!aconnector->mst_port)
7172 drm_connector_attach_vrr_capable_property(&aconnector->base);
7173
0c8620d6 7174#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 7175 if (adev->dm.hdcp_workqueue)
53e108aa 7176 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 7177#endif
bb47de73 7178 }
e7b07cee
HW
7179}
7180
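/*
 * i2c_algorithm master_xfer hook: translate the i2c_msg array into a DC
 * i2c_command and submit it on the link's DDC channel. Returns the
 * number of messages on success, -EIO on failure.
 */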
7578ecda
AD
7181static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7182 struct i2c_msg *msgs, int num)
e7b07cee
HW
7183{
7184 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7185 struct ddc_service *ddc_service = i2c->ddc_service;
7186 struct i2c_command cmd;
7187 int i;
7188 int result = -EIO;
7189
b830ebc9 7190 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
7191
7192 if (!cmd.payloads)
7193 return result;
7194
7195 cmd.number_of_payloads = num;
7196 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7197 cmd.speed = 100;
7198
7199 for (i = 0; i < num; i++) {
7200 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7201 cmd.payloads[i].address = msgs[i].addr;
7202 cmd.payloads[i].length = msgs[i].len;
7203 cmd.payloads[i].data = msgs[i].buf;
7204 }
7205
c85e6e54
DF
7206 if (dc_submit_i2c(
7207 ddc_service->ctx->dc,
7208 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
7209 &cmd))
7210 result = num;
7211
7212 kfree(cmd.payloads);
7213 return result;
7214}
7215
7578ecda 7216static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
7217{
7218 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7219}
7220
7221static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7222 .master_xfer = amdgpu_dm_i2c_xfer,
7223 .functionality = amdgpu_dm_i2c_func,
7224};
7225
3ee6b26b
AD
7226static struct amdgpu_i2c_adapter *
7227create_i2c(struct ddc_service *ddc_service,
7228 int link_index,
7229 int *res)
e7b07cee
HW
7230{
7231 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7232 struct amdgpu_i2c_adapter *i2c;
7233
b830ebc9 7234 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
7235 if (!i2c)
7236 return NULL;
e7b07cee
HW
7237 i2c->base.owner = THIS_MODULE;
7238 i2c->base.class = I2C_CLASS_DDC;
7239 i2c->base.dev.parent = &adev->pdev->dev;
7240 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 7241 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
7242 i2c_set_adapdata(&i2c->base, i2c);
7243 i2c->ddc_service = ddc_service;
c85e6e54 7244 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
7245
7246 return i2c;
7247}
7248
89fc8d4e 7249
1f6010a9
DF
7250/*
7251 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
7252 * dc_link which will be represented by this aconnector.
7253 */
7578ecda
AD
7254static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7255 struct amdgpu_dm_connector *aconnector,
7256 uint32_t link_index,
7257 struct amdgpu_encoder *aencoder)
e7b07cee
HW
7258{
7259 int res = 0;
7260 int connector_type;
7261 struct dc *dc = dm->dc;
7262 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7263 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
7264
7265 link->priv = aconnector;
e7b07cee 7266
f1ad2f5e 7267 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
7268
7269 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
7270 if (!i2c) {
7271 DRM_ERROR("Failed to create i2c adapter data\n");
7272 return -ENOMEM;
7273 }
7274
e7b07cee
HW
7275 aconnector->i2c = i2c;
7276 res = i2c_add_adapter(&i2c->base);
7277
7278 if (res) {
7279 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7280 goto out_free;
7281 }
7282
7283 connector_type = to_drm_connector_type(link->connector_signal);
7284
17165de2 7285 res = drm_connector_init_with_ddc(
e7b07cee
HW
7286 dm->ddev,
7287 &aconnector->base,
7288 &amdgpu_dm_connector_funcs,
17165de2
AP
7289 connector_type,
7290 &i2c->base);
e7b07cee
HW
7291
7292 if (res) {
7293 DRM_ERROR("connector_init failed\n");
7294 aconnector->connector_id = -1;
7295 goto out_free;
7296 }
7297
7298 drm_connector_helper_add(
7299 &aconnector->base,
7300 &amdgpu_dm_connector_helper_funcs);
7301
7302 amdgpu_dm_connector_init_helper(
7303 dm,
7304 aconnector,
7305 connector_type,
7306 link,
7307 link_index);
7308
cde4c44d 7309 drm_connector_attach_encoder(
e7b07cee
HW
7310 &aconnector->base, &aencoder->base);
7311
e7b07cee
HW
7312 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7313 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 7314 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 7315
e7b07cee
HW
7316out_free:
7317 if (res) {
7318 kfree(i2c);
7319 aconnector->i2c = NULL;
7320 }
7321 return res;
7322}
7323
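/*
 * Return the encoder's possible_crtcs mask: one bit per CRTC, i.e.
 * (1 << num_crtc) - 1, capped at 6 CRTCs (0x3f).
 */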
7324int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7325{
7326 switch (adev->mode_info.num_crtc) {
7327 case 1:
7328 return 0x1;
7329 case 2:
7330 return 0x3;
7331 case 3:
7332 return 0x7;
7333 case 4:
7334 return 0xf;
7335 case 5:
7336 return 0x1f;
7337 case 6:
7338 default:
7339 return 0x3f;
7340 }
7341}
7342
7578ecda
AD
7343static int amdgpu_dm_encoder_init(struct drm_device *dev,
7344 struct amdgpu_encoder *aencoder,
7345 uint32_t link_index)
e7b07cee 7346{
1348969a 7347 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
7348
7349 int res = drm_encoder_init(dev,
7350 &aencoder->base,
7351 &amdgpu_dm_encoder_funcs,
7352 DRM_MODE_ENCODER_TMDS,
7353 NULL);
7354
7355 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7356
7357 if (!res)
7358 aencoder->encoder_id = link_index;
7359 else
7360 aencoder->encoder_id = -1;
7361
7362 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7363
7364 return res;
7365}
7366
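/*
 * Enable or disable the vblank and pageflip (and, if configured, the
 * secure-display vline0) interrupts for a CRTC. Vblank is switched on
 * before the IRQ references are taken and off after they are dropped.
 */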
3ee6b26b
AD
7367static void manage_dm_interrupts(struct amdgpu_device *adev,
7368 struct amdgpu_crtc *acrtc,
7369 bool enable)
e7b07cee
HW
7370{
7371 /*
8fe684e9
NK
7372 * We have no guarantee that the frontend index maps to the same
7373 * backend index - some even map to more than one.
7374 *
7375 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
7376 */
7377 int irq_type =
734dd01d 7378 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
7379 adev,
7380 acrtc->crtc_id);
7381
7382 if (enable) {
7383 drm_crtc_vblank_on(&acrtc->base);
7384 amdgpu_irq_get(
7385 adev,
7386 &adev->pageflip_irq,
7387 irq_type);
86bc2219
WL
7388#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7389 amdgpu_irq_get(
7390 adev,
7391 &adev->vline0_irq,
7392 irq_type);
7393#endif
e7b07cee 7394 } else {
86bc2219
WL
7395#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7396 amdgpu_irq_put(
7397 adev,
7398 &adev->vline0_irq,
7399 irq_type);
7400#endif
e7b07cee
HW
7401 amdgpu_irq_put(
7402 adev,
7403 &adev->pageflip_irq,
7404 irq_type);
7405 drm_crtc_vblank_off(&acrtc->base);
7406 }
7407}
7408
8fe684e9
NK
7409static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7410 struct amdgpu_crtc *acrtc)
7411{
7412 int irq_type =
7413 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7414
7415 /**
 7416 * This reads the current state for the IRQ and forcibly reapplies
7417 * the setting to hardware.
7418 */
7419 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7420}
7421
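/*
 * Return true if the scaling mode or the underscan enable/border
 * settings differ between the old and new connector state.
 */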
3ee6b26b
AD
7422static bool
7423is_scaling_state_different(const struct dm_connector_state *dm_state,
7424 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
7425{
7426 if (dm_state->scaling != old_dm_state->scaling)
7427 return true;
7428 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7429 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7430 return true;
7431 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7432 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7433 return true;
b830ebc9
HW
7434 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7435 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7436 return true;
e7b07cee
HW
7437 return false;
7438}
7439
0c8620d6
BL
7440#ifdef CONFIG_DRM_AMD_DC_HDCP
7441static bool is_content_protection_different(struct drm_connector_state *state,
7442 const struct drm_connector_state *old_state,
7443 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7444{
7445 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 7446 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 7447
31c0ed90 7448 /* Handles: Type0/1 change */
53e108aa
BL
7449 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7450 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7451 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7452 return true;
7453 }
7454
31c0ed90
BL
 7455 /* CP is being re-enabled; ignore this.
7456 *
7457 * Handles: ENABLED -> DESIRED
7458 */
0c8620d6
BL
7459 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7460 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7461 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7462 return false;
7463 }
7464
31c0ed90
BL
 7465 /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7466 *
7467 * Handles: UNDESIRED -> ENABLED
7468 */
0c8620d6
BL
7469 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7470 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7471 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7472
7473 /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
7474 * hot-plug, headless s3, dpms
31c0ed90
BL
7475 *
7476 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 7477 */
97f6c917
BL
7478 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7479 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7480 dm_con_state->update_hdcp = false;
0c8620d6 7481 return true;
97f6c917 7482 }
0c8620d6 7483
31c0ed90
BL
7484 /*
7485 * Handles: UNDESIRED -> UNDESIRED
7486 * DESIRED -> DESIRED
7487 * ENABLED -> ENABLED
7488 */
0c8620d6
BL
7489 if (old_state->content_protection == state->content_protection)
7490 return false;
7491
31c0ed90
BL
7492 /*
7493 * Handles: UNDESIRED -> DESIRED
7494 * DESIRED -> UNDESIRED
7495 * ENABLED -> UNDESIRED
7496 */
97f6c917 7497 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
7498 return true;
7499
31c0ed90
BL
7500 /*
7501 * Handles: DESIRED -> ENABLED
7502 */
0c8620d6
BL
7503 return false;
7504}
7505
0c8620d6 7506#endif
3ee6b26b
AD
7507static void remove_stream(struct amdgpu_device *adev,
7508 struct amdgpu_crtc *acrtc,
7509 struct dc_stream_state *stream)
e7b07cee
HW
7510{
 7511 /* This is the update mode case: mark the CRTC disabled and clear its OTG instance. */
e7b07cee
HW
7512
7513 acrtc->otg_inst = -1;
7514 acrtc->enabled = false;
7515}
7516
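/*
 * Compute the DC cursor position: positions entirely off-screen leave
 * the cursor disabled, while negative on-screen coordinates are clamped
 * to zero with the overhang folded into the hotspot.
 */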
7578ecda
AD
7517static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7518 struct dc_cursor_position *position)
2a8f6ccb 7519{
f4c2cc43 7520 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
7521 int x, y;
7522 int xorigin = 0, yorigin = 0;
7523
e371e19c
NK
7524 position->enable = false;
7525 position->x = 0;
7526 position->y = 0;
7527
7528 if (!crtc || !plane->state->fb)
2a8f6ccb 7529 return 0;
2a8f6ccb
HW
7530
7531 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7532 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7533 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7534 __func__,
7535 plane->state->crtc_w,
7536 plane->state->crtc_h);
7537 return -EINVAL;
7538 }
7539
7540 x = plane->state->crtc_x;
7541 y = plane->state->crtc_y;
c14a005c 7542
e371e19c
NK
7543 if (x <= -amdgpu_crtc->max_cursor_width ||
7544 y <= -amdgpu_crtc->max_cursor_height)
7545 return 0;
7546
2a8f6ccb
HW
7547 if (x < 0) {
7548 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7549 x = 0;
7550 }
7551 if (y < 0) {
7552 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7553 y = 0;
7554 }
7555 position->enable = true;
d243b6ff 7556 position->translate_by_source = true;
2a8f6ccb
HW
7557 position->x = x;
7558 position->y = y;
7559 position->x_hotspot = xorigin;
7560 position->y_hotspot = yorigin;
7561
7562 return 0;
7563}
7564
3ee6b26b
AD
7565static void handle_cursor_update(struct drm_plane *plane,
7566 struct drm_plane_state *old_plane_state)
e7b07cee 7567{
1348969a 7568 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
7569 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7570 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7571 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7572 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7573 uint64_t address = afb ? afb->address : 0;
7574 struct dc_cursor_position position;
7575 struct dc_cursor_attributes attributes;
7576 int ret;
7577
e7b07cee
HW
7578 if (!plane->state->fb && !old_plane_state->fb)
7579 return;
7580
f1ad2f5e 7581 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d x %d\n",
c12a7ba5
HW
7582 __func__,
7583 amdgpu_crtc->crtc_id,
7584 plane->state->crtc_w,
7585 plane->state->crtc_h);
2a8f6ccb
HW
7586
7587 ret = get_cursor_position(plane, crtc, &position);
7588 if (ret)
7589 return;
7590
7591 if (!position.enable) {
7592 /* turn off cursor */
674e78ac
NK
7593 if (crtc_state && crtc_state->stream) {
7594 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
7595 dc_stream_set_cursor_position(crtc_state->stream,
7596 &position);
674e78ac
NK
7597 mutex_unlock(&adev->dm.dc_lock);
7598 }
2a8f6ccb 7599 return;
e7b07cee 7600 }
e7b07cee 7601
2a8f6ccb
HW
7602 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7603 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7604
c1cefe11 7605 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
7606 attributes.address.high_part = upper_32_bits(address);
7607 attributes.address.low_part = lower_32_bits(address);
7608 attributes.width = plane->state->crtc_w;
7609 attributes.height = plane->state->crtc_h;
7610 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7611 attributes.rotation_angle = 0;
7612 attributes.attribute_flags.value = 0;
7613
03a66367 7614 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 7615
886daac9 7616 if (crtc_state->stream) {
674e78ac 7617 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
7618 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7619 &attributes))
7620 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 7621
2a8f6ccb
HW
7622 if (!dc_stream_set_cursor_position(crtc_state->stream,
7623 &position))
7624 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 7625 mutex_unlock(&adev->dm.dc_lock);
886daac9 7626 }
2a8f6ccb 7627}
e7b07cee
HW
7628
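/*
 * Hand the pending flip event to the pageflip interrupt handler: stash
 * it in the CRTC, mark the flip submitted, and clear the event from the
 * atomic state so it is not signalled twice. Caller must hold the
 * event lock.
 */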
7629static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7630{
7631
7632 assert_spin_locked(&acrtc->base.dev->event_lock);
7633 WARN_ON(acrtc->event);
7634
7635 acrtc->event = acrtc->base.state->event;
7636
7637 /* Set the flip status */
7638 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7639
7640 /* Mark this event as consumed */
7641 acrtc->base.state->event = NULL;
7642
7643 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7644 acrtc->crtc_id);
7645}
7646
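/*
 * Per-flip VRR housekeeping: feed the flip timestamp to the freesync
 * module, rebuild the VRR infopacket, and record whether the timing or
 * infopacket changed so the commit path knows a stream update is
 * required. Runs under the event lock because the IRQ handler shares
 * dm_irq_params.
 */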
bb47de73
NK
7647static void update_freesync_state_on_stream(
7648 struct amdgpu_display_manager *dm,
7649 struct dm_crtc_state *new_crtc_state,
180db303
NK
7650 struct dc_stream_state *new_stream,
7651 struct dc_plane_state *surface,
7652 u32 flip_timestamp_in_us)
bb47de73 7653{
09aef2c4 7654 struct mod_vrr_params vrr_params;
bb47de73 7655 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 7656 struct amdgpu_device *adev = dm->adev;
585d450c 7657 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7658 unsigned long flags;
4cda3243 7659 bool pack_sdp_v1_3 = false;
bb47de73
NK
7660
7661 if (!new_stream)
7662 return;
7663
7664 /*
7665 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7666 * For now it's sufficient to just guard against these conditions.
7667 */
7668
7669 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7670 return;
7671
4a580877 7672 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 7673 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 7674
180db303
NK
7675 if (surface) {
7676 mod_freesync_handle_preflip(
7677 dm->freesync_module,
7678 surface,
7679 new_stream,
7680 flip_timestamp_in_us,
7681 &vrr_params);
09aef2c4
MK
7682
7683 if (adev->family < AMDGPU_FAMILY_AI &&
7684 amdgpu_dm_vrr_active(new_crtc_state)) {
7685 mod_freesync_handle_v_update(dm->freesync_module,
7686 new_stream, &vrr_params);
e63e2491
EB
7687
7688 /* Need to call this before the frame ends. */
7689 dc_stream_adjust_vmin_vmax(dm->dc,
7690 new_crtc_state->stream,
7691 &vrr_params.adjust);
09aef2c4 7692 }
180db303 7693 }
bb47de73
NK
7694
7695 mod_freesync_build_vrr_infopacket(
7696 dm->freesync_module,
7697 new_stream,
180db303 7698 &vrr_params,
ecd0136b
HT
7699 PACKET_TYPE_VRR,
7700 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
7701 &vrr_infopacket,
7702 pack_sdp_v1_3);
bb47de73 7703
8a48b44c 7704 new_crtc_state->freesync_timing_changed |=
585d450c 7705 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
7706 &vrr_params.adjust,
7707 sizeof(vrr_params.adjust)) != 0);
bb47de73 7708
8a48b44c 7709 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
7710 (memcmp(&new_crtc_state->vrr_infopacket,
7711 &vrr_infopacket,
7712 sizeof(vrr_infopacket)) != 0);
7713
585d450c 7714 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
7715 new_crtc_state->vrr_infopacket = vrr_infopacket;
7716
585d450c 7717 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
7718 new_stream->vrr_infopacket = vrr_infopacket;
7719
7720 if (new_crtc_state->freesync_vrr_info_changed)
7721 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7722 new_crtc_state->base.crtc->base.id,
7723 (int)new_crtc_state->base.vrr_enabled,
180db303 7724 (int)vrr_params.state);
09aef2c4 7725
4a580877 7726 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
7727}
7728
585d450c 7729static void update_stream_irq_parameters(
e854194c
MK
7730 struct amdgpu_display_manager *dm,
7731 struct dm_crtc_state *new_crtc_state)
7732{
7733 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 7734 struct mod_vrr_params vrr_params;
e854194c 7735 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 7736 struct amdgpu_device *adev = dm->adev;
585d450c 7737 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7738 unsigned long flags;
e854194c
MK
7739
7740 if (!new_stream)
7741 return;
7742
7743 /*
7744 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7745 * For now it's sufficient to just guard against these conditions.
7746 */
7747 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7748 return;
7749
4a580877 7750 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 7751 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 7752
e854194c
MK
7753 if (new_crtc_state->vrr_supported &&
7754 config.min_refresh_in_uhz &&
7755 config.max_refresh_in_uhz) {
c0ea73a4
AP
7756 config.state = new_crtc_state->base.vrr_enabled ?
7757 VRR_STATE_ACTIVE_VARIABLE :
7758 VRR_STATE_INACTIVE;
e854194c
MK
7759 } else {
7760 config.state = VRR_STATE_UNSUPPORTED;
7761 }
7762
7763 mod_freesync_build_vrr_params(dm->freesync_module,
7764 new_stream,
7765 &config, &vrr_params);
7766
7767 new_crtc_state->freesync_timing_changed |=
585d450c
AP
7768 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7769 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 7770
585d450c
AP
7771 new_crtc_state->freesync_config = config;
7772 /* Copy state for access from DM IRQ handler */
7773 acrtc->dm_irq_params.freesync_config = config;
7774 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7775 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 7776 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
7777}
7778
66b0c973
MK
7779static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7780 struct dm_crtc_state *new_state)
7781{
7782 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7783 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7784
7785 if (!old_vrr_active && new_vrr_active) {
7786 /* Transition VRR inactive -> active:
7787 * While VRR is active, we must not disable vblank irq, as a
 7788 * re-enable after disable would compute bogus vblank/pflip
 7789 * timestamps if the disable happened inside the display front-porch.
d2574c33
MK
7790 *
7791 * We also need vupdate irq for the actual core vblank handling
7792 * at end of vblank.
66b0c973 7793 */
d2574c33 7794 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
7795 drm_crtc_vblank_get(new_state->base.crtc);
7796 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7797 __func__, new_state->base.crtc->base.id);
7798 } else if (old_vrr_active && !new_vrr_active) {
7799 /* Transition VRR active -> inactive:
7800 * Allow vblank irq disable again for fixed refresh rate.
7801 */
d2574c33 7802 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
7803 drm_crtc_vblank_put(new_state->base.crtc);
7804 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7805 __func__, new_state->base.crtc->base.id);
7806 }
7807}
7808
8ad27806
NK
7809static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7810{
7811 struct drm_plane *plane;
7812 struct drm_plane_state *old_plane_state, *new_plane_state;
7813 int i;
7814
7815 /*
7816 * TODO: Make this per-stream so we don't issue redundant updates for
7817 * commits with multiple streams.
7818 */
7819 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7820 new_plane_state, i)
7821 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7822 handle_cursor_update(plane, old_plane_state);
7823}
7824
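/*
 * Program all plane updates for one CRTC: build a dc_surface_update
 * bundle for each changed plane, wait on fences and on the target
 * vblank when flipping, arm the pageflip event, then hand the bundle to
 * DC in a single dc_commit_updates_for_stream() call. PSR is disabled
 * around non-fast updates and set up or re-enabled afterwards where
 * supported.
 */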
3be5262e 7825static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 7826 struct dc_state *dc_state,
3ee6b26b
AD
7827 struct drm_device *dev,
7828 struct amdgpu_display_manager *dm,
7829 struct drm_crtc *pcrtc,
420cd472 7830 bool wait_for_vblank)
e7b07cee 7831{
263a4feb 7832 uint32_t i;
8a48b44c 7833 uint64_t timestamp_ns;
e7b07cee 7834 struct drm_plane *plane;
0bc9706d 7835 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 7836 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
7837 struct drm_crtc_state *new_pcrtc_state =
7838 drm_atomic_get_new_crtc_state(state, pcrtc);
7839 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
7840 struct dm_crtc_state *dm_old_crtc_state =
7841 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 7842 int planes_count = 0, vpos, hpos;
570c91d5 7843 long r;
e7b07cee 7844 unsigned long flags;
8a48b44c 7845 struct amdgpu_bo *abo;
fdd1fe57
MK
7846 uint32_t target_vblank, last_flip_vblank;
7847 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 7848 bool pflip_present = false;
bc7f670e
DF
7849 struct {
7850 struct dc_surface_update surface_updates[MAX_SURFACES];
7851 struct dc_plane_info plane_infos[MAX_SURFACES];
7852 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 7853 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 7854 struct dc_stream_update stream_update;
74aa7bd4 7855 } *bundle;
bc7f670e 7856
74aa7bd4 7857 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 7858
74aa7bd4
DF
7859 if (!bundle) {
7860 dm_error("Failed to allocate update bundle\n");
4b510503
NK
7861 goto cleanup;
7862 }
e7b07cee 7863
8ad27806
NK
7864 /*
7865 * Disable the cursor first if we're disabling all the planes.
7866 * It'll remain on the screen after the planes are re-enabled
7867 * if we don't.
7868 */
7869 if (acrtc_state->active_planes == 0)
7870 amdgpu_dm_commit_cursors(state);
7871
e7b07cee 7872 /* update planes when needed */
263a4feb 7873 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 7874 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 7875 struct drm_crtc_state *new_crtc_state;
0bc9706d 7876 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 7877 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 7878 bool plane_needs_flip;
c7af5f77 7879 struct dc_plane_state *dc_plane;
54d76575 7880 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 7881
80c218d5
NK
7882 /* Cursor plane is handled after stream updates */
7883 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 7884 continue;
e7b07cee 7885
f5ba60fe
DD
7886 if (!fb || !crtc || pcrtc != crtc)
7887 continue;
7888
7889 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7890 if (!new_crtc_state->active)
e7b07cee
HW
7891 continue;
7892
bc7f670e 7893 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 7894
74aa7bd4 7895 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 7896 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
7897 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7898 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 7899 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 7900 }
8a48b44c 7901
695af5f9
NK
7902 fill_dc_scaling_info(new_plane_state,
7903 &bundle->scaling_infos[planes_count]);
8a48b44c 7904
695af5f9
NK
7905 bundle->surface_updates[planes_count].scaling_info =
7906 &bundle->scaling_infos[planes_count];
8a48b44c 7907
f5031000 7908 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 7909
f5031000 7910 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 7911
f5031000
DF
7912 if (!plane_needs_flip) {
7913 planes_count += 1;
7914 continue;
7915 }
8a48b44c 7916
2fac0f53
CK
7917 abo = gem_to_amdgpu_bo(fb->obj[0]);
7918
f8308898
AG
7919 /*
7920 * Wait for all fences on this FB. Do limited wait to avoid
7921 * deadlock during GPU reset when this fence will not signal
7922 * but we hold reservation lock for the BO.
7923 */
52791eee 7924 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 7925 false,
f8308898
AG
7926 msecs_to_jiffies(5000));
7927 if (unlikely(r <= 0))
ed8a5fb2 7928 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 7929
695af5f9 7930 fill_dc_plane_info_and_addr(
8ce5d842 7931 dm->adev, new_plane_state,
6eed95b0 7932 afb->tiling_flags,
695af5f9 7933 &bundle->plane_infos[planes_count],
87b7ebc2 7934 &bundle->flip_addrs[planes_count].address,
6eed95b0 7935 afb->tmz_surface, false);
87b7ebc2
RS
7936
7937 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7938 new_plane_state->plane->index,
7939 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
7940
7941 bundle->surface_updates[planes_count].plane_info =
7942 &bundle->plane_infos[planes_count];
8a48b44c 7943
caff0e66
NK
7944 /*
7945 * Only allow immediate flips for fast updates that don't
 7946 * change FB pitch, DCC state, rotation or mirroring.
7947 */
f5031000 7948 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 7949 crtc->state->async_flip &&
caff0e66 7950 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 7951
f5031000
DF
7952 timestamp_ns = ktime_get_ns();
7953 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7954 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7955 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 7956
f5031000
DF
7957 if (!bundle->surface_updates[planes_count].surface) {
7958 DRM_ERROR("No surface for CRTC: id=%d\n",
7959 acrtc_attach->crtc_id);
7960 continue;
bc7f670e
DF
7961 }
7962
f5031000
DF
7963 if (plane == pcrtc->primary)
7964 update_freesync_state_on_stream(
7965 dm,
7966 acrtc_state,
7967 acrtc_state->stream,
7968 dc_plane,
7969 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 7970
f5031000
DF
7971 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7972 __func__,
7973 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7974 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
7975
7976 planes_count += 1;
7977
8a48b44c
DF
7978 }
7979
74aa7bd4 7980 if (pflip_present) {
634092b1
MK
7981 if (!vrr_active) {
7982 /* Use old throttling in non-vrr fixed refresh rate mode
7983 * to keep flip scheduling based on target vblank counts
7984 * working in a backwards compatible way, e.g., for
7985 * clients using the GLX_OML_sync_control extension or
7986 * DRI3/Present extension with defined target_msc.
7987 */
e3eff4b5 7988 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
 7989 } else {
7991 /* For variable refresh rate mode only:
7992 * Get vblank of last completed flip to avoid > 1 vrr
7993 * flips per video frame by use of throttling, but allow
7994 * flip programming anywhere in the possibly large
7995 * variable vrr vblank interval for fine-grained flip
7996 * timing control and more opportunity to avoid stutter
7997 * on late submission of flips.
7998 */
7999 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 8000 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
8001 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8002 }
8003
fdd1fe57 8004 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
8005
8006 /*
8007 * Wait until we're out of the vertical blank period before the one
8008 * targeted by the flip
8009 */
8010 while ((acrtc_attach->enabled &&
8011 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8012 0, &vpos, &hpos, NULL,
8013 NULL, &pcrtc->hwmode)
8014 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8015 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8016 (int)(target_vblank -
e3eff4b5 8017 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
8018 usleep_range(1000, 1100);
8019 }
8020
8fe684e9
NK
8021 /**
8022 * Prepare the flip event for the pageflip interrupt to handle.
8023 *
8024 * This only works in the case where we've already turned on the
8025 * appropriate hardware blocks (eg. HUBP) so in the transition case
8026 * from 0 -> n planes we have to skip a hardware generated event
8027 * and rely on sending it from software.
8028 */
8029 if (acrtc_attach->base.state->event &&
8030 acrtc_state->active_planes > 0) {
8a48b44c
DF
8031 drm_crtc_vblank_get(pcrtc);
8032
8033 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8034
8035 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8036 prepare_flip_isr(acrtc_attach);
8037
8038 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8039 }
8040
8041 if (acrtc_state->stream) {
8a48b44c 8042 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 8043 bundle->stream_update.vrr_infopacket =
8a48b44c 8044 &acrtc_state->stream->vrr_infopacket;
e7b07cee 8045 }
e7b07cee
HW
8046 }
8047
bc92c065 8048 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
8049 if ((planes_count || acrtc_state->active_planes == 0) &&
8050 acrtc_state->stream) {
b6e881c9 8051 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 8052 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
8053 bundle->stream_update.src = acrtc_state->stream->src;
8054 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
8055 }
8056
cf020d49
NK
8057 if (new_pcrtc_state->color_mgmt_changed) {
8058 /*
8059 * TODO: This isn't fully correct since we've actually
8060 * already modified the stream in place.
8061 */
8062 bundle->stream_update.gamut_remap =
8063 &acrtc_state->stream->gamut_remap_matrix;
8064 bundle->stream_update.output_csc_transform =
8065 &acrtc_state->stream->csc_color_matrix;
8066 bundle->stream_update.out_transfer_func =
8067 acrtc_state->stream->out_transfer_func;
8068 }
bc7f670e 8069
8a48b44c 8070 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 8071 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 8072 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 8073
e63e2491
EB
8074 /*
8075 * If FreeSync state on the stream has changed then we need to
8076 * re-adjust the min/max bounds now that DC doesn't handle this
8077 * as part of commit.
8078 */
c0ea73a4
AP
8079 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
8080 amdgpu_dm_vrr_active(acrtc_state)) {
e63e2491
EB
8081 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8082 dc_stream_adjust_vmin_vmax(
8083 dm->dc, acrtc_state->stream,
585d450c 8084 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
8085 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8086 }
bc7f670e 8087 mutex_lock(&dm->dc_lock);
8c322309 8088 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 8089 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
8090 amdgpu_dm_psr_disable(acrtc_state->stream);
8091
bc7f670e 8092 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 8093 bundle->surface_updates,
bc7f670e
DF
8094 planes_count,
8095 acrtc_state->stream,
263a4feb
AJ
8096 &bundle->stream_update,
8097 dc_state);
8c322309 8098
8fe684e9
NK
8099 /**
8100 * Enable or disable the interrupts on the backend.
8101 *
8102 * Most pipes are put into power gating when unused.
8103 *
8104 * When power gating is enabled on a pipe we lose the
8105 * interrupt enablement state when power gating is disabled.
8106 *
8107 * So we need to update the IRQ control state in hardware
8108 * whenever the pipe turns on (since it could be previously
8109 * power gated) or off (since some pipes can't be power gated
8110 * on some ASICs).
8111 */
8112 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
8113 dm_update_pflip_irq_state(drm_to_adev(dev),
8114 acrtc_attach);
8fe684e9 8115
8c322309 8116 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 8117 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 8118 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309
RL
8119 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8120 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
d1ebfdd8
WW
8121 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8122 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8c322309
RL
8123 amdgpu_dm_psr_enable(acrtc_state->stream);
8124 }
8125
bc7f670e 8126 mutex_unlock(&dm->dc_lock);
e7b07cee 8127 }
4b510503 8128
8ad27806
NK
8129 /*
8130 * Update cursor state *after* programming all the planes.
8131 * This avoids redundant programming in the case where we're going
8132 * to be disabling a single plane - those pipes are being disabled.
8133 */
8134 if (acrtc_state->active_planes)
8135 amdgpu_dm_commit_cursors(state);
80c218d5 8136
4b510503 8137cleanup:
74aa7bd4 8138 kfree(bundle);
e7b07cee
HW
8139}
8140
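/*
 * Notify the audio component of ELD changes: connectors whose CRTC
 * routing changed are reported as removals first, then connectors with
 * a newly routed, active stream are reported with that stream's audio
 * instance.
 */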
6ce8f316
NK
8141static void amdgpu_dm_commit_audio(struct drm_device *dev,
8142 struct drm_atomic_state *state)
8143{
1348969a 8144 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
8145 struct amdgpu_dm_connector *aconnector;
8146 struct drm_connector *connector;
8147 struct drm_connector_state *old_con_state, *new_con_state;
8148 struct drm_crtc_state *new_crtc_state;
8149 struct dm_crtc_state *new_dm_crtc_state;
8150 const struct dc_stream_status *status;
8151 int i, inst;
8152
 8153 /* Notify audio device removals. */
8154 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8155 if (old_con_state->crtc != new_con_state->crtc) {
8156 /* CRTC changes require notification. */
8157 goto notify;
8158 }
8159
8160 if (!new_con_state->crtc)
8161 continue;
8162
8163 new_crtc_state = drm_atomic_get_new_crtc_state(
8164 state, new_con_state->crtc);
8165
8166 if (!new_crtc_state)
8167 continue;
8168
8169 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8170 continue;
8171
8172 notify:
8173 aconnector = to_amdgpu_dm_connector(connector);
8174
8175 mutex_lock(&adev->dm.audio_lock);
8176 inst = aconnector->audio_inst;
8177 aconnector->audio_inst = -1;
8178 mutex_unlock(&adev->dm.audio_lock);
8179
8180 amdgpu_dm_audio_eld_notify(adev, inst);
8181 }
8182
8183 /* Notify audio device additions. */
8184 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8185 if (!new_con_state->crtc)
8186 continue;
8187
8188 new_crtc_state = drm_atomic_get_new_crtc_state(
8189 state, new_con_state->crtc);
8190
8191 if (!new_crtc_state)
8192 continue;
8193
8194 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8195 continue;
8196
8197 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8198 if (!new_dm_crtc_state->stream)
8199 continue;
8200
8201 status = dc_stream_get_status(new_dm_crtc_state->stream);
8202 if (!status)
8203 continue;
8204
8205 aconnector = to_amdgpu_dm_connector(connector);
8206
8207 mutex_lock(&adev->dm.audio_lock);
8208 inst = status->audio_inst;
8209 aconnector->audio_inst = inst;
8210 mutex_unlock(&adev->dm.audio_lock);
8211
8212 amdgpu_dm_audio_eld_notify(adev, inst);
8213 }
8214}
8215
1f6010a9 8216/*
27b3f4fc
LSL
8217 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8218 * @crtc_state: the DRM CRTC state
8219 * @stream_state: the DC stream state.
8220 *
 8221 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8222 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8223 */
8224static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8225 struct dc_stream_state *stream_state)
8226{
b9952f93 8227 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 8228}
e7b07cee 8229
b8592b48
LL
8230/**
8231 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8232 * @state: The atomic state to commit
8233 *
8234 * This will tell DC to commit the constructed DC state from atomic_check,
 8235 * programming the hardware. Any failure here implies a hardware failure, since
8236 * atomic check should have filtered anything non-kosher.
8237 */
7578ecda 8238static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
8239{
8240 struct drm_device *dev = state->dev;
1348969a 8241 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8242 struct amdgpu_display_manager *dm = &adev->dm;
8243 struct dm_atomic_state *dm_state;
eb3dc897 8244 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 8245 uint32_t i, j;
5cc6dcbd 8246 struct drm_crtc *crtc;
0bc9706d 8247 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
8248 unsigned long flags;
8249 bool wait_for_vblank = true;
8250 struct drm_connector *connector;
c2cea706 8251 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 8252 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 8253 int crtc_disable_count = 0;
6ee90e88 8254 bool mode_set_reset_required = false;
e7b07cee 8255
e8a98235
RS
8256 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8257
e7b07cee
HW
8258 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8259
eb3dc897
NK
8260 dm_state = dm_atomic_get_new_state(state);
8261 if (dm_state && dm_state->context) {
8262 dc_state = dm_state->context;
8263 } else {
8264 /* No state changes, retain current state. */
813d20dc 8265 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
8266 ASSERT(dc_state_temp);
8267 dc_state = dc_state_temp;
8268 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8269 }
e7b07cee 8270
6d90a208
AP
8271 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8272 new_crtc_state, i) {
8273 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8274
8275 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8276
8277 if (old_crtc_state->active &&
8278 (!new_crtc_state->active ||
8279 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8280 manage_dm_interrupts(adev, acrtc, false);
8281 dc_stream_release(dm_old_crtc_state->stream);
8282 }
8283 }
8284
8976f73b
RS
8285 drm_atomic_helper_calc_timestamping_constants(state);
8286
e7b07cee 8287 /* update changed items */
0bc9706d 8288 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 8289 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8290
54d76575
LSL
8291 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8292 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 8293
f1ad2f5e 8294 DRM_DEBUG_DRIVER(
e7b07cee
HW
8295 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8296 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8297 "connectors_changed:%d\n",
8298 acrtc->crtc_id,
0bc9706d
LSL
8299 new_crtc_state->enable,
8300 new_crtc_state->active,
8301 new_crtc_state->planes_changed,
8302 new_crtc_state->mode_changed,
8303 new_crtc_state->active_changed,
8304 new_crtc_state->connectors_changed);
e7b07cee 8305
5c68c652
VL
8306 /* Disable cursor if disabling crtc */
8307 if (old_crtc_state->active && !new_crtc_state->active) {
8308 struct dc_cursor_position position;
8309
8310 memset(&position, 0, sizeof(position));
8311 mutex_lock(&dm->dc_lock);
8312 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8313 mutex_unlock(&dm->dc_lock);
8314 }
8315
27b3f4fc
LSL
8316 /* Copy all transient state flags into dc state */
8317 if (dm_new_crtc_state->stream) {
8318 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8319 dm_new_crtc_state->stream);
8320 }
8321
e7b07cee
HW
8322 /* handles headless hotplug case, updating new_state and
8323 * aconnector as needed
8324 */
8325
54d76575 8326 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 8327
f1ad2f5e 8328 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8329
54d76575 8330 if (!dm_new_crtc_state->stream) {
e7b07cee 8331 /*
b830ebc9
HW
 8332 * This could happen because of issues with
 8333 * userspace notification delivery.
 8334 * In that case userspace tries to set a mode on
1f6010a9
DF
 8335 * a display which is in fact disconnected;
 8336 * dc_sink is NULL on the aconnector then.
b830ebc9
HW
 8337 * We expect a mode reset to come soon.
 8338 *
 8339 * This can also happen when an unplug is done
 8340 * during the resume sequence.
 8341 *
 8342 * In either case we want to pretend we still
 8343 * have a sink to keep the pipe running, so that
 8344 * the hw state stays consistent with the sw state.
8345 */
f1ad2f5e 8346 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
8347 __func__, acrtc->base.base.id);
8348 continue;
8349 }
8350
54d76575
LSL
8351 if (dm_old_crtc_state->stream)
8352 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 8353
97028037
LP
8354 pm_runtime_get_noresume(dev->dev);
8355
e7b07cee 8356 acrtc->enabled = true;
0bc9706d
LSL
8357 acrtc->hw_mode = new_crtc_state->mode;
8358 crtc->hwmode = new_crtc_state->mode;
6ee90e88 8359 mode_set_reset_required = true;
0bc9706d 8360 } else if (modereset_required(new_crtc_state)) {
f1ad2f5e 8361 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8362 /* i.e. reset mode */
6ee90e88 8363 if (dm_old_crtc_state->stream)
54d76575 8364 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6ee90e88 8365 mode_set_reset_required = true;
e7b07cee
HW
8366 }
8367 } /* for_each_crtc_in_state() */
8368
eb3dc897 8369 if (dc_state) {
6ee90e88 8370 /* if there is a mode set or reset, disable eDP PSR */
8371 if (mode_set_reset_required)
8372 amdgpu_dm_psr_disable_all(dm);
8373
eb3dc897 8374 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 8375 mutex_lock(&dm->dc_lock);
eb3dc897 8376 WARN_ON(!dc_commit_state(dm->dc, dc_state));
674e78ac 8377 mutex_unlock(&dm->dc_lock);
fa2123db 8378 }
e7b07cee 8379
0bc9706d 8380 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8381 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8382
54d76575 8383 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8384
54d76575 8385 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 8386 const struct dc_stream_status *status =
54d76575 8387 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8388
eb3dc897 8389 if (!status)
09f609c3
LL
8390 status = dc_stream_get_status_from_state(dc_state,
8391 dm_new_crtc_state->stream);
e7b07cee 8392 if (!status)
54d76575 8393 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
8394 else
8395 acrtc->otg_inst = status->primary_otg_inst;
8396 }
8397 }
0c8620d6
BL
8398#ifdef CONFIG_DRM_AMD_DC_HDCP
8399 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8400 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8401 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8402 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8403
8404 new_crtc_state = NULL;
8405
8406 if (acrtc)
8407 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8408
8409 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8410
8411 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8412 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8413 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8414 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 8415 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
8416 continue;
8417 }
8418
8419 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
8420 hdcp_update_display(
8421 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 8422 new_con_state->hdcp_content_type,
0e86d3d4 8423 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
0c8620d6
BL
8424 }
8425#endif
e7b07cee 8426
02d6a6fc 8427 /* Handle connector state changes */
c2cea706 8428 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
8429 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8430 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8431 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
263a4feb 8432 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 8433 struct dc_stream_update stream_update;
b232d4ed 8434 struct dc_info_packet hdr_packet;
e7b07cee 8435 struct dc_stream_status *status = NULL;
b232d4ed 8436 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 8437
263a4feb 8438 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
8439 memset(&stream_update, 0, sizeof(stream_update));
8440
44d09c6a 8441 if (acrtc) {
0bc9706d 8442 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
8443 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8444 }
0bc9706d 8445
e7b07cee 8446 /* Skip any modesets/resets */
0bc9706d 8447 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
8448 continue;
8449
54d76575 8450 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
8451 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8452
b232d4ed
NK
8453 scaling_changed = is_scaling_state_different(dm_new_con_state,
8454 dm_old_con_state);
8455
8456 abm_changed = dm_new_crtc_state->abm_level !=
8457 dm_old_crtc_state->abm_level;
8458
8459 hdr_changed =
8460 is_hdr_metadata_different(old_con_state, new_con_state);
8461
8462 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 8463 continue;
e7b07cee 8464
b6e881c9 8465 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 8466 if (scaling_changed) {
02d6a6fc 8467 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 8468 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 8469
02d6a6fc
DF
8470 stream_update.src = dm_new_crtc_state->stream->src;
8471 stream_update.dst = dm_new_crtc_state->stream->dst;
8472 }
8473
b232d4ed 8474 if (abm_changed) {
02d6a6fc
DF
8475 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8476
8477 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8478 }
70e8ffc5 8479
b232d4ed
NK
8480 if (hdr_changed) {
8481 fill_hdr_info_packet(new_con_state, &hdr_packet);
8482 stream_update.hdr_static_metadata = &hdr_packet;
8483 }
8484
54d76575 8485 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8486 WARN_ON(!status);
3be5262e 8487 WARN_ON(!status->plane_count);
e7b07cee 8488
02d6a6fc
DF
8489 /*
8490 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8491 * Here we create an empty update on each plane.
8492 * To fix this, DC should permit updating only stream properties.
8493 */
8494 for (j = 0; j < status->plane_count; j++)
263a4feb 8495 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
8496
8497
8498 mutex_lock(&dm->dc_lock);
8499 dc_commit_updates_for_stream(dm->dc,
263a4feb 8500 dummy_updates,
02d6a6fc
DF
8501 status->plane_count,
8502 dm_new_crtc_state->stream,
263a4feb
AJ
8503 &stream_update,
8504 dc_state);
02d6a6fc 8505 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
8506 }
8507
b5e83f6f 8508 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 8509 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 8510 new_crtc_state, i) {
fe2a1965
LP
8511 if (old_crtc_state->active && !new_crtc_state->active)
8512 crtc_disable_count++;
8513
54d76575 8514 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 8515 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 8516
585d450c
AP
8517 /* For freesync config update on crtc state and params for irq */
8518 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 8519
66b0c973
MK
8520 /* Handle vrr on->off / off->on transitions */
8521 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8522 dm_new_crtc_state);
e7b07cee
HW
8523 }
8524
8fe684e9
NK
8525 /**
8526 * Enable interrupts for CRTCs that are newly enabled or went through
 8527 * a modeset. This is deliberately deferred until after the front end
 8528 * state has been modified, so the OTG is already on and the IRQ
 8529 * handlers don't access stale or invalid state.
8530 */
8531 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8532 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 8533#ifdef CONFIG_DEBUG_FS
86bc2219 8534 bool configure_crc = false;
8e7b6fee
WL
8535 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8536#endif
585d450c
AP
8537 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8538
8fe684e9
NK
8539 if (new_crtc_state->active &&
8540 (!old_crtc_state->active ||
8541 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
8542 dc_stream_retain(dm_new_crtc_state->stream);
8543 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 8544 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 8545
24eb9374 8546#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
8547 /**
8548 * Frontend may have changed so reapply the CRC capture
8549 * settings for the stream.
8550 */
8551 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8e7b6fee
WL
8552 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8553 cur_crc_src = acrtc->dm_irq_params.crc_src;
8554 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
c920888c 8555
8e7b6fee 8556 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
8557 configure_crc = true;
8558#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8559 if (amdgpu_dm_crc_window_is_activated(crtc))
8560 configure_crc = false;
8561#endif
e2881d6d 8562 }
86bc2219
WL
8563
8564 if (configure_crc)
8565 amdgpu_dm_crtc_configure_crc_source(
8566 crtc, dm_new_crtc_state, cur_crc_src);
24eb9374 8567#endif
8fe684e9
NK
8568 }
8569 }
e7b07cee 8570
420cd472 8571 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 8572 if (new_crtc_state->async_flip)
420cd472
DF
8573 wait_for_vblank = false;
8574
e7b07cee 8575 /* update planes when needed per crtc*/
5cc6dcbd 8576 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 8577 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8578
54d76575 8579 if (dm_new_crtc_state->stream)
eb3dc897 8580 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 8581 dm, crtc, wait_for_vblank);
e7b07cee
HW
8582 }
8583
6ce8f316
NK
8584 /* Update audio instances for each connector. */
8585 amdgpu_dm_commit_audio(dev, state);
8586
e7b07cee
HW
8587 /*
 8588 * Send a vblank event for every event not handled in the flip, and
 8589 * mark the event consumed for drm_atomic_helper_commit_hw_done
8590 */
4a580877 8591 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 8592 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8593
0bc9706d
LSL
8594 if (new_crtc_state->event)
8595 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 8596
0bc9706d 8597 new_crtc_state->event = NULL;
e7b07cee 8598 }
4a580877 8599 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 8600
29c8f234
LL
8601 /* Signal HW programming completion */
8602 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
8603
8604 if (wait_for_vblank)
320a1274 8605 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
8606
8607 drm_atomic_helper_cleanup_planes(dev, state);
97028037 8608
5f6fab24
AD
8609 /* return the stolen vga memory back to VRAM */
8610 if (!adev->mman.keep_stolen_vga_memory)
8611 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8612 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8613
1f6010a9
DF
8614 /*
8615 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
8616 * so we can put the GPU into runtime suspend if we're not driving any
8617 * displays anymore
8618 */
fe2a1965
LP
8619 for (i = 0; i < crtc_disable_count; i++)
8620 pm_runtime_put_autosuspend(dev->dev);
97028037 8621 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
8622
8623 if (dc_state_temp)
8624 dc_release_state(dc_state_temp);
e7b07cee
HW
8625}
8626
8627
8628static int dm_force_atomic_commit(struct drm_connector *connector)
8629{
8630 int ret = 0;
8631 struct drm_device *ddev = connector->dev;
8632 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8633 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8634 struct drm_plane *plane = disconnected_acrtc->base.primary;
8635 struct drm_connector_state *conn_state;
8636 struct drm_crtc_state *crtc_state;
8637 struct drm_plane_state *plane_state;
8638
8639 if (!state)
8640 return -ENOMEM;
8641
8642 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8643
 8644 /* Construct an atomic state to restore the previous display settings */
8645
8646 /*
8647 * Attach connectors to drm_atomic_state
8648 */
8649 conn_state = drm_atomic_get_connector_state(state, connector);
8650
8651 ret = PTR_ERR_OR_ZERO(conn_state);
8652 if (ret)
2dc39051 8653 goto out;
e7b07cee
HW
8654
8655 /* Attach crtc to drm_atomic_state*/
8656 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8657
8658 ret = PTR_ERR_OR_ZERO(crtc_state);
8659 if (ret)
2dc39051 8660 goto out;
e7b07cee
HW
8661
8662 /* force a restore */
8663 crtc_state->mode_changed = true;
8664
8665 /* Attach plane to drm_atomic_state */
8666 plane_state = drm_atomic_get_plane_state(state, plane);
8667
8668 ret = PTR_ERR_OR_ZERO(plane_state);
8669 if (ret)
2dc39051 8670 goto out;
e7b07cee
HW
8671
8672 /* Call commit internally with the state we just constructed */
8673 ret = drm_atomic_commit(state);
e7b07cee 8674
2dc39051 8675out:
e7b07cee 8676 drm_atomic_state_put(state);
2dc39051
VL
8677 if (ret)
8678 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
8679
8680 return ret;
8681}
8682
8683/*
1f6010a9
DF
 8684 * This function handles all cases when a set mode does not come upon hotplug.
 8685 * This includes when a display is unplugged then plugged back into the
 8686 * same port and when running without usermode desktop manager support
e7b07cee 8687 */
3ee6b26b
AD
8688void dm_restore_drm_connector_state(struct drm_device *dev,
8689 struct drm_connector *connector)
e7b07cee 8690{
c84dec2f 8691 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
8692 struct amdgpu_crtc *disconnected_acrtc;
8693 struct dm_crtc_state *acrtc_state;
8694
8695 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8696 return;
8697
8698 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
8699 if (!disconnected_acrtc)
8700 return;
e7b07cee 8701
70e8ffc5
HW
8702 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8703 if (!acrtc_state->stream)
e7b07cee
HW
8704 return;
8705
8706 /*
 8707 * If the previous sink is not released and is different from the current,
 8708 * we deduce we are in a state where we cannot rely on a usermode call
 8709 * to turn on the display, so we do it here
8710 */
8711 if (acrtc_state->stream->sink != aconnector->dc_sink)
8712 dm_force_atomic_commit(&aconnector->base);
8713}
8714
1f6010a9 8715/*
e7b07cee
HW
 8716 * Grabs all modesetting locks to serialize against any blocking commits,
 8717 * and waits for completion of all non-blocking commits.
8718 */
3ee6b26b
AD
8719static int do_aquire_global_lock(struct drm_device *dev,
8720 struct drm_atomic_state *state)
e7b07cee
HW
8721{
8722 struct drm_crtc *crtc;
8723 struct drm_crtc_commit *commit;
8724 long ret;
8725
1f6010a9
DF
8726 /*
 8727 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
 8728 * ensure that when the framework releases it the
 8729 * extra locks we are taking here will get released too
8730 */
8731 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8732 if (ret)
8733 return ret;
8734
8735 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8736 spin_lock(&crtc->commit_lock);
8737 commit = list_first_entry_or_null(&crtc->commit_list,
8738 struct drm_crtc_commit, commit_entry);
8739 if (commit)
8740 drm_crtc_commit_get(commit);
8741 spin_unlock(&crtc->commit_lock);
8742
8743 if (!commit)
8744 continue;
8745
1f6010a9
DF
8746 /*
8747 * Make sure all pending HW programming completed and
e7b07cee
HW
8748 * page flips done
8749 */
8750 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8751
8752 if (ret > 0)
8753 ret = wait_for_completion_interruptible_timeout(
8754 &commit->flip_done, 10*HZ);
8755
8756 if (ret == 0)
8757 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 8758 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
8759
8760 drm_crtc_commit_put(commit);
8761 }
8762
8763 return ret < 0 ? ret : 0;
8764}
8765
bb47de73
NK
8766static void get_freesync_config_for_crtc(
8767 struct dm_crtc_state *new_crtc_state,
8768 struct dm_connector_state *new_con_state)
98e6436d
AK
8769{
8770 struct mod_freesync_config config = {0};
98e6436d
AK
8771 struct amdgpu_dm_connector *aconnector =
8772 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 8773 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 8774 int vrefresh = drm_mode_vrefresh(mode);
98e6436d 8775
a057ec46 8776 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
8777 vrefresh >= aconnector->min_vfreq &&
8778 vrefresh <= aconnector->max_vfreq;
bb47de73 8779
a057ec46
IB
8780 if (new_crtc_state->vrr_supported) {
8781 new_crtc_state->stream->ignore_msa_timing_param = true;
c0ea73a4
AP
8782 config.state = new_crtc_state->base.vrr_enabled ?
8783 VRR_STATE_ACTIVE_VARIABLE :
8784 VRR_STATE_INACTIVE;
8785 config.min_refresh_in_uhz =
8786 aconnector->min_vfreq * 1000000;
8787 config.max_refresh_in_uhz =
8788 aconnector->max_vfreq * 1000000;
69ff8845 8789 config.vsif_supported = true;
180db303 8790 config.btr = true;
6f59f229 8791 }
c0ea73a4 8792
bb47de73
NK
8793 new_crtc_state->freesync_config = config;
8794}
98e6436d 8795
bb47de73
NK
8796static void reset_freesync_config_for_crtc(
8797 struct dm_crtc_state *new_crtc_state)
8798{
8799 new_crtc_state->vrr_supported = false;
98e6436d 8800
bb47de73
NK
8801 memset(&new_crtc_state->vrr_infopacket, 0,
8802 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
8803}
8804
4b9674e5
LL
8805static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8806 struct drm_atomic_state *state,
8807 struct drm_crtc *crtc,
8808 struct drm_crtc_state *old_crtc_state,
8809 struct drm_crtc_state *new_crtc_state,
8810 bool enable,
8811 bool *lock_and_validation_needed)
e7b07cee 8812{
eb3dc897 8813 struct dm_atomic_state *dm_state = NULL;
54d76575 8814 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 8815 struct dc_stream_state *new_stream;
62f55537 8816 int ret = 0;
d4d4a645 8817
1f6010a9
DF
8818 /*
8819 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8820 * update changed items
8821 */
4b9674e5
LL
8822 struct amdgpu_crtc *acrtc = NULL;
8823 struct amdgpu_dm_connector *aconnector = NULL;
8824 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8825 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 8826
4b9674e5 8827 new_stream = NULL;
9635b754 8828
4b9674e5
LL
8829 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8830 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8831 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 8832 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 8833
4b9674e5
LL
8834 /* TODO This hack should go away */
8835 if (aconnector && enable) {
8836 /* Make sure fake sink is created in plug-in scenario */
8837 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8838 &aconnector->base);
8839 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8840 &aconnector->base);
19f89e23 8841
4b9674e5
LL
8842 if (IS_ERR(drm_new_conn_state)) {
8843 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8844 goto fail;
8845 }
19f89e23 8846
4b9674e5
LL
8847 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8848 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 8849
02d35a67
JFZ
8850 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8851 goto skip_modeset;
8852
cbd14ae7
SW
8853 new_stream = create_validate_stream_for_sink(aconnector,
8854 &new_crtc_state->mode,
8855 dm_new_conn_state,
8856 dm_old_crtc_state->stream);
19f89e23 8857
4b9674e5
LL
8858 /*
 8859 * We can have no stream on ACTION_SET if a display
 8860 * was disconnected during S3. In that case it is not an
 8861 * error: the OS will be updated after detection and
 8862 * will do the right thing on the next atomic commit
8863 */
19f89e23 8864
4b9674e5
LL
8865 if (!new_stream) {
8866 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8867 __func__, acrtc->base.base.id);
8868 ret = -ENOMEM;
8869 goto fail;
8870 }
e7b07cee 8871
3d4e52d0
VL
8872 /*
8873 * TODO: Check VSDB bits to decide whether this should
8874 * be enabled or not.
8875 */
8876 new_stream->triggered_crtc_reset.enabled =
8877 dm->force_timing_sync;
8878
4b9674e5 8879 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 8880
88694af9
NK
8881 ret = fill_hdr_info_packet(drm_new_conn_state,
8882 &new_stream->hdr_static_metadata);
8883 if (ret)
8884 goto fail;
8885
7e930949
NK
8886 /*
8887 * If we already removed the old stream from the context
8888 * (and set the new stream to NULL) then we can't reuse
8889 * the old stream even if the stream and scaling are unchanged.
 8890 * Otherwise we'll hit the BUG_ON and get a black screen.
8891 *
8892 * TODO: Refactor this function to allow this check to work
8893 * in all conditions.
8894 */
8895 if (dm_new_crtc_state->stream &&
8896 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
8897 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8898 new_crtc_state->mode_changed = false;
8899 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8900 new_crtc_state->mode_changed);
62f55537 8901 }
4b9674e5 8902 }
b830ebc9 8903
02d35a67 8904 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
8905 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8906 goto skip_modeset;
e7b07cee 8907
4b9674e5
LL
8908 DRM_DEBUG_DRIVER(
8909 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8910 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8911 "connectors_changed:%d\n",
8912 acrtc->crtc_id,
8913 new_crtc_state->enable,
8914 new_crtc_state->active,
8915 new_crtc_state->planes_changed,
8916 new_crtc_state->mode_changed,
8917 new_crtc_state->active_changed,
8918 new_crtc_state->connectors_changed);
62f55537 8919
4b9674e5
LL
8920 /* Remove stream for any changed/disabled CRTC */
8921 if (!enable) {
62f55537 8922
4b9674e5
LL
8923 if (!dm_old_crtc_state->stream)
8924 goto skip_modeset;
eb3dc897 8925
4b9674e5
LL
8926 ret = dm_atomic_get_state(state, &dm_state);
8927 if (ret)
8928 goto fail;
e7b07cee 8929
4b9674e5
LL
8930 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8931 crtc->base.id);
62f55537 8932
4b9674e5
LL
8933 /* i.e. reset mode */
8934 if (dc_remove_stream_from_ctx(
8935 dm->dc,
8936 dm_state->context,
8937 dm_old_crtc_state->stream) != DC_OK) {
8938 ret = -EINVAL;
8939 goto fail;
8940 }
62f55537 8941
4b9674e5
LL
8942 dc_stream_release(dm_old_crtc_state->stream);
8943 dm_new_crtc_state->stream = NULL;
bb47de73 8944
4b9674e5 8945 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 8946
4b9674e5 8947 *lock_and_validation_needed = true;
62f55537 8948
4b9674e5
LL
 8949 } else { /* Add stream for any updated/enabled CRTC */
8950 /*
 8951 * Quick fix to prevent a NULL pointer dereference on new_stream when
 8952 * newly added MST connectors are not found in the existing crtc_state in chained mode
8953 * TODO: need to dig out the root cause of that
8954 */
8955 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8956 goto skip_modeset;
62f55537 8957
4b9674e5
LL
8958 if (modereset_required(new_crtc_state))
8959 goto skip_modeset;
62f55537 8960
4b9674e5
LL
8961 if (modeset_required(new_crtc_state, new_stream,
8962 dm_old_crtc_state->stream)) {
62f55537 8963
4b9674e5 8964 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 8965
4b9674e5
LL
8966 ret = dm_atomic_get_state(state, &dm_state);
8967 if (ret)
8968 goto fail;
27b3f4fc 8969
4b9674e5 8970 dm_new_crtc_state->stream = new_stream;
62f55537 8971
4b9674e5 8972 dc_stream_retain(new_stream);
1dc90497 8973
4b9674e5
LL
8974 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8975 crtc->base.id);
1dc90497 8976
4b9674e5
LL
8977 if (dc_add_stream_to_ctx(
8978 dm->dc,
8979 dm_state->context,
8980 dm_new_crtc_state->stream) != DC_OK) {
8981 ret = -EINVAL;
8982 goto fail;
9b690ef3
BL
8983 }
8984
4b9674e5
LL
8985 *lock_and_validation_needed = true;
8986 }
8987 }
e277adc5 8988
4b9674e5
LL
8989skip_modeset:
8990 /* Release extra reference */
8991 if (new_stream)
8992 dc_stream_release(new_stream);
e277adc5 8993
4b9674e5
LL
8994 /*
8995 * We want to do dc stream updates that do not require a
8996 * full modeset below.
8997 */
2afda735 8998 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
8999 return 0;
9000 /*
9001 * Given above conditions, the dc state cannot be NULL because:
9002 * 1. We're in the process of enabling CRTCs (just been added
9003 * to the dc context, or already is on the context)
9004 * 2. Has a valid connector attached, and
9005 * 3. Is currently active and enabled.
9006 * => The dc stream state currently exists.
9007 */
9008 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 9009
4b9674e5
LL
9010 /* Scaling or underscan settings */
9011 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9012 update_stream_scaling_settings(
9013 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 9014
b05e2c5e
DF
9015 /* ABM settings */
9016 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9017
4b9674e5
LL
9018 /*
9019 * Color management settings. We also update color properties
9020 * when a modeset is needed, to ensure it gets reprogrammed.
9021 */
9022 if (dm_new_crtc_state->base.color_mgmt_changed ||
9023 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 9024 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
9025 if (ret)
9026 goto fail;
62f55537 9027 }
e7b07cee 9028
4b9674e5
LL
9029 /* Update Freesync settings. */
9030 get_freesync_config_for_crtc(dm_new_crtc_state,
9031 dm_new_conn_state);
9032
62f55537 9033 return ret;
9635b754
DS
9034
9035fail:
9036 if (new_stream)
9037 dc_stream_release(new_stream);
9038 return ret;
62f55537 9039}
9b690ef3 9040
f6ff2a08
NK
9041static bool should_reset_plane(struct drm_atomic_state *state,
9042 struct drm_plane *plane,
9043 struct drm_plane_state *old_plane_state,
9044 struct drm_plane_state *new_plane_state)
9045{
9046 struct drm_plane *other;
9047 struct drm_plane_state *old_other_state, *new_other_state;
9048 struct drm_crtc_state *new_crtc_state;
9049 int i;
9050
70a1efac
NK
9051 /*
 9052 * TODO: Remove this hack once the checks below are sufficient
 9053 * to determine when we need to reset all the planes on
9054 * the stream.
9055 */
9056 if (state->allow_modeset)
9057 return true;
9058
f6ff2a08
NK
9059 /* Exit early if we know that we're adding or removing the plane. */
9060 if (old_plane_state->crtc != new_plane_state->crtc)
9061 return true;
9062
9063 /* old crtc == new_crtc == NULL, plane not in context. */
9064 if (!new_plane_state->crtc)
9065 return false;
9066
9067 new_crtc_state =
9068 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9069
9070 if (!new_crtc_state)
9071 return true;
9072
7316c4ad
NK
9073 /* CRTC Degamma changes currently require us to recreate planes. */
9074 if (new_crtc_state->color_mgmt_changed)
9075 return true;
9076
f6ff2a08
NK
9077 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9078 return true;
9079
9080 /*
9081 * If there are any new primary or overlay planes being added or
9082 * removed then the z-order can potentially change. To ensure
9083 * correct z-order and pipe acquisition the current DC architecture
9084 * requires us to remove and recreate all existing planes.
9085 *
9086 * TODO: Come up with a more elegant solution for this.
9087 */
9088 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 9089 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
9090 if (other->type == DRM_PLANE_TYPE_CURSOR)
9091 continue;
9092
9093 if (old_other_state->crtc != new_plane_state->crtc &&
9094 new_other_state->crtc != new_plane_state->crtc)
9095 continue;
9096
9097 if (old_other_state->crtc != new_other_state->crtc)
9098 return true;
9099
dc4cb30d
NK
9100 /* Src/dst size and scaling updates. */
9101 if (old_other_state->src_w != new_other_state->src_w ||
9102 old_other_state->src_h != new_other_state->src_h ||
9103 old_other_state->crtc_w != new_other_state->crtc_w ||
9104 old_other_state->crtc_h != new_other_state->crtc_h)
9105 return true;
9106
9107 /* Rotation / mirroring updates. */
9108 if (old_other_state->rotation != new_other_state->rotation)
9109 return true;
9110
9111 /* Blending updates. */
9112 if (old_other_state->pixel_blend_mode !=
9113 new_other_state->pixel_blend_mode)
9114 return true;
9115
9116 /* Alpha updates. */
9117 if (old_other_state->alpha != new_other_state->alpha)
9118 return true;
9119
9120 /* Colorspace changes. */
9121 if (old_other_state->color_range != new_other_state->color_range ||
9122 old_other_state->color_encoding != new_other_state->color_encoding)
9123 return true;
9124
9a81cc60
NK
9125 /* Framebuffer checks fall at the end. */
9126 if (!old_other_state->fb || !new_other_state->fb)
9127 continue;
9128
9129 /* Pixel format changes can require bandwidth updates. */
9130 if (old_other_state->fb->format != new_other_state->fb->format)
9131 return true;
9132
6eed95b0
BN
9133 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9134 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
9135
9136 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
9137 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9138 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
9139 return true;
9140 }
9141
9142 return false;
9143}
9144
b0455fda
SS
9145static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9146 struct drm_plane_state *new_plane_state,
9147 struct drm_framebuffer *fb)
9148{
e72868c4
SS
9149 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9150 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 9151 unsigned int pitch;
e72868c4 9152 bool linear;
b0455fda
SS
9153
9154 if (fb->width > new_acrtc->max_cursor_width ||
9155 fb->height > new_acrtc->max_cursor_height) {
9156 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9157 new_plane_state->fb->width,
9158 new_plane_state->fb->height);
9159 return -EINVAL;
9160 }
9161 if (new_plane_state->src_w != fb->width << 16 ||
9162 new_plane_state->src_h != fb->height << 16) {
9163 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9164 return -EINVAL;
9165 }
9166
9167 /* Pitch in pixels */
9168 pitch = fb->pitches[0] / fb->format->cpp[0];
9169
9170 if (fb->width != pitch) {
9171 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9172 fb->width, pitch);
9173 return -EINVAL;
9174 }
9175
9176 switch (pitch) {
9177 case 64:
9178 case 128:
9179 case 256:
9180 /* FB pitch is supported by cursor plane */
9181 break;
9182 default:
9183 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9184 return -EINVAL;
9185 }
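
	/*
	 * Worked example (illustrative, not from the driver): a 64x64
	 * ARGB8888 cursor FB has cpp[0] == 4 and pitches[0] == 256 bytes,
	 * so pitch == 256 / 4 == 64 pixels - it matches fb->width and is
	 * one of the pitches accepted above.
	 */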
9186
e72868c4
SS
9187 /* Core DRM takes care of checking FB modifiers, so we only need to
9188 * check tiling flags when the FB doesn't have a modifier. */
9189 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9190 if (adev->family < AMDGPU_FAMILY_AI) {
9191 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9192 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9193 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9194 } else {
9195 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9196 }
9197 if (!linear) {
9198 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9199 return -EINVAL;
9200 }
9201 }
9202
b0455fda
SS
9203 return 0;
9204}
9205
9e869063
LL
9206static int dm_update_plane_state(struct dc *dc,
9207 struct drm_atomic_state *state,
9208 struct drm_plane *plane,
9209 struct drm_plane_state *old_plane_state,
9210 struct drm_plane_state *new_plane_state,
9211 bool enable,
9212 bool *lock_and_validation_needed)
62f55537 9213{
eb3dc897
NK
9214
9215 struct dm_atomic_state *dm_state = NULL;
62f55537 9216 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 9217 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 9218 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 9219 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 9220 struct amdgpu_crtc *new_acrtc;
f6ff2a08 9221 bool needs_reset;
62f55537 9222 int ret = 0;
e7b07cee 9223
9b690ef3 9224
9e869063
LL
9225 new_plane_crtc = new_plane_state->crtc;
9226 old_plane_crtc = old_plane_state->crtc;
9227 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9228 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 9229
626bf90f
SS
9230 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9231 if (!enable || !new_plane_crtc ||
9232 drm_atomic_plane_disabling(plane->state, new_plane_state))
9233 return 0;
9234
9235 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9236
5f581248
SS
9237 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9238 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9239 return -EINVAL;
9240 }
9241
24f99d2b 9242 if (new_plane_state->fb) {
b0455fda
SS
9243 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9244 new_plane_state->fb);
9245 if (ret)
9246 return ret;
24f99d2b
SS
9247 }
9248
9e869063 9249 return 0;
626bf90f 9250 }
9b690ef3 9251
f6ff2a08
NK
9252 needs_reset = should_reset_plane(state, plane, old_plane_state,
9253 new_plane_state);
9254
9e869063
LL
9255 /* Remove any changed/removed planes */
9256 if (!enable) {
f6ff2a08 9257 if (!needs_reset)
9e869063 9258 return 0;
a7b06724 9259
9e869063
LL
9260 if (!old_plane_crtc)
9261 return 0;
62f55537 9262
9e869063
LL
9263 old_crtc_state = drm_atomic_get_old_crtc_state(
9264 state, old_plane_crtc);
9265 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 9266
9e869063
LL
9267 if (!dm_old_crtc_state->stream)
9268 return 0;
62f55537 9269
9e869063
LL
9270 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9271 plane->base.id, old_plane_crtc->base.id);
9b690ef3 9272
9e869063
LL
9273 ret = dm_atomic_get_state(state, &dm_state);
9274 if (ret)
9275 return ret;
eb3dc897 9276
9e869063
LL
9277 if (!dc_remove_plane_from_context(
9278 dc,
9279 dm_old_crtc_state->stream,
9280 dm_old_plane_state->dc_state,
9281 dm_state->context)) {
62f55537 9282
c3537613 9283 return -EINVAL;
9e869063 9284 }
e7b07cee 9285
9b690ef3 9286
9e869063
LL
9287 dc_plane_state_release(dm_old_plane_state->dc_state);
9288 dm_new_plane_state->dc_state = NULL;
1dc90497 9289
9e869063 9290 *lock_and_validation_needed = true;
1dc90497 9291
9e869063
LL
9292 } else { /* Add new planes */
9293 struct dc_plane_state *dc_new_plane_state;
1dc90497 9294
9e869063
LL
9295 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9296 return 0;
e7b07cee 9297
9e869063
LL
9298 if (!new_plane_crtc)
9299 return 0;
e7b07cee 9300
9e869063
LL
9301 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9302 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 9303
9e869063
LL
9304 if (!dm_new_crtc_state->stream)
9305 return 0;
62f55537 9306
f6ff2a08 9307 if (!needs_reset)
9e869063 9308 return 0;
62f55537 9309
8c44515b
AP
9310 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9311 if (ret)
9312 return ret;
9313
9e869063 9314 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 9315
9e869063
LL
9316 dc_new_plane_state = dc_create_plane_state(dc);
9317 if (!dc_new_plane_state)
9318 return -ENOMEM;
62f55537 9319
9e869063
LL
9320 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9321 plane->base.id, new_plane_crtc->base.id);
8c45c5db 9322
695af5f9 9323 ret = fill_dc_plane_attributes(
1348969a 9324 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
9325 dc_new_plane_state,
9326 new_plane_state,
9327 new_crtc_state);
9328 if (ret) {
9329 dc_plane_state_release(dc_new_plane_state);
9330 return ret;
9331 }
62f55537 9332
9e869063
LL
9333 ret = dm_atomic_get_state(state, &dm_state);
9334 if (ret) {
9335 dc_plane_state_release(dc_new_plane_state);
9336 return ret;
9337 }
eb3dc897 9338
9e869063
LL
9339 /*
9340 * Any atomic check errors that occur after this will
9341 * not need a release. The plane state will be attached
9342 * to the stream, and therefore part of the atomic
9343 * state. It'll be released when the atomic state is
9344 * cleaned.
9345 */
9346 if (!dc_add_plane_to_context(
9347 dc,
9348 dm_new_crtc_state->stream,
9349 dc_new_plane_state,
9350 dm_state->context)) {
62f55537 9351
9e869063
LL
9352 dc_plane_state_release(dc_new_plane_state);
9353 return -EINVAL;
9354 }
8c45c5db 9355
9e869063 9356 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 9357
9e869063
LL
9358 /* Tell DC to do a full surface update every time there
9359 * is a plane change. Inefficient, but works for now.
9360 */
9361 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9362
9363 *lock_and_validation_needed = true;
62f55537 9364 }
e7b07cee
HW
9365
9366
62f55537
AG
9367 return ret;
9368}
a87fa993 9369
12f4849a
SS
9370static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9371 struct drm_crtc *crtc,
9372 struct drm_crtc_state *new_crtc_state)
9373{
9374 struct drm_plane_state *new_cursor_state, *new_primary_state;
9375 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9376
9377 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9378 * cursor per pipe but it's going to inherit the scaling and
 9379 * positioning from the underlying pipe. Check that the cursor plane's
 9380 * scaling matches the primary plane's. */
9381
9382 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9383 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9384 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9385 return 0;
9386 }
9387
9388 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9389 (new_cursor_state->src_w >> 16);
9390 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9391 (new_cursor_state->src_h >> 16);
9392
9393 primary_scale_w = new_primary_state->crtc_w * 1000 /
9394 (new_primary_state->src_w >> 16);
9395 primary_scale_h = new_primary_state->crtc_h * 1000 /
9396 (new_primary_state->src_h >> 16);
9397
9398 if (cursor_scale_w != primary_scale_w ||
9399 cursor_scale_h != primary_scale_h) {
9400 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9401 return -EINVAL;
9402 }
9403
9404 return 0;
9405}
9406
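/*
 * Worked example for dm_check_crtc_cursor() (illustrative values):
 * src_w/src_h are 16.16 fixed point, so a 64x64 cursor FB shown at
 * 64x64 on screen gives cursor_scale_w = 64 * 1000 / ((64 << 16) >> 16)
 * = 1000, i.e. 1.0x in thousandths. If the primary plane scans a
 * 1920-wide source onto a 3840-wide CRTC, primary_scale_w =
 * 3840 * 1000 / 1920 = 2000, and the mismatch is rejected with -EINVAL.
 */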
e10517b3 9407#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
9408static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9409{
9410 struct drm_connector *connector;
9411 struct drm_connector_state *conn_state;
9412 struct amdgpu_dm_connector *aconnector = NULL;
9413 int i;
9414 for_each_new_connector_in_state(state, connector, conn_state, i) {
9415 if (conn_state->crtc != crtc)
9416 continue;
9417
9418 aconnector = to_amdgpu_dm_connector(connector);
9419 if (!aconnector->port || !aconnector->mst_port)
9420 aconnector = NULL;
9421 else
9422 break;
9423 }
9424
9425 if (!aconnector)
9426 return 0;
9427
9428 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9429}
e10517b3 9430#endif
44be939f 9431
b8592b48
LL
9432/**
9433 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9434 * @dev: The DRM device
9435 * @state: The atomic state to commit
9436 *
9437 * Validate that the given atomic state is programmable by DC into hardware.
9438 * This involves constructing a &struct dc_state reflecting the new hardware
9439 * state we wish to commit, then querying DC to see if it is programmable. It's
9440 * important not to modify the existing DC state. Otherwise, atomic_check
9441 * may unexpectedly commit hardware changes.
9442 *
9443 * When validating the DC state, it's important that the right locks are
9444 * acquired. For full updates case which removes/adds/updates streams on one
9445 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9446 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 9447 * flip using DRMs synchronization events.
b8592b48
LL
9448 *
9449 * Note that DM adds the affected connectors for all CRTCs in state, when that
9450 * might not seem necessary. This is because DC stream creation requires the
9451 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9452 * be possible but non-trivial - a possible TODO item.
9453 *
9454 * Return: -Error code if validation failed.
9455 */
7578ecda
AD
9456static int amdgpu_dm_atomic_check(struct drm_device *dev,
9457 struct drm_atomic_state *state)
62f55537 9458{
1348969a 9459 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 9460 struct dm_atomic_state *dm_state = NULL;
62f55537 9461 struct dc *dc = adev->dm.dc;
62f55537 9462 struct drm_connector *connector;
c2cea706 9463 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 9464 struct drm_crtc *crtc;
fc9e9920 9465 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
9466 struct drm_plane *plane;
9467 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 9468 enum dc_status status;
1e88ad0a 9469 int ret, i;
62f55537 9470 bool lock_and_validation_needed = false;
886876ec 9471 struct dm_crtc_state *dm_old_crtc_state;
62f55537 9472
e8a98235 9473 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 9474
62f55537 9475 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
9476 if (ret)
9477 goto fail;
62f55537 9478
c5892a10
SW
9479 /* Check connector changes */
9480 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9481 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9482 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9483
9484 /* Skip connectors that are disabled or part of modeset already. */
9485 if (!old_con_state->crtc && !new_con_state->crtc)
9486 continue;
9487
9488 if (!new_con_state->crtc)
9489 continue;
9490
9491 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9492 if (IS_ERR(new_crtc_state)) {
9493 ret = PTR_ERR(new_crtc_state);
9494 goto fail;
9495 }
9496
9497 if (dm_old_con_state->abm_level !=
9498 dm_new_con_state->abm_level)
9499 new_crtc_state->connectors_changed = true;
9500 }
9501
e10517b3 9502#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 9503 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
9504 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9505 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9506 ret = add_affected_mst_dsc_crtcs(state, crtc);
9507 if (ret)
9508 goto fail;
9509 }
9510 }
9511 }
e10517b3 9512#endif
1e88ad0a 9513 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
9514 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9515
1e88ad0a 9516 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 9517 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
9518 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9519 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 9520 continue;
7bef1af3 9521
1e88ad0a
S
9522 if (!new_crtc_state->enable)
9523 continue;
fc9e9920 9524
1e88ad0a
S
9525 ret = drm_atomic_add_affected_connectors(state, crtc);
9526 if (ret)
9527 return ret;
fc9e9920 9528
1e88ad0a
S
9529 ret = drm_atomic_add_affected_planes(state, crtc);
9530 if (ret)
9531 goto fail;
115a385c 9532
cbac53f7 9533 if (dm_old_crtc_state->dsc_force_changed)
115a385c 9534 new_crtc_state->mode_changed = true;
e7b07cee
HW
9535 }
9536
2d9e6431
NK
9537 /*
9538 * Add all primary and overlay planes on the CRTC to the state
9539 * whenever a plane is enabled to maintain correct z-ordering
9540 * and to enable fast surface updates.
9541 */
9542 drm_for_each_crtc(crtc, dev) {
9543 bool modified = false;
9544
9545 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9546 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9547 continue;
9548
9549 if (new_plane_state->crtc == crtc ||
9550 old_plane_state->crtc == crtc) {
9551 modified = true;
9552 break;
9553 }
9554 }
9555
9556 if (!modified)
9557 continue;
9558
9559 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9560 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9561 continue;
9562
9563 new_plane_state =
9564 drm_atomic_get_plane_state(state, plane);
9565
9566 if (IS_ERR(new_plane_state)) {
9567 ret = PTR_ERR(new_plane_state);
9568 goto fail;
9569 }
9570 }
9571 }
9572
62f55537 9573 /* Remove exiting planes if they are modified */
9e869063
LL
9574 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9575 ret = dm_update_plane_state(dc, state, plane,
9576 old_plane_state,
9577 new_plane_state,
9578 false,
9579 &lock_and_validation_needed);
9580 if (ret)
9581 goto fail;
62f55537
AG
9582 }
9583
9584 /* Disable all crtcs which require disable */
4b9674e5
LL
9585 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9586 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9587 old_crtc_state,
9588 new_crtc_state,
9589 false,
9590 &lock_and_validation_needed);
9591 if (ret)
9592 goto fail;
62f55537
AG
9593 }
9594
9595 /* Enable all crtcs which require enable */
4b9674e5
LL
9596 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9597 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9598 old_crtc_state,
9599 new_crtc_state,
9600 true,
9601 &lock_and_validation_needed);
9602 if (ret)
9603 goto fail;
62f55537
AG
9604 }
9605
9606 /* Add new/modified planes */
9e869063
LL
9607 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9608 ret = dm_update_plane_state(dc, state, plane,
9609 old_plane_state,
9610 new_plane_state,
9611 true,
9612 &lock_and_validation_needed);
9613 if (ret)
9614 goto fail;
62f55537
AG
9615 }
9616
b349f76e
ES
9617 /* Run this here since we want to validate the streams we created */
9618 ret = drm_atomic_helper_check_planes(dev, state);
9619 if (ret)
9620 goto fail;
62f55537 9621
12f4849a
SS
9622 /* Check cursor planes scaling */
9623 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9624 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9625 if (ret)
9626 goto fail;
9627 }
9628
43d10d30
NK
9629 if (state->legacy_cursor_update) {
9630 /*
9631 * This is a fast cursor update coming from the plane update
9632 * helper, check if it can be done asynchronously for better
9633 * performance.
9634 */
9635 state->async_update =
9636 !drm_atomic_helper_async_check(dev, state);
9637
9638 /*
9639 * Skip the remaining global validation if this is an async
9640 * update. Cursor updates can be done without affecting
9641 * state or bandwidth calcs and this avoids the performance
9642 * penalty of locking the private state object and
9643 * allocating a new dc_state.
9644 */
9645 if (state->async_update)
9646 return 0;
9647 }
9648
ebdd27e1 9649 /* Check scaling and underscan changes */
1f6010a9 9650 /* TODO Removed scaling changes validation due to inability to commit
e7b07cee
HW
 9651 * new stream into context w/o causing full reset. Need to
9652 * decide how to handle.
9653 */
c2cea706 9654 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9655 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9656 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9657 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
9658
9659 /* Skip any modesets/resets */
0bc9706d
LSL
9660 if (!acrtc || drm_atomic_crtc_needs_modeset(
9661 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
9662 continue;
9663
b830ebc9 9664 /* Skip anything that is not a scaling or underscan change */
54d76575 9665 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
9666 continue;
9667
9668 lock_and_validation_needed = true;
9669 }

	/**
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			/* Set an error code so the fail path does not report success. */
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/* If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
						 UPDATE_TYPE_FULL :
						 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

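/*
 * The private-object cleanup above is the classic unordered-array removal:
 * destroy the element at i, move the last element into the hole, then shrink
 * the array, all in O(1) and without shifting the tail. A minimal standalone
 * sketch of the same pattern (hypothetical helper, illustration only, not
 * part of the driver):
 */
#if 0
static void array_remove_unordered(void **arr, int *num, int i)
{
	int j = *num - 1;

	/* fill the hole at i with the last element, then truncate */
	if (i != j)
		arr[i] = arr[j];
	arr[j] = NULL;
	*num = j;
}
#endif
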
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;
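
	/*
	 * DP_MSA_TIMING_PAR_IGNORED in DP_DOWN_STREAM_PORT_COUNT advertises
	 * that the sink can ignore the MSA timing parameters and follow the
	 * timing of the incoming video stream instead, which is what allows
	 * refresh-rate changes without a modeset for FreeSync over DP.
	 */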
	if (amdgpu_dm_connector->dc_link &&
		dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc *dc = adev->dm.dc;

	/* send extension block to DMCU for parsing */
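	/*
	 * The parser consumes the block 8 bytes at a time; each intermediate
	 * chunk is acknowledged individually, and the parse result only
	 * becomes available once the final chunk (i + 8 == len) has been
	 * submitted. A 128-byte CEA block therefore takes 16 sends: 15 acks
	 * plus one result poll.
	 */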
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* EDID block sent completed, expect result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
			if (res) {
				/* amd vsdb found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* not amd vsdb */
			return false;
		}

		/* check for ack */
		res = dc_edid_parser_recv_cea_ack(dc, &offset);
		if (!res)
			return false;
	}

	return false;
}

static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
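	/* extension blocks are EDID_LENGTH (128) bytes each, laid out back-to-back after the base block */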
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;

	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		if (edid_check_required && (edid->version > 1 ||
				(edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information is provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
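				/* EDID encodes the max pixel clock in units of 10 MHz */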
				amdgpu_dm_connector->pixel_clock_mhz =
						range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

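			/* only advertise FreeSync when the supported range exceeds 10 Hz */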
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10) {
				freesync_capable = true;
			}
		}
	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
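		/* byte 0 of DP_PSR_SUPPORT is the sink's PSR version; 0 means no PSR */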
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/* Calculate number of static frames before generating interrupt to
	 * enter PSR.
	 */
	/* Init fail safe of 2 frames static */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/* Round up
	 * Calculate number of frames such that at least 30 ms of time has
	 * passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
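	/*
	 * vsync_rate_hz = pixel clock / (v_total * h_total); e.g. at 60 Hz:
	 * frame_time_microsec = 1000000 / 60 = 16666, so
	 * num_frames_static = (30000 / 16666) + 1 = 2 frames of static
	 * screen before PSR entry is requested.
	 */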

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false, false);
}

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true, false);
}

/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w
 * if psr is enabled on any stream
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write; address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

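	/*
	 * Reads cannot be serviced while register writes are being gathered
	 * for DMUB offload (unless the helper is burst-writing), so flag the
	 * sequencing error and bail out.
	 */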
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}