drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

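/*
 * Rough call flow: atomic commits from DRM land in amdgpu_dm_atomic_check()
 * and amdgpu_dm_atomic_commit_tail() below and are translated into DC
 * stream/plane updates, while DC interrupt sources (pageflip, vblank,
 * vupdate) are routed back into the DRM event machinery by the
 * *_high_irq() handlers in this file.
 */
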
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

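/*
 * Note the two VRR helpers above: amdgpu_dm_vrr_active_irq() reads the
 * freesync state mirrored into dm_irq_params (safe to read from the
 * interrupt handlers below), while amdgpu_dm_vrr_active() reads it from
 * the atomic dm_crtc_state used on the commit path.
 */
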
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

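/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Fires after the end of front-porch. In VRR mode this is where core
 * vblank handling (and delivery of any pageflip events queued during
 * front-porch) happens, since only here will the timestamps be valid;
 * it also runs below-the-range BTR processing for pre-DCE12 ASICs.
 */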
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif
#endif

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

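/*
 * DRM audio component glue: the callbacks below hand ELD (EDID-Like
 * Data) from DM connectors to the HDA sound driver through the
 * drm_audio_component interface, keyed by audio pin/port instance.
 */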
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

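/*
 * Bring-up sequence for the DMUB (Display Micro-Controller Unit B)
 * service: copy firmware and VBIOS into the framebuffer windows laid
 * out by dm_dmub_sw_init(), clear the mailbox/tracebuffer/fw-state
 * windows, program the hardware via dmub_srv_hw_init() and wait for
 * the firmware to auto-load before creating the DC-side DMUB server.
 */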
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_trace_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count,
					 entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	ASSERT(count <= DMUB_TRACE_MAX_READ);
}

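/*
 * Translate the GPU's address-space layout (frame buffer, AGP aperture,
 * GART page table) into DC's dc_phy_addr_space_config. The shifts mirror
 * the register granularity: system-aperture addresses are kept in 256KB
 * units (>> 18 here, << 18 when filling pa_config) and AGP addresses in
 * 16MB units (>> 24 / << 24).
 */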
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif
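
/*
 * amdgpu_dm_init() brings up the display stack in order: DM IRQ
 * handling, the DC core via dc_create(), DMUB firmware hand-off
 * (dm_dmub_hw_init()), the freesync and color-management modules,
 * the optional HDCP workqueue, and finally the DRM device topology
 * through amdgpu_dm_initialize_drm_device().
 */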
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_workqueue) {
		adev->dm.vblank_workqueue->dm = NULL;
		kfree(adev->dm.vblank_workqueue);
		adev->dm.vblank_workqueue = NULL;
	}
#endif

	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vlank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

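/*
 * DMUB software init: dm_dmub_sw_init() below (after the two register
 * I/O thunks) picks the per-ASIC firmware named by the FIRMWARE_*_DMUB
 * macros, creates the dmub_srv instance, computes the firmware region
 * layout and backs it with a single VRAM buffer; dm_dmub_hw_init()
 * later copies the data in and starts the controller.
 */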
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

1589
a94d5569
DF
1590static int dm_sw_init(void *handle)
1591{
1592 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
743b9786
NK
1593 int r;
1594
1595 r = dm_dmub_sw_init(adev);
1596 if (r)
1597 return r;
a94d5569
DF
1598
1599 return load_dmcu_fw(adev);
1600}
1601
4562236b
HW
1602static int dm_sw_fini(void *handle)
1603{
a94d5569
DF
1604 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1605
8c7aea40
NK
1606 kfree(adev->dm.dmub_fb_info);
1607 adev->dm.dmub_fb_info = NULL;
1608
743b9786
NK
1609 if (adev->dm.dmub_srv) {
1610 dmub_srv_destroy(adev->dm.dmub_srv);
1611 adev->dm.dmub_srv = NULL;
1612 }
1613
75e1658e
ND
1614 release_firmware(adev->dm.dmub_fw);
1615 adev->dm.dmub_fw = NULL;
743b9786 1616
75e1658e
ND
1617 release_firmware(adev->dm.fw_dmcu);
1618 adev->dm.fw_dmcu = NULL;
a94d5569 1619
4562236b
HW
1620 return 0;
1621}
1622
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

1653static int dm_late_init(void *handle)
1654{
42e67c3b 1655 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7abcf6b5 1656
bbf854dc
DF
1657 struct dmcu_iram_parameters params;
1658 unsigned int linear_lut[16];
1659 int i;
17bdb4a8 1660 struct dmcu *dmcu = NULL;
5cb32419 1661 bool ret = true;
bbf854dc 1662
17bdb4a8
JFZ
1663 dmcu = adev->dm.dc->res_pool->dmcu;
1664
bbf854dc
DF
1665 for (i = 0; i < 16; i++)
1666 linear_lut[i] = 0xFFFF * i / 15;
1667
1668 params.set = 0;
1669 params.backlight_ramping_start = 0xCCCC;
1670 params.backlight_ramping_reduction = 0xCCCCCCCC;
1671 params.backlight_lut_array_size = 16;
1672 params.backlight_lut_array = linear_lut;
1673
2ad0cdf9
AK
 1674 /* Min backlight level after ABM reduction; don't allow below 1%:
1675 * 0xFFFF x 0.01 = 0x28F
1676 */
1677 params.min_abm_backlight = 0x28F;
1678
5cb32419
RL
1679 /* In the case where abm is implemented on dmcub,
 1680 * the dmcu object will be NULL.
1681 * ABM 2.4 and up are implemented on dmcub.
1682 */
1683 if (dmcu)
1684 ret = dmcu_load_iram(dmcu, params);
1685 else if (adev->dm.dc->ctx->dmub_srv)
ecfe7569 1686 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
bbf854dc 1687
14ed1c90
HW
1688 if (!ret)
1689 return -EINVAL;
bbf854dc 1690
4a580877 1691 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
4562236b
HW
1692}
1693
1694static void s3_handle_mst(struct drm_device *dev, bool suspend)
1695{
c84dec2f 1696 struct amdgpu_dm_connector *aconnector;
4562236b 1697 struct drm_connector *connector;
f8d2d39e 1698 struct drm_connector_list_iter iter;
fe7553be
LP
1699 struct drm_dp_mst_topology_mgr *mgr;
1700 int ret;
1701 bool need_hotplug = false;
4562236b 1702
f8d2d39e
LP
1703 drm_connector_list_iter_begin(dev, &iter);
1704 drm_for_each_connector_iter(connector, &iter) {
fe7553be
LP
1705 aconnector = to_amdgpu_dm_connector(connector);
1706 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1707 aconnector->mst_port)
1708 continue;
1709
1710 mgr = &aconnector->mst_mgr;
1711
1712 if (suspend) {
1713 drm_dp_mst_topology_mgr_suspend(mgr);
1714 } else {
6f85f738 1715 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
fe7553be
LP
1716 if (ret < 0) {
1717 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1718 need_hotplug = true;
1719 }
1720 }
4562236b 1721 }
f8d2d39e 1722 drm_connector_list_iter_end(&iter);
fe7553be
LP
1723
1724 if (need_hotplug)
1725 drm_kms_helper_hotplug_event(dev);
4562236b
HW
1726}
1727
9340dfd3
HW
1728static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1729{
1730 struct smu_context *smu = &adev->smu;
1731 int ret = 0;
1732
1733 if (!is_support_sw_smu(adev))
1734 return 0;
1735
 1736 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
 1737 * on the Windows driver dc implementation.
 1738 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
 1739 * should be passed to smu during boot up and resume from s3.
 1740 * boot up: dc calculates dcn watermark clock settings within dc_create,
 1741 * dcn20_resource_construct
 1742 * then calls pplib functions below to pass the settings to smu:
 1743 * smu_set_watermarks_for_clock_ranges
 1744 * smu_set_watermarks_table
 1745 * navi10_set_watermarks_table
 1746 * smu_write_watermarks_table
 1747 *
 1748 * For Renoir, clock settings of dcn watermarks are also fixed values.
 1749 * dc has implemented a different flow for the Windows driver:
 1750 * dc_hardware_init / dc_set_power_state
 1751 * dcn10_init_hw
 1752 * notify_wm_ranges
 1753 * set_wm_ranges
 1754 * -- Linux
 1755 * smu_set_watermarks_for_clock_ranges
 1756 * renoir_set_watermarks_table
 1757 * smu_write_watermarks_table
 1758 *
 1759 * For Linux,
 1760 * dc_hardware_init -> amdgpu_dm_init
 1761 * dc_set_power_state --> dm_resume
 1762 *
 1763 * Therefore, this function applies to navi10/12/14 but not Renoir.
 1764 *
 1765 */
 1766 switch (adev->asic_type) {
1767 case CHIP_NAVI10:
1768 case CHIP_NAVI14:
1769 case CHIP_NAVI12:
1770 break;
1771 default:
1772 return 0;
1773 }
1774
e7a95eea
EQ
1775 ret = smu_write_watermarks_table(smu);
1776 if (ret) {
1777 DRM_ERROR("Failed to update WMTABLE!\n");
1778 return ret;
9340dfd3
HW
1779 }
1780
9340dfd3
HW
1781 return 0;
1782}
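/*
 * Illustrative cross-reference (an added note, not in the original source):
 * within this file, dm_resume() below calls
 * amdgpu_dm_smu_write_watermarks_table(adev) as its final step, so the
 * fixed Navi1x watermark settings are re-sent to smu on every resume
 * from S3, matching the boot-up flow described above.
 */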
1783
b8592b48
LL
1784/**
1785 * dm_hw_init() - Initialize DC device
28d687ea 1786 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1787 *
1788 * Initialize the &struct amdgpu_display_manager device. This involves calling
1789 * the initializers of each DM component, then populating the struct with them.
1790 *
1791 * Although the function implies hardware initialization, both hardware and
1792 * software are initialized here. Splitting them out to their relevant init
1793 * hooks is a future TODO item.
1794 *
1795 * Some notable things that are initialized here:
1796 *
1797 * - Display Core, both software and hardware
1798 * - DC modules that we need (freesync and color management)
1799 * - DRM software states
1800 * - Interrupt sources and handlers
1801 * - Vblank support
1802 * - Debug FS entries, if enabled
1803 */
4562236b
HW
1804static int dm_hw_init(void *handle)
1805{
1806 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1807 /* Create DAL display manager */
1808 amdgpu_dm_init(adev);
4562236b
HW
1809 amdgpu_dm_hpd_init(adev);
1810
4562236b
HW
1811 return 0;
1812}
1813
b8592b48
LL
1814/**
1815 * dm_hw_fini() - Teardown DC device
28d687ea 1816 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1817 *
1818 * Teardown components within &struct amdgpu_display_manager that require
1819 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1820 * were loaded. Also flush IRQ workqueues and disable them.
1821 */
4562236b
HW
1822static int dm_hw_fini(void *handle)
1823{
1824 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1825
1826 amdgpu_dm_hpd_fini(adev);
1827
1828 amdgpu_dm_irq_fini(adev);
21de3396 1829 amdgpu_dm_fini(adev);
4562236b
HW
1830 return 0;
1831}
1832
cdaae837
BL
1833
1834static int dm_enable_vblank(struct drm_crtc *crtc);
1835static void dm_disable_vblank(struct drm_crtc *crtc);
1836
1837static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1838 struct dc_state *state, bool enable)
1839{
1840 enum dc_irq_source irq_source;
1841 struct amdgpu_crtc *acrtc;
1842 int rc = -EBUSY;
1843 int i = 0;
1844
1845 for (i = 0; i < state->stream_count; i++) {
1846 acrtc = get_crtc_by_otg_inst(
1847 adev, state->stream_status[i].primary_otg_inst);
1848
1849 if (acrtc && state->stream_status[i].plane_count != 0) {
1850 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1851 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4711c033
LT
1852 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1853 acrtc->crtc_id, enable ? "en" : "dis", rc);
cdaae837
BL
1854 if (rc)
1855 DRM_WARN("Failed to %s pflip interrupts\n",
1856 enable ? "enable" : "disable");
1857
1858 if (enable) {
1859 rc = dm_enable_vblank(&acrtc->base);
1860 if (rc)
1861 DRM_WARN("Failed to enable vblank interrupts\n");
1862 } else {
1863 dm_disable_vblank(&acrtc->base);
1864 }
1865
1866 }
1867 }
1868
1869}
1870
dfd84d90 1871static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
1872{
1873 struct dc_state *context = NULL;
1874 enum dc_status res = DC_ERROR_UNEXPECTED;
1875 int i;
1876 struct dc_stream_state *del_streams[MAX_PIPES];
1877 int del_streams_count = 0;
1878
1879 memset(del_streams, 0, sizeof(del_streams));
1880
1881 context = dc_create_state(dc);
1882 if (context == NULL)
1883 goto context_alloc_fail;
1884
1885 dc_resource_state_copy_construct_current(dc, context);
1886
1887 /* First remove from context all streams */
1888 for (i = 0; i < context->stream_count; i++) {
1889 struct dc_stream_state *stream = context->streams[i];
1890
1891 del_streams[del_streams_count++] = stream;
1892 }
1893
1894 /* Remove all planes for removed streams and then remove the streams */
1895 for (i = 0; i < del_streams_count; i++) {
1896 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1897 res = DC_FAIL_DETACH_SURFACES;
1898 goto fail;
1899 }
1900
1901 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1902 if (res != DC_OK)
1903 goto fail;
1904 }
1905
1906
1907 res = dc_validate_global_state(dc, context, false);
1908
1909 if (res != DC_OK) {
1910 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1911 goto fail;
1912 }
1913
1914 res = dc_commit_state(dc, context);
1915
1916fail:
1917 dc_release_state(context);
1918
1919context_alloc_fail:
1920 return res;
1921}
1922
4562236b
HW
1923static int dm_suspend(void *handle)
1924{
1925 struct amdgpu_device *adev = handle;
1926 struct amdgpu_display_manager *dm = &adev->dm;
1927 int ret = 0;
4562236b 1928
53b3f8f4 1929 if (amdgpu_in_reset(adev)) {
cdaae837 1930 mutex_lock(&dm->dc_lock);
98ab5f35
BL
1931
1932#if defined(CONFIG_DRM_AMD_DC_DCN)
1933 dc_allow_idle_optimizations(adev->dm.dc, false);
1934#endif
1935
cdaae837
BL
1936 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1937
1938 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1939
1940 amdgpu_dm_commit_zero_streams(dm->dc);
1941
1942 amdgpu_dm_irq_suspend(adev);
1943
1944 return ret;
1945 }
4562236b 1946
9a65df19
WL
1947#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1948 amdgpu_dm_crtc_secure_display_suspend(adev);
1949#endif
d2f0b53b 1950 WARN_ON(adev->dm.cached_state);
4a580877 1951 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 1952
4a580877 1953 s3_handle_mst(adev_to_drm(adev), true);
4562236b 1954
4562236b
HW
1955 amdgpu_dm_irq_suspend(adev);
1956
a3621485 1957
32f5062d 1958 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 1959
1c2075d4 1960 return 0;
4562236b
HW
1961}
1962
1daf8c63
AD
1963static struct amdgpu_dm_connector *
1964amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965 struct drm_crtc *crtc)
4562236b
HW
1966{
1967 uint32_t i;
c2cea706 1968 struct drm_connector_state *new_con_state;
4562236b
HW
1969 struct drm_connector *connector;
1970 struct drm_crtc *crtc_from_state;
1971
c2cea706
LSL
1972 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973 crtc_from_state = new_con_state->crtc;
4562236b
HW
1974
1975 if (crtc_from_state == crtc)
c84dec2f 1976 return to_amdgpu_dm_connector(connector);
4562236b
HW
1977 }
1978
1979 return NULL;
1980}
1981
fbbdadf2
BL
1982static void emulated_link_detect(struct dc_link *link)
1983{
1984 struct dc_sink_init_data sink_init_data = { 0 };
1985 struct display_sink_capability sink_caps = { 0 };
1986 enum dc_edid_status edid_status;
1987 struct dc_context *dc_ctx = link->ctx;
1988 struct dc_sink *sink = NULL;
1989 struct dc_sink *prev_sink = NULL;
1990
1991 link->type = dc_connection_none;
1992 prev_sink = link->local_sink;
1993
30164a16
VL
1994 if (prev_sink)
1995 dc_sink_release(prev_sink);
fbbdadf2
BL
1996
1997 switch (link->connector_signal) {
1998 case SIGNAL_TYPE_HDMI_TYPE_A: {
1999 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2000 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2001 break;
2002 }
2003
2004 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2005 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2006 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2007 break;
2008 }
2009
2010 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2011 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2012 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2013 break;
2014 }
2015
2016 case SIGNAL_TYPE_LVDS: {
2017 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2018 sink_caps.signal = SIGNAL_TYPE_LVDS;
2019 break;
2020 }
2021
2022 case SIGNAL_TYPE_EDP: {
2023 sink_caps.transaction_type =
2024 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2025 sink_caps.signal = SIGNAL_TYPE_EDP;
2026 break;
2027 }
2028
2029 case SIGNAL_TYPE_DISPLAY_PORT: {
2030 sink_caps.transaction_type =
2031 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2032 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2033 break;
2034 }
2035
2036 default:
2037 DC_ERROR("Invalid connector type! signal:%d\n",
2038 link->connector_signal);
2039 return;
2040 }
2041
2042 sink_init_data.link = link;
2043 sink_init_data.sink_signal = sink_caps.signal;
2044
2045 sink = dc_sink_create(&sink_init_data);
2046 if (!sink) {
2047 DC_ERROR("Failed to create sink!\n");
2048 return;
2049 }
2050
dcd5fb82 2051 /* dc_sink_create returns a new reference */
fbbdadf2
BL
2052 link->local_sink = sink;
2053
2054 edid_status = dm_helpers_read_local_edid(
2055 link->ctx,
2056 link,
2057 sink);
2058
2059 if (edid_status != EDID_OK)
2060 DC_ERROR("Failed to read EDID");
2061
2062}
2063
cdaae837
BL
2064static void dm_gpureset_commit_state(struct dc_state *dc_state,
2065 struct amdgpu_display_manager *dm)
2066{
2067 struct {
2068 struct dc_surface_update surface_updates[MAX_SURFACES];
2069 struct dc_plane_info plane_infos[MAX_SURFACES];
2070 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2071 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2072 struct dc_stream_update stream_update;
 2073 } *bundle;
2074 int k, m;
2075
2076 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2077
2078 if (!bundle) {
2079 dm_error("Failed to allocate update bundle\n");
2080 goto cleanup;
2081 }
2082
2083 for (k = 0; k < dc_state->stream_count; k++) {
2084 bundle->stream_update.stream = dc_state->streams[k];
2085
2086 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2087 bundle->surface_updates[m].surface =
2088 dc_state->stream_status->plane_states[m];
2089 bundle->surface_updates[m].surface->force_full_update =
2090 true;
2091 }
2092 dc_commit_updates_for_stream(
2093 dm->dc, bundle->surface_updates,
2094 dc_state->stream_status->plane_count,
efc8278e 2095 dc_state->streams[k], &bundle->stream_update, dc_state);
cdaae837
BL
2096 }
2097
2098cleanup:
2099 kfree(bundle);
2100
2101 return;
2102}
2103
3c4d55c9
AP
2104static void dm_set_dpms_off(struct dc_link *link)
2105{
2106 struct dc_stream_state *stream_state;
2107 struct amdgpu_dm_connector *aconnector = link->priv;
2108 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2109 struct dc_stream_update stream_update;
2110 bool dpms_off = true;
2111
2112 memset(&stream_update, 0, sizeof(stream_update));
2113 stream_update.dpms_off = &dpms_off;
2114
2115 mutex_lock(&adev->dm.dc_lock);
2116 stream_state = dc_stream_find_from_link(link);
2117
2118 if (stream_state == NULL) {
2119 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2120 mutex_unlock(&adev->dm.dc_lock);
2121 return;
2122 }
2123
2124 stream_update.stream = stream_state;
2125 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
efc8278e
AJ
2126 stream_state, &stream_update,
2127 stream_state->ctx->dc->current_state);
3c4d55c9
AP
2128 mutex_unlock(&adev->dm.dc_lock);
2129}
2130
4562236b
HW
2131static int dm_resume(void *handle)
2132{
2133 struct amdgpu_device *adev = handle;
4a580877 2134 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2135 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2136 struct amdgpu_dm_connector *aconnector;
4562236b 2137 struct drm_connector *connector;
f8d2d39e 2138 struct drm_connector_list_iter iter;
4562236b 2139 struct drm_crtc *crtc;
c2cea706 2140 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
2141 struct dm_crtc_state *dm_new_crtc_state;
2142 struct drm_plane *plane;
2143 struct drm_plane_state *new_plane_state;
2144 struct dm_plane_state *dm_new_plane_state;
113b7a01 2145 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2146 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
2147 struct dc_state *dc_state;
2148 int i, r, j;
4562236b 2149
53b3f8f4 2150 if (amdgpu_in_reset(adev)) {
cdaae837
BL
2151 dc_state = dm->cached_dc_state;
2152
2153 r = dm_dmub_hw_init(adev);
2154 if (r)
2155 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2156
2157 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2158 dc_resume(dm->dc);
2159
2160 amdgpu_dm_irq_resume_early(adev);
2161
2162 for (i = 0; i < dc_state->stream_count; i++) {
2163 dc_state->streams[i]->mode_changed = true;
2164 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2165 dc_state->stream_status->plane_states[j]->update_flags.raw
2166 = 0xffffffff;
2167 }
2168 }
2169
2170 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2171
cdaae837
BL
2172 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2173
2174 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2175
2176 dc_release_state(dm->cached_dc_state);
2177 dm->cached_dc_state = NULL;
2178
2179 amdgpu_dm_irq_resume_late(adev);
2180
2181 mutex_unlock(&dm->dc_lock);
2182
2183 return 0;
2184 }
113b7a01
LL
2185 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2186 dc_release_state(dm_state->context);
2187 dm_state->context = dc_create_state(dm->dc);
2188 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2189 dc_resource_state_construct(dm->dc, dm_state->context);
2190
8c7aea40
NK
2191 /* Before powering on DC we need to re-initialize DMUB. */
2192 r = dm_dmub_hw_init(adev);
2193 if (r)
2194 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2195
a80aa93d
ML
2196 /* power on hardware */
2197 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2198
4562236b
HW
2199 /* program HPD filter */
2200 dc_resume(dm->dc);
2201
4562236b
HW
2202 /*
 2203 * Early enable HPD Rx IRQ; this should be done before set mode, as short
 2204 * pulse interrupts are used for MST
2205 */
2206 amdgpu_dm_irq_resume_early(adev);
2207
d20ebea8 2208 /* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
2209 s3_handle_mst(ddev, false);
2210
4562236b 2211 /* Do detection */
f8d2d39e
LP
2212 drm_connector_list_iter_begin(ddev, &iter);
2213 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2214 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2215
2216 /*
2217 * this is the case when traversing through already created
 2218 * MST connectors, which should be skipped
2219 */
2220 if (aconnector->mst_port)
2221 continue;
2222
03ea364c 2223 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2224 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2225 DRM_ERROR("KMS: Failed to detect connector\n");
2226
2227 if (aconnector->base.force && new_connection_type == dc_connection_none)
2228 emulated_link_detect(aconnector->dc_link);
2229 else
2230 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2231
2232 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2233 aconnector->fake_enable = false;
2234
dcd5fb82
MF
2235 if (aconnector->dc_sink)
2236 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2237 aconnector->dc_sink = NULL;
2238 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2239 mutex_unlock(&aconnector->hpd_lock);
4562236b 2240 }
f8d2d39e 2241 drm_connector_list_iter_end(&iter);
4562236b 2242
1f6010a9 2243 /* Force mode set in atomic commit */
a80aa93d 2244 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2245 new_crtc_state->active_changed = true;
4f346e65 2246
fcb4019e
LSL
2247 /*
2248 * atomic_check is expected to create the dc states. We need to release
2249 * them here, since they were duplicated as part of the suspend
2250 * procedure.
2251 */
a80aa93d 2252 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2253 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2254 if (dm_new_crtc_state->stream) {
2255 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2256 dc_stream_release(dm_new_crtc_state->stream);
2257 dm_new_crtc_state->stream = NULL;
2258 }
2259 }
2260
a80aa93d 2261 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2262 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2263 if (dm_new_plane_state->dc_state) {
2264 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2265 dc_plane_state_release(dm_new_plane_state->dc_state);
2266 dm_new_plane_state->dc_state = NULL;
2267 }
2268 }
2269
2d1af6a1 2270 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2271
a80aa93d 2272 dm->cached_state = NULL;
0a214e2f 2273
9a65df19
WL
2274#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2275 amdgpu_dm_crtc_secure_display_resume(adev);
2276#endif
2277
9faa4237 2278 amdgpu_dm_irq_resume_late(adev);
4562236b 2279
9340dfd3
HW
2280 amdgpu_dm_smu_write_watermarks_table(adev);
2281
2d1af6a1 2282 return 0;
4562236b
HW
2283}
2284
b8592b48
LL
2285/**
2286 * DOC: DM Lifecycle
2287 *
 2288 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2289 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290 * the base driver's device list to be initialized and torn down accordingly.
2291 *
2292 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2293 */
2294
4562236b
HW
2295static const struct amd_ip_funcs amdgpu_dm_funcs = {
2296 .name = "dm",
2297 .early_init = dm_early_init,
7abcf6b5 2298 .late_init = dm_late_init,
4562236b
HW
2299 .sw_init = dm_sw_init,
2300 .sw_fini = dm_sw_fini,
2301 .hw_init = dm_hw_init,
2302 .hw_fini = dm_hw_fini,
2303 .suspend = dm_suspend,
2304 .resume = dm_resume,
2305 .is_idle = dm_is_idle,
2306 .wait_for_idle = dm_wait_for_idle,
2307 .check_soft_reset = dm_check_soft_reset,
2308 .soft_reset = dm_soft_reset,
2309 .set_clockgating_state = dm_set_clockgating_state,
2310 .set_powergating_state = dm_set_powergating_state,
2311};
2312
2313const struct amdgpu_ip_block_version dm_ip_block =
2314{
2315 .type = AMD_IP_BLOCK_TYPE_DCE,
2316 .major = 1,
2317 .minor = 0,
2318 .rev = 0,
2319 .funcs = &amdgpu_dm_funcs,
2320};
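/*
 * Illustrative usage sketch (an added example, not part of the original
 * source): SoC init code registers this block with something like
 * amdgpu_device_ip_block_add(adev, &dm_ip_block), after which the base
 * driver drives the amd_ip_funcs hooks above through its IP lifecycle.
 */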
2321
ca3268c4 2322
b8592b48
LL
2323/**
2324 * DOC: atomic
2325 *
2326 * *WIP*
2327 */
0a323b84 2328
b3663f70 2329static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2330 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2331 .get_format_info = amd_get_format_info,
366c1baa 2332 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2333 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2334 .atomic_commit = drm_atomic_helper_commit,
54f5499a
AG
2335};
2336
2337static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2339};
2340
94562810
RS
2341static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2342{
2343 u32 max_cll, min_cll, max, min, q, r;
2344 struct amdgpu_dm_backlight_caps *caps;
2345 struct amdgpu_display_manager *dm;
2346 struct drm_connector *conn_base;
2347 struct amdgpu_device *adev;
ec11fe37 2348 struct dc_link *link = NULL;
94562810
RS
2349 static const u8 pre_computed_values[] = {
2350 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2351 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2352
2353 if (!aconnector || !aconnector->dc_link)
2354 return;
2355
ec11fe37 2356 link = aconnector->dc_link;
2357 if (link->connector_signal != SIGNAL_TYPE_EDP)
2358 return;
2359
94562810 2360 conn_base = &aconnector->base;
1348969a 2361 adev = drm_to_adev(conn_base->dev);
94562810
RS
2362 dm = &adev->dm;
2363 caps = &dm->backlight_caps;
2364 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2365 caps->aux_support = false;
2366 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2367 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2368
2369 if (caps->ext_caps->bits.oled == 1 ||
2370 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2371 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2372 caps->aux_support = true;
2373
7a46f05e
TI
2374 if (amdgpu_backlight == 0)
2375 caps->aux_support = false;
2376 else if (amdgpu_backlight == 1)
2377 caps->aux_support = true;
2378
94562810
RS
2379 /* From the specification (CTA-861-G), for calculating the maximum
2380 * luminance we need to use:
2381 * Luminance = 50*2**(CV/32)
2382 * Where CV is a one-byte value.
 2383 * Calculating this expression would require floating point precision;
 2384 * to avoid this complexity level, we take advantage that CV is divided
 2385 * by a constant. From Euclid's division algorithm, we know that CV
 2386 * can be written as: CV = 32*q + r. Next, we replace CV in the
 2387 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
 2388 * need to pre-compute the values of 50*(2**(r/32)). For pre-computing them
 2389 * we just used the following Ruby line:
2390 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2391 * The results of the above expressions can be verified at
2392 * pre_computed_values.
2393 */
2394 q = max_cll >> 5;
2395 r = max_cll % 32;
2396 max = (1 << q) * pre_computed_values[r];
2397
2398 // min luminance: maxLum * (CV/255)^2 / 100
2399 q = DIV_ROUND_CLOSEST(min_cll, 255);
2400 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2401
2402 caps->aux_max_input_signal = max;
2403 caps->aux_min_input_signal = min;
2404}
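/*
 * Worked example (an illustrative addition, not part of the original
 * source): for max_cll = 70 the code above computes q = 70 >> 5 = 2 and
 * r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228,
 * matching the exact formula 50*2**(70/32) ~= 227.8 nits.
 */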
2405
97e51c16
HW
2406void amdgpu_dm_update_connector_after_detect(
2407 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2408{
2409 struct drm_connector *connector = &aconnector->base;
2410 struct drm_device *dev = connector->dev;
b73a22d3 2411 struct dc_sink *sink;
4562236b
HW
2412
2413 /* MST handled by drm_mst framework */
2414 if (aconnector->mst_mgr.mst_state == true)
2415 return;
2416
4562236b 2417 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2418 if (sink)
2419 dc_sink_retain(sink);
4562236b 2420
1f6010a9
DF
2421 /*
2422 * Edid mgmt connector gets first update only in mode_valid hook and then
4562236b 2423 * the connector sink is set to either a fake or a physical sink, depending on link status.
1f6010a9 2424 * Skip if already done during boot.
4562236b
HW
2425 */
2426 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2427 && aconnector->dc_em_sink) {
2428
1f6010a9
DF
2429 /*
 2430 * For S3 resume with headless, use em_sink to fake the stream
2431 * because on resume connector->sink is set to NULL
4562236b
HW
2432 */
2433 mutex_lock(&dev->mode_config.mutex);
2434
2435 if (sink) {
922aa1e1 2436 if (aconnector->dc_sink) {
98e6436d 2437 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2438 /*
 2439 * retain and release below are used to
 2440 * bump up the refcount for the sink because the link doesn't point
 2441 * to it anymore after disconnect, so on the next crtc-to-connector
922aa1e1
AG
 2442 * reshuffle by UMD we would otherwise get an unwanted dc_sink release
2443 */
dcd5fb82 2444 dc_sink_release(aconnector->dc_sink);
922aa1e1 2445 }
4562236b 2446 aconnector->dc_sink = sink;
dcd5fb82 2447 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2448 amdgpu_dm_update_freesync_caps(connector,
2449 aconnector->edid);
4562236b 2450 } else {
98e6436d 2451 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2452 if (!aconnector->dc_sink) {
4562236b 2453 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2454 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2455 }
4562236b
HW
2456 }
2457
2458 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2459
2460 if (sink)
2461 dc_sink_release(sink);
4562236b
HW
2462 return;
2463 }
2464
2465 /*
2466 * TODO: temporary guard to look for proper fix
 2467 * if this sink is an MST sink, we should not do anything
2468 */
dcd5fb82
MF
2469 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2470 dc_sink_release(sink);
4562236b 2471 return;
dcd5fb82 2472 }
4562236b
HW
2473
2474 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2475 /*
2476 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2477 * Do nothing!!
2478 */
f1ad2f5e 2479 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2480 aconnector->connector_id);
dcd5fb82
MF
2481 if (sink)
2482 dc_sink_release(sink);
4562236b
HW
2483 return;
2484 }
2485
f1ad2f5e 2486 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2487 aconnector->connector_id, aconnector->dc_sink, sink);
2488
2489 mutex_lock(&dev->mode_config.mutex);
2490
1f6010a9
DF
2491 /*
2492 * 1. Update status of the drm connector
2493 * 2. Send an event and let userspace tell us what to do
2494 */
4562236b 2495 if (sink) {
1f6010a9
DF
2496 /*
2497 * TODO: check if we still need the S3 mode update workaround.
2498 * If yes, put it here.
2499 */
c64b0d6b 2500 if (aconnector->dc_sink) {
98e6436d 2501 amdgpu_dm_update_freesync_caps(connector, NULL);
c64b0d6b
VL
2502 dc_sink_release(aconnector->dc_sink);
2503 }
4562236b
HW
2504
2505 aconnector->dc_sink = sink;
dcd5fb82 2506 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2507 if (sink->dc_edid.length == 0) {
4562236b 2508 aconnector->edid = NULL;
e6142dd5
AP
2509 if (aconnector->dc_link->aux_mode) {
2510 drm_dp_cec_unset_edid(
2511 &aconnector->dm_dp_aux.aux);
2512 }
900b3cb1 2513 } else {
4562236b 2514 aconnector->edid =
e6142dd5 2515 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2516
c555f023 2517 drm_connector_update_edid_property(connector,
e6142dd5 2518 aconnector->edid);
e6142dd5
AP
2519 if (aconnector->dc_link->aux_mode)
2520 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2521 aconnector->edid);
4562236b 2522 }
e6142dd5 2523
98e6436d 2524 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2525 update_connector_ext_caps(aconnector);
4562236b 2526 } else {
e86e8947 2527 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2528 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2529 drm_connector_update_edid_property(connector, NULL);
4562236b 2530 aconnector->num_modes = 0;
dcd5fb82 2531 dc_sink_release(aconnector->dc_sink);
4562236b 2532 aconnector->dc_sink = NULL;
5326c452 2533 aconnector->edid = NULL;
0c8620d6
BL
2534#ifdef CONFIG_DRM_AMD_DC_HDCP
2535 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2536 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2537 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2538#endif
4562236b
HW
2539 }
2540
2541 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 2542
0f877894
OV
2543 update_subconnector_property(aconnector);
2544
dcd5fb82
MF
2545 if (sink)
2546 dc_sink_release(sink);
4562236b
HW
2547}
2548
2549static void handle_hpd_irq(void *param)
2550{
c84dec2f 2551 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2552 struct drm_connector *connector = &aconnector->base;
2553 struct drm_device *dev = connector->dev;
fbbdadf2 2554 enum dc_connection_type new_connection_type = dc_connection_none;
0c8620d6 2555#ifdef CONFIG_DRM_AMD_DC_HDCP
1348969a 2556 struct amdgpu_device *adev = drm_to_adev(dev);
97f6c917 2557 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 2558#endif
4562236b 2559
1f6010a9
DF
2560 /*
 2561 * In case of failure or MST there is no need to update the connector status or notify the OS,
 2562 * since (in the MST case) MST does this in its own context.
4562236b
HW
2563 */
2564 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2565
0c8620d6 2566#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 2567 if (adev->dm.hdcp_workqueue) {
96a3b32e 2568 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
97f6c917
BL
2569 dm_con_state->update_hdcp = true;
2570 }
0c8620d6 2571#endif
2e0ac3d6
HW
2572 if (aconnector->fake_enable)
2573 aconnector->fake_enable = false;
2574
fbbdadf2
BL
2575 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2576 DRM_ERROR("KMS: Failed to detect connector\n");
2577
2578 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2579 emulated_link_detect(aconnector->dc_link);
2580
2581
2582 drm_modeset_lock_all(dev);
2583 dm_restore_drm_connector_state(dev, connector);
2584 drm_modeset_unlock_all(dev);
2585
2586 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2587 drm_kms_helper_hotplug_event(dev);
2588
2589 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9
AP
2590 if (new_connection_type == dc_connection_none &&
2591 aconnector->dc_link->type == dc_connection_none)
2592 dm_set_dpms_off(aconnector->dc_link);
4562236b 2593
3c4d55c9 2594 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b
HW
2595
2596 drm_modeset_lock_all(dev);
2597 dm_restore_drm_connector_state(dev, connector);
2598 drm_modeset_unlock_all(dev);
2599
2600 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2601 drm_kms_helper_hotplug_event(dev);
2602 }
2603 mutex_unlock(&aconnector->hpd_lock);
2604
2605}
2606
c84dec2f 2607static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2608{
2609 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2610 uint8_t dret;
2611 bool new_irq_handled = false;
2612 int dpcd_addr;
2613 int dpcd_bytes_to_read;
2614
2615 const int max_process_count = 30;
2616 int process_count = 0;
2617
2618 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2619
2620 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2621 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2622 /* DPCD 0x200 - 0x201 for downstream IRQ */
2623 dpcd_addr = DP_SINK_COUNT;
2624 } else {
2625 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2626 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2627 dpcd_addr = DP_SINK_COUNT_ESI;
2628 }
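/*
 * Illustrative arithmetic (an added note, not in the original source),
 * assuming the usual DPCD register offsets DP_SINK_COUNT = 0x200,
 * DP_LANE0_1_STATUS = 0x202, DP_SINK_COUNT_ESI = 0x2002 and
 * DP_PSR_ERROR_STATUS = 0x2006: the branches above read
 * 0x202 - 0x200 = 2 bytes (DPCD 0x200 - 0x201) and
 * 0x2006 - 0x2002 = 4 bytes (DPCD 0x2002 - 0x2005), respectively,
 * matching the comments on each branch.
 */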
2629
2630 dret = drm_dp_dpcd_read(
2631 &aconnector->dm_dp_aux.aux,
2632 dpcd_addr,
2633 esi,
2634 dpcd_bytes_to_read);
2635
2636 while (dret == dpcd_bytes_to_read &&
2637 process_count < max_process_count) {
2638 uint8_t retry;
2639 dret = 0;
2640
2641 process_count++;
2642
f1ad2f5e 2643 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
2644 /* handle HPD short pulse irq */
2645 if (aconnector->mst_mgr.mst_state)
2646 drm_dp_mst_hpd_irq(
2647 &aconnector->mst_mgr,
2648 esi,
2649 &new_irq_handled);
4562236b
HW
2650
2651 if (new_irq_handled) {
 2652 /* ACK at DPCD to notify downstream */
2653 const int ack_dpcd_bytes_to_write =
2654 dpcd_bytes_to_read - 1;
2655
2656 for (retry = 0; retry < 3; retry++) {
2657 uint8_t wret;
2658
2659 wret = drm_dp_dpcd_write(
2660 &aconnector->dm_dp_aux.aux,
2661 dpcd_addr + 1,
2662 &esi[1],
2663 ack_dpcd_bytes_to_write);
2664 if (wret == ack_dpcd_bytes_to_write)
2665 break;
2666 }
2667
1f6010a9 2668 /* check if there is a new irq to be handled */
4562236b
HW
2669 dret = drm_dp_dpcd_read(
2670 &aconnector->dm_dp_aux.aux,
2671 dpcd_addr,
2672 esi,
2673 dpcd_bytes_to_read);
2674
2675 new_irq_handled = false;
d4a6e8a9 2676 } else {
4562236b 2677 break;
d4a6e8a9 2678 }
4562236b
HW
2679 }
2680
2681 if (process_count == max_process_count)
f1ad2f5e 2682 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
2683}
2684
2685static void handle_hpd_rx_irq(void *param)
2686{
c84dec2f 2687 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2688 struct drm_connector *connector = &aconnector->base;
2689 struct drm_device *dev = connector->dev;
53cbf65c 2690 struct dc_link *dc_link = aconnector->dc_link;
4562236b 2691 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 2692 bool result = false;
fbbdadf2 2693 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 2694 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 2695 union hpd_irq_data hpd_irq_data;
2a0f9270
BL
2696
2697 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 2698
1f6010a9
DF
2699 /*
 2700 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
4562236b
HW
 2701 * conflict; after the i2c helper is implemented, this mutex should be
 2702 * retired.
2703 */
53cbf65c 2704 if (dc_link->type != dc_connection_mst_branch)
4562236b
HW
2705 mutex_lock(&aconnector->hpd_lock);
2706
3083a984
QZ
2707 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2708
2709 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2710 (dc_link->type == dc_connection_mst_branch)) {
2711 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2712 result = true;
2713 dm_handle_hpd_rx_irq(aconnector);
2714 goto out;
2715 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2716 result = false;
2717 dm_handle_hpd_rx_irq(aconnector);
2718 goto out;
2719 }
2720 }
2721
c8ea79a8 2722 mutex_lock(&adev->dm.dc_lock);
2a0f9270 2723#ifdef CONFIG_DRM_AMD_DC_HDCP
c8ea79a8 2724 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2a0f9270 2725#else
c8ea79a8 2726 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2a0f9270 2727#endif
c8ea79a8
QZ
2728 mutex_unlock(&adev->dm.dc_lock);
2729
3083a984 2730out:
c8ea79a8 2731 if (result && !is_mst_root_connector) {
4562236b 2732 /* Downstream Port status changed. */
fbbdadf2
BL
2733 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2734 DRM_ERROR("KMS: Failed to detect connector\n");
2735
2736 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2737 emulated_link_detect(dc_link);
2738
2739 if (aconnector->fake_enable)
2740 aconnector->fake_enable = false;
2741
2742 amdgpu_dm_update_connector_after_detect(aconnector);
2743
2744
2745 drm_modeset_lock_all(dev);
2746 dm_restore_drm_connector_state(dev, connector);
2747 drm_modeset_unlock_all(dev);
2748
2749 drm_kms_helper_hotplug_event(dev);
2750 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
2751
2752 if (aconnector->fake_enable)
2753 aconnector->fake_enable = false;
2754
4562236b
HW
2755 amdgpu_dm_update_connector_after_detect(aconnector);
2756
2757
2758 drm_modeset_lock_all(dev);
2759 dm_restore_drm_connector_state(dev, connector);
2760 drm_modeset_unlock_all(dev);
2761
2762 drm_kms_helper_hotplug_event(dev);
2763 }
2764 }
2a0f9270 2765#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
2766 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2767 if (adev->dm.hdcp_workqueue)
2768 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2769 }
2a0f9270 2770#endif
4562236b 2771
e86e8947
HV
2772 if (dc_link->type != dc_connection_mst_branch) {
2773 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
4562236b 2774 mutex_unlock(&aconnector->hpd_lock);
e86e8947 2775 }
4562236b
HW
2776}
2777
2778static void register_hpd_handlers(struct amdgpu_device *adev)
2779{
4a580877 2780 struct drm_device *dev = adev_to_drm(adev);
4562236b 2781 struct drm_connector *connector;
c84dec2f 2782 struct amdgpu_dm_connector *aconnector;
4562236b
HW
2783 const struct dc_link *dc_link;
2784 struct dc_interrupt_params int_params = {0};
2785
2786 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2787 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2788
2789 list_for_each_entry(connector,
2790 &dev->mode_config.connector_list, head) {
2791
c84dec2f 2792 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2793 dc_link = aconnector->dc_link;
2794
2795 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2796 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2797 int_params.irq_source = dc_link->irq_source_hpd;
2798
2799 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2800 handle_hpd_irq,
2801 (void *) aconnector);
2802 }
2803
2804 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2805
2806 /* Also register for DP short pulse (hpd_rx). */
2807 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2808 int_params.irq_source = dc_link->irq_source_hpd_rx;
2809
2810 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2811 handle_hpd_rx_irq,
2812 (void *) aconnector);
2813 }
2814 }
2815}
2816
55e56389
MR
2817#if defined(CONFIG_DRM_AMD_DC_SI)
2818/* Register IRQ sources and initialize IRQ callbacks */
2819static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2820{
2821 struct dc *dc = adev->dm.dc;
2822 struct common_irq_params *c_irq_params;
2823 struct dc_interrupt_params int_params = {0};
2824 int r;
2825 int i;
2826 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2827
2828 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2829 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2830
2831 /*
2832 * Actions of amdgpu_irq_add_id():
2833 * 1. Register a set() function with base driver.
2834 * Base driver will call set() function to enable/disable an
2835 * interrupt in DC hardware.
2836 * 2. Register amdgpu_dm_irq_handler().
2837 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2838 * coming from DC hardware.
2839 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2840 * for acknowledging and handling. */
2841
2842 /* Use VBLANK interrupt */
2843 for (i = 0; i < adev->mode_info.num_crtc; i++) {
 2844 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2845 if (r) {
2846 DRM_ERROR("Failed to add crtc irq id!\n");
2847 return r;
2848 }
2849
2850 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2851 int_params.irq_source =
 2852 dc_interrupt_to_irq_source(dc, i + 1, 0);
2853
2854 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2855
2856 c_irq_params->adev = adev;
2857 c_irq_params->irq_src = int_params.irq_source;
2858
2859 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2860 dm_crtc_high_irq, c_irq_params);
2861 }
2862
2863 /* Use GRPH_PFLIP interrupt */
2864 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2865 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2866 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2867 if (r) {
2868 DRM_ERROR("Failed to add page flip irq id!\n");
2869 return r;
2870 }
2871
2872 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2873 int_params.irq_source =
2874 dc_interrupt_to_irq_source(dc, i, 0);
2875
2876 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2877
2878 c_irq_params->adev = adev;
2879 c_irq_params->irq_src = int_params.irq_source;
2880
2881 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2882 dm_pflip_high_irq, c_irq_params);
2883
2884 }
2885
2886 /* HPD */
2887 r = amdgpu_irq_add_id(adev, client_id,
2888 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2889 if (r) {
2890 DRM_ERROR("Failed to add hpd irq id!\n");
2891 return r;
2892 }
2893
2894 register_hpd_handlers(adev);
2895
2896 return 0;
2897}
2898#endif
2899
4562236b
HW
2900/* Register IRQ sources and initialize IRQ callbacks */
2901static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2902{
2903 struct dc *dc = adev->dm.dc;
2904 struct common_irq_params *c_irq_params;
2905 struct dc_interrupt_params int_params = {0};
2906 int r;
2907 int i;
1ffdeca6 2908 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 2909
84374725 2910 if (adev->asic_type >= CHIP_VEGA10)
3760f76c 2911 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
2912
2913 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2914 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2915
1f6010a9
DF
2916 /*
2917 * Actions of amdgpu_irq_add_id():
4562236b
HW
2918 * 1. Register a set() function with base driver.
2919 * Base driver will call set() function to enable/disable an
2920 * interrupt in DC hardware.
2921 * 2. Register amdgpu_dm_irq_handler().
2922 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2923 * coming from DC hardware.
2924 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2925 * for acknowledging and handling. */
2926
b57de80a 2927 /* Use VBLANK interrupt */
e9029155 2928 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 2929 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
2930 if (r) {
2931 DRM_ERROR("Failed to add crtc irq id!\n");
2932 return r;
2933 }
2934
2935 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2936 int_params.irq_source =
3d761e79 2937 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 2938
b57de80a 2939 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
2940
2941 c_irq_params->adev = adev;
2942 c_irq_params->irq_src = int_params.irq_source;
2943
2944 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2945 dm_crtc_high_irq, c_irq_params);
2946 }
2947
d2574c33
MK
2948 /* Use VUPDATE interrupt */
2949 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2950 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2951 if (r) {
2952 DRM_ERROR("Failed to add vupdate irq id!\n");
2953 return r;
2954 }
2955
2956 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2957 int_params.irq_source =
2958 dc_interrupt_to_irq_source(dc, i, 0);
2959
2960 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2961
2962 c_irq_params->adev = adev;
2963 c_irq_params->irq_src = int_params.irq_source;
2964
2965 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2966 dm_vupdate_high_irq, c_irq_params);
2967 }
2968
3d761e79 2969 /* Use GRPH_PFLIP interrupt */
4562236b
HW
2970 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2971 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 2972 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
2973 if (r) {
2974 DRM_ERROR("Failed to add page flip irq id!\n");
2975 return r;
2976 }
2977
2978 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2979 int_params.irq_source =
2980 dc_interrupt_to_irq_source(dc, i, 0);
2981
2982 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2983
2984 c_irq_params->adev = adev;
2985 c_irq_params->irq_src = int_params.irq_source;
2986
2987 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2988 dm_pflip_high_irq, c_irq_params);
2989
2990 }
2991
2992 /* HPD */
2c8ad2d5
AD
2993 r = amdgpu_irq_add_id(adev, client_id,
2994 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
2995 if (r) {
2996 DRM_ERROR("Failed to add hpd irq id!\n");
2997 return r;
2998 }
2999
3000 register_hpd_handlers(adev);
3001
3002 return 0;
3003}
3004
b86a1aa3 3005#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3006/* Register IRQ sources and initialize IRQ callbacks */
3007static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3008{
3009 struct dc *dc = adev->dm.dc;
3010 struct common_irq_params *c_irq_params;
3011 struct dc_interrupt_params int_params = {0};
3012 int r;
3013 int i;
660d5406
WL
3014#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3015 static const unsigned int vrtl_int_srcid[] = {
3016 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3017 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3018 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3019 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3020 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3021 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3022 };
3023#endif
ff5ef992
AD
3024
3025 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3026 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3027
1f6010a9
DF
3028 /*
3029 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
3030 * 1. Register a set() function with base driver.
3031 * Base driver will call set() function to enable/disable an
3032 * interrupt in DC hardware.
3033 * 2. Register amdgpu_dm_irq_handler().
3034 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3035 * coming from DC hardware.
3036 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3037 * for acknowledging and handling.
1f6010a9 3038 */
ff5ef992
AD
3039
3040 /* Use VSTARTUP interrupt */
3041 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3042 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3043 i++) {
3760f76c 3044 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
3045
3046 if (r) {
3047 DRM_ERROR("Failed to add crtc irq id!\n");
3048 return r;
3049 }
3050
3051 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3052 int_params.irq_source =
3053 dc_interrupt_to_irq_source(dc, i, 0);
3054
3055 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3056
3057 c_irq_params->adev = adev;
3058 c_irq_params->irq_src = int_params.irq_source;
3059
2346ef47
NK
3060 amdgpu_dm_irq_register_interrupt(
3061 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3062 }
3063
86bc2219
WL
3064 /* Use otg vertical line interrupt */
3065#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
660d5406
WL
3066 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3067 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3068 vrtl_int_srcid[i], &adev->vline0_irq);
86bc2219
WL
3069
3070 if (r) {
3071 DRM_ERROR("Failed to add vline0 irq id!\n");
3072 return r;
3073 }
3074
3075 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3076 int_params.irq_source =
660d5406
WL
3077 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3078
3079 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3080 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3081 break;
3082 }
86bc2219
WL
3083
3084 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3085 - DC_IRQ_SOURCE_DC1_VLINE0];
3086
3087 c_irq_params->adev = adev;
3088 c_irq_params->irq_src = int_params.irq_source;
3089
3090 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3091 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3092 }
3093#endif
3094
2346ef47
NK
3095 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3096 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3097 * to trigger at end of each vblank, regardless of state of the lock,
3098 * matching DCE behaviour.
3099 */
3100 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3101 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3102 i++) {
3103 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3104
3105 if (r) {
3106 DRM_ERROR("Failed to add vupdate irq id!\n");
3107 return r;
3108 }
3109
3110 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3111 int_params.irq_source =
3112 dc_interrupt_to_irq_source(dc, i, 0);
3113
3114 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3115
3116 c_irq_params->adev = adev;
3117 c_irq_params->irq_src = int_params.irq_source;
3118
ff5ef992 3119 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 3120 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
3121 }
3122
ff5ef992
AD
3123 /* Use GRPH_PFLIP interrupt */
3124 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3125 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3126 i++) {
3760f76c 3127 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
3128 if (r) {
3129 DRM_ERROR("Failed to add page flip irq id!\n");
3130 return r;
3131 }
3132
3133 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3134 int_params.irq_source =
3135 dc_interrupt_to_irq_source(dc, i, 0);
3136
3137 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3138
3139 c_irq_params->adev = adev;
3140 c_irq_params->irq_src = int_params.irq_source;
3141
3142 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3143 dm_pflip_high_irq, c_irq_params);
3144
3145 }
3146
a08f16cf
LHM
3147 if (dc->ctx->dmub_srv) {
3148 i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3149 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3150
3151 if (r) {
3152 DRM_ERROR("Failed to add dmub trace irq id!\n");
3153 return r;
3154 }
3155
3156 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3157 int_params.irq_source =
3158 dc_interrupt_to_irq_source(dc, i, 0);
3159
3160 c_irq_params = &adev->dm.dmub_trace_params[0];
3161
3162 c_irq_params->adev = adev;
3163 c_irq_params->irq_src = int_params.irq_source;
3164
3165 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3166 dm_dmub_trace_high_irq, c_irq_params);
3167 }
3168
ff5ef992 3169 /* HPD */
3760f76c 3170 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
ff5ef992
AD
3171 &adev->hpd_irq);
3172 if (r) {
3173 DRM_ERROR("Failed to add hpd irq id!\n");
3174 return r;
3175 }
3176
3177 register_hpd_handlers(adev);
3178
3179 return 0;
3180}
3181#endif
3182
eb3dc897
NK
3183/*
3184 * Acquires the lock for the atomic state object and returns
3185 * the new atomic state.
3186 *
3187 * This should only be called during atomic check.
3188 */
3189static int dm_atomic_get_state(struct drm_atomic_state *state,
3190 struct dm_atomic_state **dm_state)
3191{
3192 struct drm_device *dev = state->dev;
1348969a 3193 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3194 struct amdgpu_display_manager *dm = &adev->dm;
3195 struct drm_private_state *priv_state;
eb3dc897
NK
3196
3197 if (*dm_state)
3198 return 0;
3199
eb3dc897
NK
3200 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3201 if (IS_ERR(priv_state))
3202 return PTR_ERR(priv_state);
3203
3204 *dm_state = to_dm_atomic_state(priv_state);
3205
3206 return 0;
3207}
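/*
 * Illustrative usage sketch (an added example, not part of the original
 * source). A caller inside atomic check would typically do:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context now points at the locked private DC state
 */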
3208
dfd84d90 3209static struct dm_atomic_state *
eb3dc897
NK
3210dm_atomic_get_new_state(struct drm_atomic_state *state)
3211{
3212 struct drm_device *dev = state->dev;
1348969a 3213 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3214 struct amdgpu_display_manager *dm = &adev->dm;
3215 struct drm_private_obj *obj;
3216 struct drm_private_state *new_obj_state;
3217 int i;
3218
3219 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3220 if (obj->funcs == dm->atomic_obj.funcs)
3221 return to_dm_atomic_state(new_obj_state);
3222 }
3223
3224 return NULL;
3225}
3226
eb3dc897
NK
3227static struct drm_private_state *
3228dm_atomic_duplicate_state(struct drm_private_obj *obj)
3229{
3230 struct dm_atomic_state *old_state, *new_state;
3231
3232 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3233 if (!new_state)
3234 return NULL;
3235
3236 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3237
813d20dc
AW
3238 old_state = to_dm_atomic_state(obj->state);
3239
3240 if (old_state && old_state->context)
3241 new_state->context = dc_copy_state(old_state->context);
3242
eb3dc897
NK
3243 if (!new_state->context) {
3244 kfree(new_state);
3245 return NULL;
3246 }
3247
eb3dc897
NK
3248 return &new_state->base;
3249}
3250
3251static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3252 struct drm_private_state *state)
3253{
3254 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3255
3256 if (dm_state && dm_state->context)
3257 dc_release_state(dm_state->context);
3258
3259 kfree(dm_state);
3260}
3261
3262static struct drm_private_state_funcs dm_atomic_state_funcs = {
3263 .atomic_duplicate_state = dm_atomic_duplicate_state,
3264 .atomic_destroy_state = dm_atomic_destroy_state,
3265};
3266
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned *min, unsigned *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}

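/*
 * Worked example (illustrative only, not compiled logic): with the
 * default PWM-mode caps min_input_signal = 12 and max_input_signal = 255,
 * get_brightness_range() yields min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535. A user brightness of 128 (out of
 * AMDGPU_MAX_BL_LEVEL = 255) then maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 * The 0x101 multiplier replicates the 8-bit firmware value into both
 * bytes of the 16-bit PWM range, so 0xFF becomes exactly 0xFFFF.
 */
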
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = NULL;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	link = (struct dc_link *)dm->backlight_link;

	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
	// Change brightness based on AUX property
	if (caps.aux_support)
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
	else
		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);

	return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	if (caps.aux_support) {
		struct dc_link *link = (struct dc_link *)dm->backlight_link;
		u32 avg, peak;
		bool rc;

		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
		if (!rc)
			return bd->props.brightness;
		return convert_brightness_to_user(&caps, avg);
	} else {
		int ret = dc_link_get_backlight_level(dm->backlight_link);

		if (ret == DC_ERROR_UNEXPECTED)
			return bd->props.brightness;
		return convert_brightness_to_user(&caps, ret);
	}
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm);

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
						      adev_to_drm(dm->adev)->dev,
						      dm,
						      &amdgpu_dm_backlight_ops,
						      &props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

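/*
 * Usage sketch (illustrative): once registration succeeds, the device
 * appears under /sys/class/backlight/ with the name built above, e.g.
 * amdgpu_bl0 for DRM primary index 0, and can be driven from userspace:
 *
 *   # cat /sys/class/backlight/amdgpu_bl0/max_brightness
 *   255
 *   # echo 128 > /sys/class/backlight/amdgpu_bl0/brightness
 *
 * The exact index depends on the DRM primary node and is an assumption
 * here, not something this file guarantees.
 */
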
static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

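/*
 * Example of the possible_crtcs encoding above (illustrative): each bit
 * selects one CRTC, so a primary plane with plane_id 2 gets
 * possible_crtcs = 1 << 2 = 0b100 and is only usable on CRTC 2, while
 * an overlay plane (plane_id >= max_streams) gets 0xff and may be
 * assigned to any of the first eight CRTCs.
 */
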
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

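/*
 * Usage sketch (illustrative, CONFIG_DEBUG_KERNEL_DC builds only): the
 * write-only attribute registered in dm_early_init() below lets a
 * developer exercise the suspend/resume path from userspace. The sysfs
 * location is an assumption based on the device name:
 *
 *   # echo 0 > /sys/devices/.../s3_debug   (calls dm_suspend())
 *   # echo 1 > /sys/devices/.../s3_debug   (calls dm_resume())
 */
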
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_VANGOGH:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
	case CHIP_DIMGREY_CAVEFISH:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}

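/*
 * Example of the fixed-point scale units used here (illustrative):
 * 1000 == 1.0, so the fallback limits in fill_dc_scaling_info() below,
 * min_downscale = 250 and max_upscale = 16000, mean the plane may be
 * shrunk to 1/4 of its source size or enlarged up to 16x.
 */
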
static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}

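/*
 * Worked example (illustrative): DRM source coordinates are 16.16 fixed
 * point, so src_w = 1920 << 16 = 125829120 becomes src_rect.width = 1920
 * after the >> 16. Scaling that 1920-wide source to a 960-wide
 * destination gives scale_w = 960 * 1000 / 1920 = 500, i.e. a 2:1
 * downscale, which is accepted whenever min_downscale <= 500.
 */
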
static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
	    adev->asic_type == CHIP_VANGOGH)
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

static int
validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

static bool
modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned
modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

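/*
 * Example of the modifier bitfield accessors (illustrative): for a GFX9
 * modifier built as AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE,
 * AMD_FMT_MOD_TILE_GFX9_64K_S_X) | ..., AMD_FMT_MOD_GET(TILE, modifier)
 * recovers the swizzle mode, and the "& 3" in
 * dm_plane_format_mod_supported() below extracts the micro-tile class
 * (Z/S/D/R) from its low two bits.
 */
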
static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}

static void
fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
				    union dc_tiling_info *tiling_info,
				    uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);

	fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}

enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};

static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);

	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow this modifier, because core DRM still
	 * checks LINEAR support if userspace does not provide modifiers.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return true;

	/*
	 * The arbitrary tiling support for multiplane formats has not been
	 * hooked up.
	 */
	if (info->num_planes > 1)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
	}

	return true;
}

static void
add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}

4296
4297static void
4298add_gfx9_modifiers(const struct amdgpu_device *adev,
4299 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4300{
4301 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4302 int pipe_xor_bits = min(8, pipes +
4303 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4304 int bank_xor_bits = min(8 - pipe_xor_bits,
4305 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4306 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4307 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4308
4309
4310 if (adev->family == AMDGPU_FAMILY_RV) {
4311 /* Raven2 and later */
4312 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4313
4314 /*
4315 * No _D DCC swizzles yet because we only allow 32bpp, which
4316 * doesn't support _D on DCN
4317 */
4318
4319 if (has_constant_encode) {
4320 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4321 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4322 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4323 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4324 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4325 AMD_FMT_MOD_SET(DCC, 1) |
4326 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4327 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4328 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4329 }
4330
4331 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4332 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4333 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4334 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4335 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4336 AMD_FMT_MOD_SET(DCC, 1) |
4337 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4338 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4339 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4340
4341 if (has_constant_encode) {
4342 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4343 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4344 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4345 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4346 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4347 AMD_FMT_MOD_SET(DCC, 1) |
4348 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4349 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4350 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4351
4352 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4353 AMD_FMT_MOD_SET(RB, rb) |
4354 AMD_FMT_MOD_SET(PIPE, pipes));
4355 }
4356
4357 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4358 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4359 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4360 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4361 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4362 AMD_FMT_MOD_SET(DCC, 1) |
4363 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4364 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4365 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4366 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4367 AMD_FMT_MOD_SET(RB, rb) |
4368 AMD_FMT_MOD_SET(PIPE, pipes));
4369 }
4370
4371 /*
4372 * Only supported for 64bpp on Raven, will be filtered on format in
4373 * dm_plane_format_mod_supported.
4374 */
4375 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4376 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4377 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4378 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4379 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4380
4381 if (adev->family == AMDGPU_FAMILY_RV) {
4382 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4383 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4384 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4385 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4386 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4387 }
4388
4389 /*
4390 * Only supported for 64bpp on Raven, will be filtered on format in
4391 * dm_plane_format_mod_supported.
4392 */
4393 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4394 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4395 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4396
4397 if (adev->family == AMDGPU_FAMILY_RV) {
4398 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4399 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4400 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4401 }
4402}

static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static int
get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;
	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);

	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	}

	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	if (!*mods)
		return -ENOMEM;

	return 0;
}

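/*
 * Example of a resulting list (illustrative): for a cursor plane the
 * array is simply { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID },
 * where DRM_FORMAT_MOD_INVALID is the sentinel DRM core expects at the
 * end of a format_modifiers list, presumably consumed via
 * amdgpu_dm_plane_init() when the plane is registered.
 */
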
static int
fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
					  const struct amdgpu_framebuffer *afb,
					  const enum surface_pixel_format format,
					  const enum dc_rotation_angle rotation,
					  const struct plane_size *plane_size,
					  union dc_tiling_info *tiling_info,
					  struct dc_plane_dcc_param *dcc,
					  struct dc_plane_address *address,
					  const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret;

	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		return ret;

	return 0;
}

static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}

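/*
 * Worked example (illustrative): for a 1920x1080 NV12 framebuffer the
 * luma plane sits at fb->offsets[0] with surface_pitch =
 * pitches[0] / 1 byte, while the chroma plane sits at fb->offsets[1]
 * with chroma_size 960x540 (width/2 x height/2) and chroma_pitch =
 * pitches[1] / 2, since NV12 stores interleaved CbCr at two bytes per
 * sample pair.
 */
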
static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

4696
004fefa3
NK
4697static int
4698fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 4699 const enum surface_pixel_format format,
004fefa3
NK
4700 enum dc_color_space *color_space)
4701{
4702 bool full_range;
4703
4704 *color_space = COLOR_SPACE_SRGB;
4705
4706 /* DRM color properties only affect non-RGB formats. */
695af5f9 4707 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
4708 return 0;
4709
4710 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4711
4712 switch (plane_state->color_encoding) {
4713 case DRM_COLOR_YCBCR_BT601:
4714 if (full_range)
4715 *color_space = COLOR_SPACE_YCBCR601;
4716 else
4717 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4718 break;
4719
4720 case DRM_COLOR_YCBCR_BT709:
4721 if (full_range)
4722 *color_space = COLOR_SPACE_YCBCR709;
4723 else
4724 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4725 break;
4726
4727 case DRM_COLOR_YCBCR_BT2020:
4728 if (full_range)
4729 *color_space = COLOR_SPACE_2020_YCBCR;
4730 else
4731 return -EINVAL;
4732 break;
4733
4734 default:
4735 return -EINVAL;
4736 }
4737
4738 return 0;
4739}
4740
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %p4cc\n",
			&fb->format->format);
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  afb->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  afb->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

3ee6b26b
AD
4907static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4908 const struct dm_connector_state *dm_state,
4909 struct dc_stream_state *stream)
e7b07cee
HW
4910{
4911 enum amdgpu_rmx_type rmx_type;
4912
4913 struct rect src = { 0 }; /* viewport in composition space*/
4914 struct rect dst = { 0 }; /* stream addressable area */
4915
4916 /* no mode. nothing to be done */
4917 if (!mode)
4918 return;
4919
4920 /* Full screen scaling by default */
4921 src.width = mode->hdisplay;
4922 src.height = mode->vdisplay;
4923 dst.width = stream->timing.h_addressable;
4924 dst.height = stream->timing.v_addressable;
4925
f4791779
HW
4926 if (dm_state) {
4927 rmx_type = dm_state->scaling;
4928 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4929 if (src.width * dst.height <
4930 src.height * dst.width) {
4931 /* height needs less upscaling/more downscaling */
4932 dst.width = src.width *
4933 dst.height / src.height;
4934 } else {
4935 /* width needs less upscaling/more downscaling */
4936 dst.height = src.height *
4937 dst.width / src.width;
4938 }
4939 } else if (rmx_type == RMX_CENTER) {
4940 dst = src;
e7b07cee 4941 }
e7b07cee 4942
f4791779
HW
4943 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4944 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 4945
f4791779
HW
4946 if (dm_state->underscan_enable) {
4947 dst.x += dm_state->underscan_hborder / 2;
4948 dst.y += dm_state->underscan_vborder / 2;
4949 dst.width -= dm_state->underscan_hborder;
4950 dst.height -= dm_state->underscan_vborder;
4951 }
e7b07cee
HW
4952 }
4953
4954 stream->src = src;
4955 stream->dst = dst;
4956
4711c033
LT
4957 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4958 dst.x, dst.y, dst.width, dst.height);
e7b07cee
HW
4959
4960}
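/*
 * Editor's note: a worked example of the RMX_ASPECT path above (values
 * are illustrative, not from the source). With a 1280x720 viewport on a
 * 1920x1200 addressable stream: src.width * dst.height = 1536000 and
 * src.height * dst.width = 1382400, so width needs less upscaling and
 * dst.height = 720 * 1920 / 1280 = 1080. Centering then gives dst.x = 0,
 * dst.y = (1200 - 1080) / 2 = 60, i.e. a letterboxed 1920x1080 rectangle.
 */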
4961
3ee6b26b 4962static enum dc_color_depth
42ba01fc 4963convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 4964 bool is_y420, int requested_bpc)
e7b07cee 4965{
1bc22f20 4966 uint8_t bpc;
01c22997 4967
1bc22f20
SW
4968 if (is_y420) {
4969 bpc = 8;
4970
4971 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4972 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4973 bpc = 16;
4974 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4975 bpc = 12;
4976 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4977 bpc = 10;
4978 } else {
4979 bpc = (uint8_t)connector->display_info.bpc;
4980 /* Assume 8 bpc by default if no bpc is specified. */
4981 bpc = bpc ? bpc : 8;
4982 }
e7b07cee 4983
cbd14ae7 4984 if (requested_bpc > 0) {
01c22997
NK
4985 /*
4986 * Cap display bpc based on the user requested value.
4987 *
4988	 * The value for state->max_bpc may not be correctly updated
4989 * depending on when the connector gets added to the state
4990 * or if this was called outside of atomic check, so it
4991 * can't be used directly.
4992 */
cbd14ae7 4993 bpc = min_t(u8, bpc, requested_bpc);
01c22997 4994
1825fd34
NK
4995 /* Round down to the nearest even number. */
4996 bpc = bpc - (bpc & 1);
4997 }
07e3a1cf 4998
e7b07cee
HW
4999 switch (bpc) {
5000 case 0:
1f6010a9
DF
5001 /*
5002	 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
5003	 * EDID revisions before 1.4.
5004 * TODO: Fix edid parsing
5005 */
5006 return COLOR_DEPTH_888;
5007 case 6:
5008 return COLOR_DEPTH_666;
5009 case 8:
5010 return COLOR_DEPTH_888;
5011 case 10:
5012 return COLOR_DEPTH_101010;
5013 case 12:
5014 return COLOR_DEPTH_121212;
5015 case 14:
5016 return COLOR_DEPTH_141414;
5017 case 16:
5018 return COLOR_DEPTH_161616;
5019 default:
5020 return COLOR_DEPTH_UNDEFINED;
5021 }
5022}
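/*
 * Editor's note: illustrative walk-through of the capping logic above
 * (values hypothetical). A sink advertising 12 bpc with a user-requested
 * max of 11 yields min(12, 11) = 11; the even-number rounding
 * (bpc - (bpc & 1)) reduces that to 10, which maps to COLOR_DEPTH_101010.
 */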
5023
3ee6b26b
AD
5024static enum dc_aspect_ratio
5025get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 5026{
e11d4147
LSL
5027 /* 1-1 mapping, since both enums follow the HDMI spec. */
5028 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
5029}
5030
3ee6b26b
AD
5031static enum dc_color_space
5032get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
5033{
5034 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5035
5036 switch (dc_crtc_timing->pixel_encoding) {
5037 case PIXEL_ENCODING_YCBCR422:
5038 case PIXEL_ENCODING_YCBCR444:
5039 case PIXEL_ENCODING_YCBCR420:
5040 {
5041 /*
5042	 * 27.03 MHz (270300 in 100 Hz units) is the separation point
5043	 * between HDTV and SDTV according to the HDMI spec; we use
5044	 * YCbCr709 and YCbCr601 respectively.
5045 */
380604e2 5046 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
5047 if (dc_crtc_timing->flags.Y_ONLY)
5048 color_space =
5049 COLOR_SPACE_YCBCR709_LIMITED;
5050 else
5051 color_space = COLOR_SPACE_YCBCR709;
5052 } else {
5053 if (dc_crtc_timing->flags.Y_ONLY)
5054 color_space =
5055 COLOR_SPACE_YCBCR601_LIMITED;
5056 else
5057 color_space = COLOR_SPACE_YCBCR601;
5058 }
5059
5060 }
5061 break;
5062 case PIXEL_ENCODING_RGB:
5063 color_space = COLOR_SPACE_SRGB;
5064 break;
5065
5066 default:
5067 WARN_ON(1);
5068 break;
5069 }
5070
5071 return color_space;
5072}
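/*
 * Editor's note: the threshold above in concrete numbers (illustrative).
 * A CEA 1920x1080@60 mode has pix_clk_100hz = 1485000 (148.5 MHz), well
 * above 270300 (27.03 MHz), so YCbCr output is tagged BT.709; a
 * 720x480@60 SDTV mode at 27.0 MHz (270000) falls below the cutoff and
 * is tagged BT.601.
 */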
5073
ea117312
TA
5074static bool adjust_colour_depth_from_display_info(
5075 struct dc_crtc_timing *timing_out,
5076 const struct drm_display_info *info)
400443e8 5077{
ea117312 5078 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 5079 int normalized_clk;
400443e8 5080 do {
380604e2 5081 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
5082 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5083 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5084 normalized_clk /= 2;
5085		/* Adjust the pixel clock per the HDMI spec, based on colour depth */
ea117312
TA
5086 switch (depth) {
5087 case COLOR_DEPTH_888:
5088 break;
400443e8
ML
5089 case COLOR_DEPTH_101010:
5090 normalized_clk = (normalized_clk * 30) / 24;
5091 break;
5092 case COLOR_DEPTH_121212:
5093 normalized_clk = (normalized_clk * 36) / 24;
5094 break;
5095 case COLOR_DEPTH_161616:
5096 normalized_clk = (normalized_clk * 48) / 24;
5097 break;
5098 default:
ea117312
TA
5099 /* The above depths are the only ones valid for HDMI. */
5100 return false;
400443e8 5101 }
ea117312
TA
5102 if (normalized_clk <= info->max_tmds_clock) {
5103 timing_out->display_color_depth = depth;
5104 return true;
5105 }
5106 } while (--depth > COLOR_DEPTH_666);
5107 return false;
400443e8 5108}
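/*
 * Editor's note: a worked pass through the loop above (sink limit is
 * hypothetical). A 4k@60 YCbCr 4:2:0 mode has pix_clk_100hz = 5940000,
 * so normalized_clk = 594000 / 2 = 297000 kHz. Against a 340000 kHz
 * max_tmds_clock: 12 bpc needs 297000 * 36 / 24 = 445500 (too high),
 * 10 bpc needs 297000 * 30 / 24 = 371250 (still too high), and 8 bpc
 * fits at 297000, so the loop settles on COLOR_DEPTH_888.
 */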
e7b07cee 5109
42ba01fc
NK
5110static void fill_stream_properties_from_drm_display_mode(
5111 struct dc_stream_state *stream,
5112 const struct drm_display_mode *mode_in,
5113 const struct drm_connector *connector,
5114 const struct drm_connector_state *connector_state,
cbd14ae7
SW
5115 const struct dc_stream_state *old_stream,
5116 int requested_bpc)
e7b07cee
HW
5117{
5118 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 5119 const struct drm_display_info *info = &connector->display_info;
d4252eee 5120 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
5121 struct hdmi_vendor_infoframe hv_frame;
5122 struct hdmi_avi_infoframe avi_frame;
e7b07cee 5123
acf83f86
WL
5124 memset(&hv_frame, 0, sizeof(hv_frame));
5125 memset(&avi_frame, 0, sizeof(avi_frame));
5126
e7b07cee
HW
5127 timing_out->h_border_left = 0;
5128 timing_out->h_border_right = 0;
5129 timing_out->v_border_top = 0;
5130 timing_out->v_border_bottom = 0;
5131 /* TODO: un-hardcode */
fe61a2f1 5132 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 5133 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 5134 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
5135 else if (drm_mode_is_420_also(info, mode_in)
5136 && aconnector->force_yuv420_output)
5137 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 5138 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 5139 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
5140 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5141 else
5142 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5143
5144 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5145 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
5146 connector,
5147 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5148 requested_bpc);
e7b07cee
HW
5149 timing_out->scan_type = SCANNING_TYPE_NODATA;
5150 timing_out->hdmi_vic = 0;
b333730d
BL
5151
5152	if (old_stream) {
5153 timing_out->vic = old_stream->timing.vic;
5154 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5155 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5156 } else {
5157 timing_out->vic = drm_match_cea_mode(mode_in);
5158 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5159 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5160 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5161 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5162 }
e7b07cee 5163
1cb1d477
WL
5164 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5165 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5166 timing_out->vic = avi_frame.video_code;
5167 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5168 timing_out->hdmi_vic = hv_frame.vic;
5169 }
5170
fe8858bb
NC
5171 if (is_freesync_video_mode(mode_in, aconnector)) {
5172 timing_out->h_addressable = mode_in->hdisplay;
5173 timing_out->h_total = mode_in->htotal;
5174 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5175 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5176 timing_out->v_total = mode_in->vtotal;
5177 timing_out->v_addressable = mode_in->vdisplay;
5178 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5179 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5180 timing_out->pix_clk_100hz = mode_in->clock * 10;
5181 } else {
5182 timing_out->h_addressable = mode_in->crtc_hdisplay;
5183 timing_out->h_total = mode_in->crtc_htotal;
5184 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5185 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5186 timing_out->v_total = mode_in->crtc_vtotal;
5187 timing_out->v_addressable = mode_in->crtc_vdisplay;
5188 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5189 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5190 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5191 }
a85ba005 5192
e7b07cee 5193 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
5194
5195 stream->output_color_space = get_output_color_space(timing_out);
5196
e43a432c
AK
5197 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5198 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
5199 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5200 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5201 drm_mode_is_420_also(info, mode_in) &&
5202 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5203 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5204 adjust_colour_depth_from_display_info(timing_out, info);
5205 }
5206 }
e7b07cee
HW
5207}
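/*
 * Editor's note: illustrative timing derivation for the non-freesync
 * branch above, using the standard CEA 1920x1080@60 mode (crtc_htotal
 * 2200, crtc_hsync_start 2008, crtc_hsync_end 2052, crtc_clock 148500):
 * h_front_porch = 2008 - 1920 = 88, h_sync_width = 2052 - 2008 = 44,
 * and pix_clk_100hz = 148500 * 10 = 1485000.
 */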
5208
3ee6b26b
AD
5209static void fill_audio_info(struct audio_info *audio_info,
5210 const struct drm_connector *drm_connector,
5211 const struct dc_sink *dc_sink)
e7b07cee
HW
5212{
5213 int i = 0;
5214 int cea_revision = 0;
5215 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5216
5217 audio_info->manufacture_id = edid_caps->manufacturer_id;
5218 audio_info->product_id = edid_caps->product_id;
5219
5220 cea_revision = drm_connector->display_info.cea_rev;
5221
090afc1e 5222 strscpy(audio_info->display_name,
d2b2562c 5223 edid_caps->display_name,
090afc1e 5224 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 5225
b830ebc9 5226 if (cea_revision >= 3) {
e7b07cee
HW
5227 audio_info->mode_count = edid_caps->audio_mode_count;
5228
5229 for (i = 0; i < audio_info->mode_count; ++i) {
5230 audio_info->modes[i].format_code =
5231 (enum audio_format_code)
5232 (edid_caps->audio_modes[i].format_code);
5233 audio_info->modes[i].channel_count =
5234 edid_caps->audio_modes[i].channel_count;
5235 audio_info->modes[i].sample_rates.all =
5236 edid_caps->audio_modes[i].sample_rate;
5237 audio_info->modes[i].sample_size =
5238 edid_caps->audio_modes[i].sample_size;
5239 }
5240 }
5241
5242 audio_info->flags.all = edid_caps->speaker_flags;
5243
5244	/* TODO: We only check for progressive mode; check for interlace mode too */
b830ebc9 5245 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5246 audio_info->video_latency = drm_connector->video_latency[0];
5247 audio_info->audio_latency = drm_connector->audio_latency[0];
5248 }
5249
5250 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5251
5252}
5253
3ee6b26b
AD
5254static void
5255copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5256 struct drm_display_mode *dst_mode)
e7b07cee
HW
5257{
5258 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5259 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5260 dst_mode->crtc_clock = src_mode->crtc_clock;
5261 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5262 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5263 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
5264 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5265 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5266 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5267 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5268 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5269 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5270 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5271 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5272}
5273
3ee6b26b
AD
5274static void
5275decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5276 const struct drm_display_mode *native_mode,
5277 bool scale_enabled)
e7b07cee
HW
5278{
5279 if (scale_enabled) {
5280 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5281 } else if (native_mode->clock == drm_mode->clock &&
5282 native_mode->htotal == drm_mode->htotal &&
5283 native_mode->vtotal == drm_mode->vtotal) {
5284 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5285 } else {
5286		/* no scaling and no amdgpu-inserted mode: no need to patch */
5287 }
5288}
5289
aed15309
ML
5290static struct dc_sink *
5291create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5292{
2e0ac3d6 5293 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5294 struct dc_sink *sink = NULL;
2e0ac3d6
HW
5295 sink_init_data.link = aconnector->dc_link;
5296 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5297
5298 sink = dc_sink_create(&sink_init_data);
423788c7 5299 if (!sink) {
2e0ac3d6 5300 DRM_ERROR("Failed to create sink!\n");
aed15309 5301 return NULL;
423788c7 5302 }
2e0ac3d6 5303 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5304
aed15309 5305 return sink;
2e0ac3d6
HW
5306}
5307
fa2123db
ML
5308static void set_multisync_trigger_params(
5309 struct dc_stream_state *stream)
5310{
ec372186
ML
5311 struct dc_stream_state *master = NULL;
5312
fa2123db 5313 if (stream->triggered_crtc_reset.enabled) {
ec372186
ML
5314 master = stream->triggered_crtc_reset.event_source;
5315 stream->triggered_crtc_reset.event =
5316 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5317 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5318 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
fa2123db
ML
5319 }
5320}
5321
5322static void set_master_stream(struct dc_stream_state *stream_set[],
5323 int stream_count)
5324{
5325 int j, highest_rfr = 0, master_stream = 0;
5326
5327 for (j = 0; j < stream_count; j++) {
5328 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5329 int refresh_rate = 0;
5330
380604e2 5331 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
5332 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5333 if (refresh_rate > highest_rfr) {
5334 highest_rfr = refresh_rate;
5335 master_stream = j;
5336 }
5337 }
5338 }
5339 for (j = 0; j < stream_count; j++) {
03736f4c 5340 if (stream_set[j])
fa2123db
ML
5341 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5342 }
5343}
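/*
 * Editor's note: the refresh-rate estimate above in concrete numbers
 * (illustrative). For 1920x1080@60, pix_clk_100hz = 1485000 and
 * h_total * v_total = 2200 * 1125 = 2475000, so
 * (1485000 * 100) / 2475000 = 60 Hz; the stream with the highest such
 * value becomes the multisync master.
 */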
5344
5345static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5346{
5347 int i = 0;
ec372186 5348 struct dc_stream_state *stream;
fa2123db
ML
5349
5350 if (context->stream_count < 2)
5351 return;
5352 for (i = 0; i < context->stream_count ; i++) {
5353 if (!context->streams[i])
5354 continue;
1f6010a9
DF
5355 /*
5356 * TODO: add a function to read AMD VSDB bits and set
fa2123db 5357 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 5358 * For now it's set to false
fa2123db 5359 */
fa2123db 5360 }
ec372186 5361
fa2123db 5362 set_master_stream(context->streams, context->stream_count);
ec372186
ML
5363
5364 for (i = 0; i < context->stream_count ; i++) {
5365 stream = context->streams[i];
5366
5367 if (!stream)
5368 continue;
5369
5370 set_multisync_trigger_params(stream);
5371 }
fa2123db
ML
5372}
5373
a85ba005
NC
5374static struct drm_display_mode *
5375get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5376 bool use_probed_modes)
5377{
5378 struct drm_display_mode *m, *m_pref = NULL;
5379 u16 current_refresh, highest_refresh;
5380 struct list_head *list_head = use_probed_modes ?
5381 &aconnector->base.probed_modes :
5382 &aconnector->base.modes;
5383
5384 if (aconnector->freesync_vid_base.clock != 0)
5385 return &aconnector->freesync_vid_base;
5386
5387 /* Find the preferred mode */
5388 list_for_each_entry (m, list_head, head) {
5389 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5390 m_pref = m;
5391 break;
5392 }
5393 }
5394
5395 if (!m_pref) {
5396		/* Probably an EDID with no preferred mode. Fall back to the first entry */
5397 m_pref = list_first_entry_or_null(
5398 &aconnector->base.modes, struct drm_display_mode, head);
5399 if (!m_pref) {
5400 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5401 return NULL;
5402 }
5403 }
5404
5405 highest_refresh = drm_mode_vrefresh(m_pref);
5406
5407 /*
5408 * Find the mode with highest refresh rate with same resolution.
5409 * For some monitors, preferred mode is not the mode with highest
5410 * supported refresh rate.
5411 */
5412 list_for_each_entry (m, list_head, head) {
5413 current_refresh = drm_mode_vrefresh(m);
5414
5415 if (m->hdisplay == m_pref->hdisplay &&
5416 m->vdisplay == m_pref->vdisplay &&
5417 highest_refresh < current_refresh) {
5418 highest_refresh = current_refresh;
5419 m_pref = m;
5420 }
5421 }
5422
5423 aconnector->freesync_vid_base = *m_pref;
5424 return m_pref;
5425}
5426
fe8858bb 5427static bool is_freesync_video_mode(const struct drm_display_mode *mode,
a85ba005
NC
5428 struct amdgpu_dm_connector *aconnector)
5429{
5430 struct drm_display_mode *high_mode;
5431 int timing_diff;
5432
5433 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5434 if (!high_mode || !mode)
5435 return false;
5436
5437 timing_diff = high_mode->vtotal - mode->vtotal;
5438
5439 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5440 high_mode->hdisplay != mode->hdisplay ||
5441 high_mode->vdisplay != mode->vdisplay ||
5442 high_mode->hsync_start != mode->hsync_start ||
5443 high_mode->hsync_end != mode->hsync_end ||
5444 high_mode->htotal != mode->htotal ||
5445 high_mode->hskew != mode->hskew ||
5446 high_mode->vscan != mode->vscan ||
5447 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5448 high_mode->vsync_end - mode->vsync_end != timing_diff)
5449 return false;
5450 else
5451 return true;
5452}
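/*
 * Editor's note: an illustrative pass through the comparison above. A
 * freesync video variant of a 1920x1080@60 base mode (vtotal 1125) that
 * targets 48 Hz at the same pixel clock stretches only the vertical
 * front porch, giving vtotal ~1406; timing_diff = 1125 - 1406 = -281,
 * and both vsync offsets must differ by exactly that amount. Any other
 * field differing disqualifies the mode.
 */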
5453
3ee6b26b
AD
5454static struct dc_stream_state *
5455create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5456 const struct drm_display_mode *drm_mode,
b333730d 5457 const struct dm_connector_state *dm_state,
cbd14ae7
SW
5458 const struct dc_stream_state *old_stream,
5459 int requested_bpc)
e7b07cee
HW
5460{
5461 struct drm_display_mode *preferred_mode = NULL;
391ef035 5462 struct drm_connector *drm_connector;
42ba01fc
NK
5463 const struct drm_connector_state *con_state =
5464 dm_state ? &dm_state->base : NULL;
0971c40e 5465 struct dc_stream_state *stream = NULL;
e7b07cee 5466 struct drm_display_mode mode = *drm_mode;
a85ba005
NC
5467 struct drm_display_mode saved_mode;
5468 struct drm_display_mode *freesync_mode = NULL;
e7b07cee 5469 bool native_mode_found = false;
a85ba005 5470 bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
b333730d 5471 int mode_refresh;
58124bf8 5472 int preferred_refresh = 0;
defeb878 5473#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015
DF
5474 struct dsc_dec_dpcd_caps dsc_caps;
5475 uint32_t link_bandwidth_kbps;
7c431455 5476#endif
aed15309 5477 struct dc_sink *sink = NULL;
a85ba005
NC
5478
5479 memset(&saved_mode, 0, sizeof(saved_mode));
5480
b830ebc9 5481 if (aconnector == NULL) {
e7b07cee 5482 DRM_ERROR("aconnector is NULL!\n");
64245fa7 5483 return stream;
e7b07cee
HW
5484 }
5485
e7b07cee 5486 drm_connector = &aconnector->base;
2e0ac3d6 5487
f4ac176e 5488 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
5489 sink = create_fake_sink(aconnector);
5490 if (!sink)
5491 return stream;
aed15309
ML
5492 } else {
5493 sink = aconnector->dc_sink;
dcd5fb82 5494 dc_sink_retain(sink);
f4ac176e 5495 }
2e0ac3d6 5496
aed15309 5497 stream = dc_create_stream_for_sink(sink);
4562236b 5498
b830ebc9 5499 if (stream == NULL) {
e7b07cee 5500 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 5501 goto finish;
e7b07cee
HW
5502 }
5503
ceb3dbb4
JL
5504 stream->dm_stream_context = aconnector;
5505
4a36fcba
WL
5506 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5507 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5508
e7b07cee
HW
5509 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5510 /* Search for preferred mode */
5511 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5512 native_mode_found = true;
5513 break;
5514 }
5515 }
5516 if (!native_mode_found)
5517 preferred_mode = list_first_entry_or_null(
5518 &aconnector->base.modes,
5519 struct drm_display_mode,
5520 head);
5521
b333730d
BL
5522 mode_refresh = drm_mode_vrefresh(&mode);
5523
b830ebc9 5524 if (preferred_mode == NULL) {
1f6010a9
DF
5525 /*
5526		 * This may not be an error; the use case is when we have no
e7b07cee
HW
5527 * usermode calls to reset and set mode upon hotplug. In this
5528 * case, we call set mode ourselves to restore the previous mode
5529		 * and the mode list may not be filled in yet.
5530 */
f1ad2f5e 5531 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee 5532 } else {
a85ba005
NC
5533 recalculate_timing |= amdgpu_freesync_vid_mode &&
5534 is_freesync_video_mode(&mode, aconnector);
5535 if (recalculate_timing) {
5536 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5537 saved_mode = mode;
5538 mode = *freesync_mode;
5539 } else {
5540 decide_crtc_timing_for_drm_display_mode(
e7b07cee 5541 &mode, preferred_mode,
f4791779 5542 dm_state ? (dm_state->scaling != RMX_OFF) : false);
a85ba005
NC
5543 }
5544
58124bf8 5545 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
5546 }
5547
a85ba005
NC
5548 if (recalculate_timing)
5549 drm_mode_set_crtcinfo(&saved_mode, 0);
fe8858bb 5550 else if (!dm_state)
f783577c
JFZ
5551 drm_mode_set_crtcinfo(&mode, 0);
5552
a85ba005 5553 /*
b333730d
BL
5554	 * If scaling is enabled and the refresh rate didn't change,
5555	 * we copy the VIC and polarities from the old timings.
5556 */
a85ba005
NC
5557 if (!recalculate_timing || mode_refresh != preferred_refresh)
5558 fill_stream_properties_from_drm_display_mode(
5559 stream, &mode, &aconnector->base, con_state, NULL,
5560 requested_bpc);
b333730d 5561 else
a85ba005
NC
5562 fill_stream_properties_from_drm_display_mode(
5563 stream, &mode, &aconnector->base, con_state, old_stream,
5564 requested_bpc);
b333730d 5565
df2f1015
DF
5566 stream->timing.flags.DSC = 0;
5567
5568 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 5569#if defined(CONFIG_DRM_AMD_DC_DCN)
2af0f378
NC
5570 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5571 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6d824ed5 5572 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
df2f1015
DF
5573 &dsc_caps);
5574 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5575 dc_link_get_link_cap(aconnector->dc_link));
5576
0749ddeb 5577 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
bcc6aa61 5578 /* Set DSC policy according to dsc_clock_en */
0749ddeb
EB
5579 dc_dsc_policy_set_enable_dsc_when_not_needed(
5580 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
bcc6aa61 5581
0417df16 5582 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 5583 &dsc_caps,
0417df16 5584 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
9abdf392 5585 0,
df2f1015
DF
5586 link_bandwidth_kbps,
5587 &stream->timing,
5588 &stream->timing.dsc_cfg))
5589 stream->timing.flags.DSC = 1;
27e84dd7 5590 /* Overwrite the stream flag if DSC is enabled through debugfs */
0749ddeb 5591 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
097e6d98 5592 stream->timing.flags.DSC = 1;
734e4c97 5593
28b2f656
EB
5594 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5595 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
734e4c97 5596
28b2f656
EB
5597 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5598 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5268bf13
EB
5599
5600 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5601 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
097e6d98 5602 }
39a4eb85 5603#endif
df2f1015 5604 }
39a4eb85 5605
e7b07cee
HW
5606 update_stream_scaling_settings(&mode, dm_state, stream);
5607
5608 fill_audio_info(
5609 &stream->audio_info,
5610 drm_connector,
aed15309 5611 sink);
e7b07cee 5612
ceb3dbb4 5613 update_stream_signal(stream, sink);
9182b4cb 5614
d832fc3b 5615 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
5616 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5617
8a488f5d
RL
5618 if (stream->link->psr_settings.psr_feature_enabled) {
5619 //
5620		// Decide whether the stream supports VSC SDP colorimetry
5621		// before building the VSC infopacket.
5622 //
5623 stream->use_vsc_sdp_for_colorimetry = false;
5624 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5625 stream->use_vsc_sdp_for_colorimetry =
5626 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5627 } else {
5628 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5629 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 5630 }
8a488f5d 5631 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309 5632 }
aed15309 5633finish:
dcd5fb82 5634 dc_sink_release(sink);
9e3efe3e 5635
e7b07cee
HW
5636 return stream;
5637}
5638
7578ecda 5639static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
5640{
5641 drm_crtc_cleanup(crtc);
5642 kfree(crtc);
5643}
5644
5645static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 5646 struct drm_crtc_state *state)
e7b07cee
HW
5647{
5648 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5649
5650	/* TODO: Destroy dc_stream objects once the stream object is flattened */
5651 if (cur->stream)
5652 dc_stream_release(cur->stream);
5653
5655 __drm_atomic_helper_crtc_destroy_state(state);
5656
5658 kfree(state);
5659}
5660
5661static void dm_crtc_reset_state(struct drm_crtc *crtc)
5662{
5663 struct dm_crtc_state *state;
5664
5665 if (crtc->state)
5666 dm_crtc_destroy_state(crtc, crtc->state);
5667
5668 state = kzalloc(sizeof(*state), GFP_KERNEL);
5669 if (WARN_ON(!state))
5670 return;
5671
1f8a52ec 5672 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
5673}
5674
5675static struct drm_crtc_state *
5676dm_crtc_duplicate_state(struct drm_crtc *crtc)
5677{
5678 struct dm_crtc_state *state, *cur;
5679
5680 cur = to_dm_crtc_state(crtc->state);
5681
5682 if (WARN_ON(!crtc->state))
5683 return NULL;
5684
2004f45e 5685 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
5686 if (!state)
5687 return NULL;
e7b07cee
HW
5688
5689 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5690
5691 if (cur->stream) {
5692 state->stream = cur->stream;
5693 dc_stream_retain(state->stream);
5694 }
5695
d6ef9b41 5696 state->active_planes = cur->active_planes;
98e6436d 5697 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 5698 state->abm_level = cur->abm_level;
bb47de73
NK
5699 state->vrr_supported = cur->vrr_supported;
5700 state->freesync_config = cur->freesync_config;
cf020d49
NK
5701 state->cm_has_degamma = cur->cm_has_degamma;
5702 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
e7b07cee
HW
5703	/* TODO: Duplicate dc_stream once the stream object is flattened */
5704
5705 return &state->base;
5706}
5707
86bc2219 5708#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
e69231c4 5709static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
86bc2219
WL
5710{
5711 crtc_debugfs_init(crtc);
5712
5713 return 0;
5714}
5715#endif
5716
d2574c33
MK
5717static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5718{
5719 enum dc_irq_source irq_source;
5720 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5721 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
5722 int rc;
5723
5724 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5725
5726 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5727
4711c033
LT
5728 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5729 acrtc->crtc_id, enable ? "en" : "dis", rc);
d2574c33
MK
5730 return rc;
5731}
589d2739
HW
5732
5733static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5734{
5735 enum dc_irq_source irq_source;
5736 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5737 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 5738 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
ea3b4242 5739#if defined(CONFIG_DRM_AMD_DC_DCN)
71338cb4 5740 struct amdgpu_display_manager *dm = &adev->dm;
ea3b4242
QZ
5741 unsigned long flags;
5742#endif
d2574c33
MK
5743 int rc = 0;
5744
5745 if (enable) {
5746 /* vblank irq on -> Only need vupdate irq in vrr mode */
5747 if (amdgpu_dm_vrr_active(acrtc_state))
5748 rc = dm_set_vupdate_irq(crtc, true);
5749 } else {
5750 /* vblank irq off -> vupdate irq off */
5751 rc = dm_set_vupdate_irq(crtc, false);
5752 }
5753
5754 if (rc)
5755 return rc;
589d2739
HW
5756
5757 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
5758
5759 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5760 return -EBUSY;
5761
98ab5f35
BL
5762 if (amdgpu_in_reset(adev))
5763 return 0;
5764
4928b480 5765#if defined(CONFIG_DRM_AMD_DC_DCN)
ea3b4242
QZ
5766 spin_lock_irqsave(&dm->vblank_lock, flags);
5767 dm->vblank_workqueue->dm = dm;
5768 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5769 dm->vblank_workqueue->enable = enable;
5770 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5771 schedule_work(&dm->vblank_workqueue->mall_work);
4928b480 5772#endif
71338cb4 5773
71338cb4 5774 return 0;
589d2739
HW
5775}
5776
5777static int dm_enable_vblank(struct drm_crtc *crtc)
5778{
5779 return dm_set_vblank(crtc, true);
5780}
5781
5782static void dm_disable_vblank(struct drm_crtc *crtc)
5783{
5784 dm_set_vblank(crtc, false);
5785}
5786
e7b07cee
HW
5787/* Implemented only the options currently availible for the driver */
5788static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5789 .reset = dm_crtc_reset_state,
5790 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
5791 .set_config = drm_atomic_helper_set_config,
5792 .page_flip = drm_atomic_helper_page_flip,
5793 .atomic_duplicate_state = dm_crtc_duplicate_state,
5794 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 5795 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 5796 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 5797 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 5798 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
5799 .enable_vblank = dm_enable_vblank,
5800 .disable_vblank = dm_disable_vblank,
e3eff4b5 5801 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
86bc2219
WL
5802#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5803 .late_register = amdgpu_dm_crtc_late_register,
5804#endif
e7b07cee
HW
5805};
5806
5807static enum drm_connector_status
5808amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5809{
5810 bool connected;
c84dec2f 5811 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5812
1f6010a9
DF
5813 /*
5814 * Notes:
e7b07cee
HW
5815 * 1. This interface is NOT called in context of HPD irq.
5816 * 2. This interface *is called* in context of user-mode ioctl. Which
1f6010a9
DF
5817 * makes it a bad place for *any* MST-related activity.
5818 */
e7b07cee 5819
8580d60b
HW
5820 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5821 !aconnector->fake_enable)
e7b07cee
HW
5822 connected = (aconnector->dc_sink != NULL);
5823 else
5824 connected = (aconnector->base.force == DRM_FORCE_ON);
5825
0f877894
OV
5826 update_subconnector_property(aconnector);
5827
e7b07cee
HW
5828 return (connected ? connector_status_connected :
5829 connector_status_disconnected);
5830}
5831
3ee6b26b
AD
5832int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5833 struct drm_connector_state *connector_state,
5834 struct drm_property *property,
5835 uint64_t val)
e7b07cee
HW
5836{
5837 struct drm_device *dev = connector->dev;
1348969a 5838 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5839 struct dm_connector_state *dm_old_state =
5840 to_dm_connector_state(connector->state);
5841 struct dm_connector_state *dm_new_state =
5842 to_dm_connector_state(connector_state);
5843
5844 int ret = -EINVAL;
5845
5846 if (property == dev->mode_config.scaling_mode_property) {
5847 enum amdgpu_rmx_type rmx_type;
5848
5849 switch (val) {
5850 case DRM_MODE_SCALE_CENTER:
5851 rmx_type = RMX_CENTER;
5852 break;
5853 case DRM_MODE_SCALE_ASPECT:
5854 rmx_type = RMX_ASPECT;
5855 break;
5856 case DRM_MODE_SCALE_FULLSCREEN:
5857 rmx_type = RMX_FULL;
5858 break;
5859 case DRM_MODE_SCALE_NONE:
5860 default:
5861 rmx_type = RMX_OFF;
5862 break;
5863 }
5864
5865 if (dm_old_state->scaling == rmx_type)
5866 return 0;
5867
5868 dm_new_state->scaling = rmx_type;
5869 ret = 0;
5870 } else if (property == adev->mode_info.underscan_hborder_property) {
5871 dm_new_state->underscan_hborder = val;
5872 ret = 0;
5873 } else if (property == adev->mode_info.underscan_vborder_property) {
5874 dm_new_state->underscan_vborder = val;
5875 ret = 0;
5876 } else if (property == adev->mode_info.underscan_property) {
5877 dm_new_state->underscan_enable = val;
5878 ret = 0;
c1ee92f9
DF
5879 } else if (property == adev->mode_info.abm_level_property) {
5880 dm_new_state->abm_level = val;
5881 ret = 0;
e7b07cee
HW
5882 }
5883
5884 return ret;
5885}
5886
3ee6b26b
AD
5887int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5888 const struct drm_connector_state *state,
5889 struct drm_property *property,
5890 uint64_t *val)
e7b07cee
HW
5891{
5892 struct drm_device *dev = connector->dev;
1348969a 5893 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
5894 struct dm_connector_state *dm_state =
5895 to_dm_connector_state(state);
5896 int ret = -EINVAL;
5897
5898 if (property == dev->mode_config.scaling_mode_property) {
5899 switch (dm_state->scaling) {
5900 case RMX_CENTER:
5901 *val = DRM_MODE_SCALE_CENTER;
5902 break;
5903 case RMX_ASPECT:
5904 *val = DRM_MODE_SCALE_ASPECT;
5905 break;
5906 case RMX_FULL:
5907 *val = DRM_MODE_SCALE_FULLSCREEN;
5908 break;
5909 case RMX_OFF:
5910 default:
5911 *val = DRM_MODE_SCALE_NONE;
5912 break;
5913 }
5914 ret = 0;
5915 } else if (property == adev->mode_info.underscan_hborder_property) {
5916 *val = dm_state->underscan_hborder;
5917 ret = 0;
5918 } else if (property == adev->mode_info.underscan_vborder_property) {
5919 *val = dm_state->underscan_vborder;
5920 ret = 0;
5921 } else if (property == adev->mode_info.underscan_property) {
5922 *val = dm_state->underscan_enable;
5923 ret = 0;
c1ee92f9
DF
5924 } else if (property == adev->mode_info.abm_level_property) {
5925 *val = dm_state->abm_level;
5926 ret = 0;
e7b07cee 5927 }
c1ee92f9 5928
e7b07cee
HW
5929 return ret;
5930}
5931
526c654a
ED
5932static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5933{
5934 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5935
5936 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5937}
5938
7578ecda 5939static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 5940{
c84dec2f 5941 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5942 const struct dc_link *link = aconnector->dc_link;
1348969a 5943 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 5944 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 5945
5dff80bd
AG
5946 /*
5947	 * Call only if mst_mgr was initialized before, since it's not done
5948 * for all connector types.
5949 */
5950 if (aconnector->mst_mgr.dev)
5951 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5952
e7b07cee
HW
5953#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5954 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5955
89fc8d4e 5956 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
5957 link->type != dc_connection_none &&
5958 dm->backlight_dev) {
5959 backlight_device_unregister(dm->backlight_dev);
5960 dm->backlight_dev = NULL;
e7b07cee
HW
5961 }
5962#endif
dcd5fb82
MF
5963
5964 if (aconnector->dc_em_sink)
5965 dc_sink_release(aconnector->dc_em_sink);
5966 aconnector->dc_em_sink = NULL;
5967 if (aconnector->dc_sink)
5968 dc_sink_release(aconnector->dc_sink);
5969 aconnector->dc_sink = NULL;
5970
e86e8947 5971 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
5972 drm_connector_unregister(connector);
5973 drm_connector_cleanup(connector);
526c654a
ED
5974 if (aconnector->i2c) {
5975 i2c_del_adapter(&aconnector->i2c->base);
5976 kfree(aconnector->i2c);
5977 }
7daec99f 5978 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 5979
e7b07cee
HW
5980 kfree(connector);
5981}
5982
5983void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5984{
5985 struct dm_connector_state *state =
5986 to_dm_connector_state(connector->state);
5987
df099b9b
LSL
5988 if (connector->state)
5989 __drm_atomic_helper_connector_destroy_state(connector->state);
5990
e7b07cee
HW
5991 kfree(state);
5992
5993 state = kzalloc(sizeof(*state), GFP_KERNEL);
5994
5995 if (state) {
5996 state->scaling = RMX_OFF;
5997 state->underscan_enable = false;
5998 state->underscan_hborder = 0;
5999 state->underscan_vborder = 0;
01933ba4 6000 state->base.max_requested_bpc = 8;
3261e013
ML
6001 state->vcpi_slots = 0;
6002 state->pbn = 0;
c3e50f89
NK
6003 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6004 state->abm_level = amdgpu_dm_abm_level;
6005
df099b9b 6006 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
6007 }
6008}
6009
3ee6b26b
AD
6010struct drm_connector_state *
6011amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
6012{
6013 struct dm_connector_state *state =
6014 to_dm_connector_state(connector->state);
6015
6016 struct dm_connector_state *new_state =
6017 kmemdup(state, sizeof(*state), GFP_KERNEL);
6018
98e6436d
AK
6019 if (!new_state)
6020 return NULL;
e7b07cee 6021
98e6436d
AK
6022 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6023
6024 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6025 new_state->abm_level = state->abm_level;
922454c2
NK
6026 new_state->scaling = state->scaling;
6027 new_state->underscan_enable = state->underscan_enable;
6028 new_state->underscan_hborder = state->underscan_hborder;
6029 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
6030 new_state->vcpi_slots = state->vcpi_slots;
6031 new_state->pbn = state->pbn;
98e6436d 6032 return &new_state->base;
e7b07cee
HW
6033}
6034
14f04fa4
AD
6035static int
6036amdgpu_dm_connector_late_register(struct drm_connector *connector)
6037{
6038 struct amdgpu_dm_connector *amdgpu_dm_connector =
6039 to_amdgpu_dm_connector(connector);
00a8037e 6040 int r;
14f04fa4 6041
00a8037e
AD
6042 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6043 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6044 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6045 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6046 if (r)
6047 return r;
6048 }
6049
6050#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
6051 connector_debugfs_init(amdgpu_dm_connector);
6052#endif
6053
6054 return 0;
6055}
6056
e7b07cee
HW
6057static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6058 .reset = amdgpu_dm_connector_funcs_reset,
6059 .detect = amdgpu_dm_connector_detect,
6060 .fill_modes = drm_helper_probe_single_connector_modes,
6061 .destroy = amdgpu_dm_connector_destroy,
6062 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6063 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6064 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6065 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6066 .late_register = amdgpu_dm_connector_late_register,
526c654a 6067 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
6068};
6069
e7b07cee
HW
6070static int get_modes(struct drm_connector *connector)
6071{
6072 return amdgpu_dm_connector_get_modes(connector);
6073}
6074
c84dec2f 6075static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6076{
6077 struct dc_sink_init_data init_params = {
6078 .link = aconnector->dc_link,
6079 .sink_signal = SIGNAL_TYPE_VIRTUAL
6080 };
70e8ffc5 6081 struct edid *edid;
e7b07cee 6082
a89ff457 6083 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
6084		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6085 aconnector->base.name);
6086
6087 aconnector->base.force = DRM_FORCE_OFF;
6088 aconnector->base.override_edid = false;
6089 return;
6090 }
6091
70e8ffc5
HW
6092 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6093
e7b07cee
HW
6094 aconnector->edid = edid;
6095
6096 aconnector->dc_em_sink = dc_link_add_remote_sink(
6097 aconnector->dc_link,
6098 (uint8_t *)edid,
6099 (edid->extensions + 1) * EDID_LENGTH,
6100 &init_params);
6101
dcd5fb82 6102 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
6103 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6104 aconnector->dc_link->local_sink :
6105 aconnector->dc_em_sink;
dcd5fb82
MF
6106 dc_sink_retain(aconnector->dc_sink);
6107 }
e7b07cee
HW
6108}
6109
c84dec2f 6110static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6111{
6112 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6113
1f6010a9
DF
6114 /*
6115	 * In case of a headless boot with force-on for a DP-managed connector,
e7b07cee
HW
6116	 * those settings have to be != 0 to get an initial modeset.
6117 */
6118 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6119 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6120 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6121 }
6122
6123
6124 aconnector->base.override_edid = true;
6125 create_eml_sink(aconnector);
6126}
6127
cbd14ae7
SW
6128static struct dc_stream_state *
6129create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6130 const struct drm_display_mode *drm_mode,
6131 const struct dm_connector_state *dm_state,
6132 const struct dc_stream_state *old_stream)
6133{
6134 struct drm_connector *connector = &aconnector->base;
1348969a 6135 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 6136 struct dc_stream_state *stream;
4b7da34b
SW
6137 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6138 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
6139 enum dc_status dc_result = DC_OK;
6140
6141 do {
6142 stream = create_stream_for_sink(aconnector, drm_mode,
6143 dm_state, old_stream,
6144 requested_bpc);
6145 if (stream == NULL) {
6146 DRM_ERROR("Failed to create stream for sink!\n");
6147 break;
6148 }
6149
6150 dc_result = dc_validate_stream(adev->dm.dc, stream);
6151
6152 if (dc_result != DC_OK) {
74a16675 6153 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
6154 drm_mode->hdisplay,
6155 drm_mode->vdisplay,
6156 drm_mode->clock,
74a16675
RS
6157 dc_result,
6158 dc_status_to_str(dc_result));
cbd14ae7
SW
6159
6160 dc_stream_release(stream);
6161 stream = NULL;
6162 requested_bpc -= 2; /* lower bpc to retry validation */
6163 }
6164
6165 } while (stream == NULL && requested_bpc >= 6);
6166
68eb3ae3
WS
6167 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6168 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6169
6170 aconnector->force_yuv420_output = true;
6171 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6172 dm_state, old_stream);
6173 aconnector->force_yuv420_output = false;
6174 }
6175
cbd14ae7
SW
6176 return stream;
6177}
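/*
 * Editor's note: the retry ladder above in practice (illustrative). With
 * max_requested_bpc = 10, validation is attempted at 10, then 8, then 6
 * bpc; if validation still fails with DC_FAIL_ENC_VALIDATE, one more
 * pass is made with force_yuv420_output set, since 4:2:0 needs roughly
 * half the link bandwidth of 4:4:4.
 */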
6178
ba9ca088 6179enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 6180 struct drm_display_mode *mode)
e7b07cee
HW
6181{
6182 int result = MODE_ERROR;
6183 struct dc_sink *dc_sink;
e7b07cee 6184 /* TODO: Unhardcode stream count */
0971c40e 6185 struct dc_stream_state *stream;
c84dec2f 6186 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
6187
6188 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6189 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6190 return result;
6191
1f6010a9
DF
6192 /*
6193	 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
6194 * EDID mgmt
6195 */
6196 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6197 !aconnector->dc_em_sink)
6198 handle_edid_mgmt(aconnector);
6199
c84dec2f 6200 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 6201
ad975f44
VL
6202 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6203 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
6204 DRM_ERROR("dc_sink is NULL!\n");
6205 goto fail;
6206 }
6207
cbd14ae7
SW
6208 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6209 if (stream) {
6210 dc_stream_release(stream);
e7b07cee 6211 result = MODE_OK;
cbd14ae7 6212 }
e7b07cee
HW
6213
6214fail:
6215	/* TODO: error handling */
6216 return result;
6217}
6218
88694af9
NK
6219static int fill_hdr_info_packet(const struct drm_connector_state *state,
6220 struct dc_info_packet *out)
6221{
6222 struct hdmi_drm_infoframe frame;
6223 unsigned char buf[30]; /* 26 + 4 */
6224 ssize_t len;
6225 int ret, i;
6226
6227 memset(out, 0, sizeof(*out));
6228
6229 if (!state->hdr_output_metadata)
6230 return 0;
6231
6232 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6233 if (ret)
6234 return ret;
6235
6236 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6237 if (len < 0)
6238 return (int)len;
6239
6240 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6241 if (len != 30)
6242 return -EINVAL;
6243
6244 /* Prepare the infopacket for DC. */
6245 switch (state->connector->connector_type) {
6246 case DRM_MODE_CONNECTOR_HDMIA:
6247 out->hb0 = 0x87; /* type */
6248 out->hb1 = 0x01; /* version */
6249 out->hb2 = 0x1A; /* length */
6250 out->sb[0] = buf[3]; /* checksum */
6251 i = 1;
6252 break;
6253
6254 case DRM_MODE_CONNECTOR_DisplayPort:
6255 case DRM_MODE_CONNECTOR_eDP:
6256 out->hb0 = 0x00; /* sdp id, zero */
6257 out->hb1 = 0x87; /* type */
6258 out->hb2 = 0x1D; /* payload len - 1 */
6259 out->hb3 = (0x13 << 2); /* sdp version */
6260 out->sb[0] = 0x01; /* version */
6261 out->sb[1] = 0x1A; /* length */
6262 i = 2;
6263 break;
6264
6265 default:
6266 return -EINVAL;
6267 }
6268
6269 memcpy(&out->sb[i], &buf[4], 26);
6270 out->valid = true;
6271
6272 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6273 sizeof(out->sb), false);
6274
6275 return 0;
6276}
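/*
 * Editor's note: byte-layout sketch of the packet built above
 * (illustrative). The 30-byte pack result is [type 0x87] [version 0x01]
 * [length 0x1A] [checksum] followed by 26 payload bytes; for HDMI the
 * checksum lands in sb[0] and the payload in sb[1..26]. The DP/eDP
 * header instead carries SDP id 0, type 0x87, payload length - 1 (0x1D)
 * and the SDP version in hb3 bits 7:2, with version and length moved
 * into the first two data bytes.
 */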
6277
6278static bool
6279is_hdr_metadata_different(const struct drm_connector_state *old_state,
6280 const struct drm_connector_state *new_state)
6281{
6282 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6283 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6284
6285 if (old_blob != new_blob) {
6286 if (old_blob && new_blob &&
6287 old_blob->length == new_blob->length)
6288 return memcmp(old_blob->data, new_blob->data,
6289 old_blob->length);
6290
6291 return true;
6292 }
6293
6294 return false;
6295}
6296
6297static int
6298amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 6299 struct drm_atomic_state *state)
88694af9 6300{
51e857af
SP
6301 struct drm_connector_state *new_con_state =
6302 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
6303 struct drm_connector_state *old_con_state =
6304 drm_atomic_get_old_connector_state(state, conn);
6305 struct drm_crtc *crtc = new_con_state->crtc;
6306 struct drm_crtc_state *new_crtc_state;
6307 int ret;
6308
e8a98235
RS
6309 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6310
88694af9
NK
6311 if (!crtc)
6312 return 0;
6313
6314 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6315 struct dc_info_packet hdr_infopacket;
6316
6317 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6318 if (ret)
6319 return ret;
6320
6321 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6322 if (IS_ERR(new_crtc_state))
6323 return PTR_ERR(new_crtc_state);
6324
6325 /*
6326 * DC considers the stream backends changed if the
6327 * static metadata changes. Forcing the modeset also
6328 * gives a simple way for userspace to switch from
b232d4ed
NK
6329 * 8bpc to 10bpc when setting the metadata to enter
6330 * or exit HDR.
6331 *
6332 * Changing the static metadata after it's been
6333 * set is permissible, however. So only force a
6334 * modeset if we're entering or exiting HDR.
88694af9 6335 */
b232d4ed
NK
6336 new_crtc_state->mode_changed =
6337 !old_con_state->hdr_output_metadata ||
6338 !new_con_state->hdr_output_metadata;
88694af9
NK
6339 }
6340
6341 return 0;
6342}
6343
e7b07cee
HW
6344static const struct drm_connector_helper_funcs
6345amdgpu_dm_connector_helper_funcs = {
6346 /*
1f6010a9 6347 * If hotplugging a second bigger display in FB Con mode, bigger resolution
b830ebc9 6348 * modes will be filtered by drm_mode_validate_size(), and those modes
1f6010a9 6349	 * are missing after the user starts lightdm. So we need to renew the modes list
b830ebc9
HW
6350	 * in the get_modes callback, not just return the mode count.
6351 */
e7b07cee
HW
6352 .get_modes = get_modes,
6353 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 6354 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
6355};
6356
6357static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6358{
6359}
6360
d6ef9b41 6361static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
6362{
6363 struct drm_atomic_state *state = new_crtc_state->state;
6364 struct drm_plane *plane;
6365 int num_active = 0;
6366
6367 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6368 struct drm_plane_state *new_plane_state;
6369
6370 /* Cursor planes are "fake". */
6371 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6372 continue;
6373
6374 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6375
6376 if (!new_plane_state) {
6377 /*
6378			 * The plane is enabled on the CRTC and hasn't changed
6379 * state. This means that it previously passed
6380 * validation and is therefore enabled.
6381 */
6382 num_active += 1;
6383 continue;
6384 }
6385
6386 /* We need a framebuffer to be considered enabled. */
6387 num_active += (new_plane_state->fb != NULL);
6388 }
6389
d6ef9b41
NK
6390 return num_active;
6391}
6392
8fe684e9
NK
6393static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6394 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
6395{
6396 struct dm_crtc_state *dm_new_crtc_state =
6397 to_dm_crtc_state(new_crtc_state);
6398
6399 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
6400
6401 if (!dm_new_crtc_state->stream)
6402 return;
6403
6404 dm_new_crtc_state->active_planes =
6405 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
6406}
6407
3ee6b26b 6408static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 6409 struct drm_atomic_state *state)
e7b07cee 6410{
29b77ad7
MR
6411 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6412 crtc);
1348969a 6413 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 6414 struct dc *dc = adev->dm.dc;
29b77ad7 6415 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
6416 int ret = -EINVAL;
6417
5b8c5969 6418 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 6419
29b77ad7 6420 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 6421
9b690ef3 6422 if (unlikely(!dm_crtc_state->stream &&
29b77ad7 6423 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
6424 WARN_ON(1);
6425 return ret;
6426 }
6427
bc92c065 6428 /*
b836a274
MD
6429 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6430 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6431 * planes are disabled, which is not supported by the hardware. And there is legacy
6432 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 6433 */
29b77ad7 6434 if (crtc_state->enable &&
ea9522f5
SS
6435 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6436 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 6437 return -EINVAL;
ea9522f5 6438 }
c14a005c 6439
b836a274
MD
6440 /* In some use cases, like reset, no stream is attached */
6441 if (!dm_crtc_state->stream)
6442 return 0;
6443
62c933f9 6444 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
6445 return 0;
6446
ea9522f5 6447 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
6448 return ret;
6449}
6450
3ee6b26b
AD
6451static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6452 const struct drm_display_mode *mode,
6453 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
6454{
6455 return true;
6456}
6457
6458static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6459 .disable = dm_crtc_helper_disable,
6460 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
6461 .mode_fixup = dm_crtc_helper_mode_fixup,
6462 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
6463};
6464
6465static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6466{
6467
6468}
6469
3261e013
ML
6470 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6471{
6472 switch (display_color_depth) {
6473 case COLOR_DEPTH_666:
6474 return 6;
6475 case COLOR_DEPTH_888:
6476 return 8;
6477 case COLOR_DEPTH_101010:
6478 return 10;
6479 case COLOR_DEPTH_121212:
6480 return 12;
6481 case COLOR_DEPTH_141414:
6482 return 14;
6483 case COLOR_DEPTH_161616:
6484 return 16;
6485 default:
6486 break;
6487 }
6488 return 0;
6489}
6490
3ee6b26b
AD
6491static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6492 struct drm_crtc_state *crtc_state,
6493 struct drm_connector_state *conn_state)
e7b07cee 6494{
3261e013
ML
6495 struct drm_atomic_state *state = crtc_state->state;
6496 struct drm_connector *connector = conn_state->connector;
6497 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6498 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6499 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6500 struct drm_dp_mst_topology_mgr *mst_mgr;
6501 struct drm_dp_mst_port *mst_port;
6502 enum dc_color_depth color_depth;
6503 int clock, bpp = 0;
1bc22f20 6504 bool is_y420 = false;
3261e013
ML
6505
6506 if (!aconnector->port || !aconnector->dc_sink)
6507 return 0;
6508
6509 mst_port = aconnector->port;
6510 mst_mgr = &aconnector->mst_port->mst_mgr;
6511
6512 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6513 return 0;
6514
6515 if (!state->duplicated) {
cbd14ae7 6516 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
6517 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6518 aconnector->force_yuv420_output;
cbd14ae7
SW
6519 color_depth = convert_color_depth_from_display_info(connector,
6520 is_y420,
6521 max_bpc);
3261e013
ML
6522 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6523 clock = adjusted_mode->clock;
dc48529f 6524 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
6525 }
6526 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6527 mst_mgr,
6528 mst_port,
1c6c1cb5 6529 dm_new_connector_state->pbn,
03ca9600 6530 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
6531 if (dm_new_connector_state->vcpi_slots < 0) {
6532 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6533 return dm_new_connector_state->vcpi_slots;
6534 }
e7b07cee
HW
6535 return 0;
6536}
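/*
 * Worked example (illustrative, values assumed): a 3840x2160@60 mode with a
 * 533250 kHz pixel clock at 8 bpc RGB gives bpp = 8 * 3 = 24, so the call
 * above becomes drm_dp_calc_pbn_mode(533250, 24, false). The resulting PBN
 * is what drm_dp_atomic_find_vcpi_slots() divides by the link's PBN-per-slot
 * value (dm_mst_get_pbn_divider()) to arrive at a time slot count.
 */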
6537
6538const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6539 .disable = dm_encoder_helper_disable,
6540 .atomic_check = dm_encoder_helper_atomic_check
6541};
6542
d9fe1a4c 6543#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
6544static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6545 struct dc_state *dc_state)
6546{
6547 struct dc_stream_state *stream = NULL;
6548 struct drm_connector *connector;
6549 struct drm_connector_state *new_con_state, *old_con_state;
6550 struct amdgpu_dm_connector *aconnector;
6551 struct dm_connector_state *dm_conn_state;
6552 int i, j, clock, bpp;
6553 int vcpi, pbn_div, pbn = 0;
6554
6555 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6556
6557 aconnector = to_amdgpu_dm_connector(connector);
6558
6559 if (!aconnector->port)
6560 continue;
6561
6562 if (!new_con_state || !new_con_state->crtc)
6563 continue;
6564
6565 dm_conn_state = to_dm_connector_state(new_con_state);
6566
6567 for (j = 0; j < dc_state->stream_count; j++) {
6568 stream = dc_state->streams[j];
6569 if (!stream)
6570 continue;
6571
 6572 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6573 break;
6574
6575 stream = NULL;
6576 }
6577
6578 if (!stream)
6579 continue;
6580
6581 if (stream->timing.flags.DSC != 1) {
6582 drm_dp_mst_atomic_enable_dsc(state,
6583 aconnector->port,
6584 dm_conn_state->pbn,
6585 0,
6586 false);
6587 continue;
6588 }
6589
6590 pbn_div = dm_mst_get_pbn_divider(stream->link);
6591 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6592 clock = stream->timing.pix_clk_100hz / 10;
6593 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6594 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6595 aconnector->port,
6596 pbn, pbn_div,
6597 true);
6598 if (vcpi < 0)
6599 return vcpi;
6600
6601 dm_conn_state->pbn = pbn;
6602 dm_conn_state->vcpi_slots = vcpi;
6603 }
6604 return 0;
6605}
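/*
 * Illustrative note: in the DSC branch above, dsc_cfg.bits_per_pixel is in
 * units of 1/16 bpp (a 12 bpp target is stored as 192), which is why
 * drm_dp_calc_pbn_mode() is called with its dsc argument set to true so it
 * can scale the value accordingly.
 */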
d9fe1a4c 6606#endif
29b9ba74 6607
e7b07cee
HW
6608static void dm_drm_plane_reset(struct drm_plane *plane)
6609{
6610 struct dm_plane_state *amdgpu_state = NULL;
6611
6612 if (plane->state)
6613 plane->funcs->atomic_destroy_state(plane, plane->state);
6614
6615 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 6616 WARN_ON(amdgpu_state == NULL);
1f6010a9 6617
7ddaef96
NK
6618 if (amdgpu_state)
6619 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
6620}
6621
6622static struct drm_plane_state *
6623dm_drm_plane_duplicate_state(struct drm_plane *plane)
6624{
6625 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6626
6627 old_dm_plane_state = to_dm_plane_state(plane->state);
6628 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6629 if (!dm_plane_state)
6630 return NULL;
6631
6632 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6633
3be5262e
HW
6634 if (old_dm_plane_state->dc_state) {
6635 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6636 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
6637 }
6638
6639 return &dm_plane_state->base;
6640}
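/*
 * Illustrative note: the dc_plane_state_retain() above pairs with the
 * dc_plane_state_release() in dm_drm_plane_destroy_state() below, so
 * duplicated plane states share one refcounted DC plane state instead of
 * taking a deep copy.
 */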
6641
dfd84d90 6642static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 6643 struct drm_plane_state *state)
e7b07cee
HW
6644{
6645 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6646
3be5262e
HW
6647 if (dm_plane_state->dc_state)
6648 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 6649
0627bbd3 6650 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
6651}
6652
6653static const struct drm_plane_funcs dm_plane_funcs = {
6654 .update_plane = drm_atomic_helper_update_plane,
6655 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 6656 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
6657 .reset = dm_drm_plane_reset,
6658 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6659 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 6660 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
6661};
6662
3ee6b26b
AD
6663static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6664 struct drm_plane_state *new_state)
e7b07cee
HW
6665{
6666 struct amdgpu_framebuffer *afb;
6667 struct drm_gem_object *obj;
5d43be0c 6668 struct amdgpu_device *adev;
e7b07cee 6669 struct amdgpu_bo *rbo;
e7b07cee 6670 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
6671 struct list_head list;
6672 struct ttm_validate_buffer tv;
6673 struct ww_acquire_ctx ticket;
5d43be0c
CK
6674 uint32_t domain;
6675 int r;
e7b07cee
HW
6676
6677 if (!new_state->fb) {
4711c033 6678 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
6679 return 0;
6680 }
6681
6682 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 6683 obj = new_state->fb->obj[0];
e7b07cee 6684 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 6685 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
6686 INIT_LIST_HEAD(&list);
6687
6688 tv.bo = &rbo->tbo;
6689 tv.num_shared = 1;
6690 list_add(&tv.head, &list);
6691
9165fb87 6692 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
6693 if (r) {
6694 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 6695 return r;
0f257b09 6696 }
e7b07cee 6697
5d43be0c 6698 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 6699 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
6700 else
6701 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 6702
7b7c6c81 6703 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 6704 if (unlikely(r != 0)) {
30b7c614
HW
6705 if (r != -ERESTARTSYS)
6706 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 6707 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
6708 return r;
6709 }
6710
bb812f1e
JZ
6711 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6712 if (unlikely(r != 0)) {
6713 amdgpu_bo_unpin(rbo);
0f257b09 6714 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6715 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
6716 return r;
6717 }
7df7e505 6718
0f257b09 6719 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6720
7b7c6c81 6721 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
6722
6723 amdgpu_bo_ref(rbo);
6724
cf322b49
NK
 6725 /*
6726 * We don't do surface updates on planes that have been newly created,
6727 * but we also don't have the afb->address during atomic check.
6728 *
6729 * Fill in buffer attributes depending on the address here, but only on
6730 * newly created planes since they're not being used by DC yet and this
6731 * won't modify global state.
6732 */
6733 dm_plane_state_old = to_dm_plane_state(plane->state);
6734 dm_plane_state_new = to_dm_plane_state(new_state);
6735
3be5262e 6736 if (dm_plane_state_new->dc_state &&
cf322b49
NK
6737 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6738 struct dc_plane_state *plane_state =
6739 dm_plane_state_new->dc_state;
6740 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 6741
320932bf 6742 fill_plane_buffer_attributes(
695af5f9 6743 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 6744 afb->tiling_flags,
cf322b49
NK
6745 &plane_state->tiling_info, &plane_state->plane_size,
6746 &plane_state->dcc, &plane_state->address,
6eed95b0 6747 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
6748 }
6749
e7b07cee
HW
6750 return 0;
6751}
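/*
 * Illustrative summary (not additional driver code) of the pin protocol
 * above: reserve the BO with ttm_eu_reserve_buffers(), pin it into a
 * scanout-capable domain, bind it into GART via amdgpu_ttm_alloc_gart() so
 * it has a GPU address, then drop the reservation; the matching unpin
 * happens in dm_plane_helper_cleanup_fb() once the old framebuffer is done.
 */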
6752
3ee6b26b
AD
6753static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6754 struct drm_plane_state *old_state)
e7b07cee
HW
6755{
6756 struct amdgpu_bo *rbo;
e7b07cee
HW
6757 int r;
6758
6759 if (!old_state->fb)
6760 return;
6761
e68d14dd 6762 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
6763 r = amdgpu_bo_reserve(rbo, false);
6764 if (unlikely(r)) {
6765 DRM_ERROR("failed to reserve rbo before unpin\n");
6766 return;
b830ebc9
HW
6767 }
6768
6769 amdgpu_bo_unpin(rbo);
6770 amdgpu_bo_unreserve(rbo);
6771 amdgpu_bo_unref(&rbo);
e7b07cee
HW
6772}
6773
8c44515b
AP
6774static int dm_plane_helper_check_state(struct drm_plane_state *state,
6775 struct drm_crtc_state *new_crtc_state)
6776{
6300b3bd
MK
6777 struct drm_framebuffer *fb = state->fb;
6778 int min_downscale, max_upscale;
6779 int min_scale = 0;
6780 int max_scale = INT_MAX;
6781
40d916a2 6782 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 6783 if (fb && state->crtc) {
40d916a2
NC
6784 /* Validate viewport to cover the case when only the position changes */
6785 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6786 int viewport_width = state->crtc_w;
6787 int viewport_height = state->crtc_h;
6788
6789 if (state->crtc_x < 0)
6790 viewport_width += state->crtc_x;
6791 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6792 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6793
6794 if (state->crtc_y < 0)
6795 viewport_height += state->crtc_y;
6796 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6797 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6798
4abdb72b
NC
6799 if (viewport_width < 0 || viewport_height < 0) {
6800 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6801 return -EINVAL;
6802 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6803 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 6804 return -EINVAL;
4abdb72b
NC
6805 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6806 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 6807 return -EINVAL;
4abdb72b
NC
6808 }
6809
40d916a2
NC
6810 }
6811
6812 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
6813 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6814 &min_downscale, &max_upscale);
6815 /*
6816 * Convert to drm convention: 16.16 fixed point, instead of dc's
6817 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6818 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6819 */
6820 min_scale = (1000 << 16) / max_upscale;
6821 max_scale = (1000 << 16) / min_downscale;
6822 }
8c44515b 6823
8c44515b 6824 return drm_atomic_helper_check_plane_state(
6300b3bd 6825 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
6826}
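/*
 * Worked example of the fixed-point conversion above (illustrative): if DC
 * reports max_upscale = 16000 (16.0x in DC's 1.0 == 1000 convention), then
 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in drm's 16.16
 * src/dst convention where 0x10000 means 1.0.
 */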
6827
7578ecda 6828static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 6829 struct drm_atomic_state *state)
cbd19488 6830{
7c11b99a
MR
6831 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6832 plane);
1348969a 6833 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 6834 struct dc *dc = adev->dm.dc;
78171832 6835 struct dm_plane_state *dm_plane_state;
695af5f9 6836 struct dc_scaling_info scaling_info;
8c44515b 6837 struct drm_crtc_state *new_crtc_state;
695af5f9 6838 int ret;
78171832 6839
ba5c1649 6840 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 6841
ba5c1649 6842 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 6843
3be5262e 6844 if (!dm_plane_state->dc_state)
9a3329b1 6845 return 0;
cbd19488 6846
8c44515b 6847 new_crtc_state =
dec92020 6848 drm_atomic_get_new_crtc_state(state,
ba5c1649 6849 new_plane_state->crtc);
8c44515b
AP
6850 if (!new_crtc_state)
6851 return -EINVAL;
6852
ba5c1649 6853 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
6854 if (ret)
6855 return ret;
6856
ba5c1649 6857 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
695af5f9
NK
6858 if (ret)
6859 return ret;
a05bcff1 6860
62c933f9 6861 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
6862 return 0;
6863
6864 return -EINVAL;
6865}
6866
674e78ac 6867static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 6868 struct drm_atomic_state *state)
674e78ac
NK
6869{
6870 /* Only support async updates on cursor planes. */
6871 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6872 return -EINVAL;
6873
6874 return 0;
6875}
6876
6877static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 6878 struct drm_atomic_state *state)
674e78ac 6879{
5ddb0bd4
MR
6880 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6881 plane);
674e78ac 6882 struct drm_plane_state *old_state =
5ddb0bd4 6883 drm_atomic_get_old_plane_state(state, plane);
674e78ac 6884
e8a98235
RS
6885 trace_amdgpu_dm_atomic_update_cursor(new_state);
6886
332af874 6887 swap(plane->state->fb, new_state->fb);
674e78ac
NK
6888
6889 plane->state->src_x = new_state->src_x;
6890 plane->state->src_y = new_state->src_y;
6891 plane->state->src_w = new_state->src_w;
6892 plane->state->src_h = new_state->src_h;
6893 plane->state->crtc_x = new_state->crtc_x;
6894 plane->state->crtc_y = new_state->crtc_y;
6895 plane->state->crtc_w = new_state->crtc_w;
6896 plane->state->crtc_h = new_state->crtc_h;
6897
6898 handle_cursor_update(plane, old_state);
6899}
6900
e7b07cee
HW
6901static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6902 .prepare_fb = dm_plane_helper_prepare_fb,
6903 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 6904 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
6905 .atomic_async_check = dm_plane_atomic_async_check,
6906 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
6907};
6908
6909/*
6910 * TODO: these are currently initialized to rgb formats only.
6911 * For future use cases we should either initialize them dynamically based on
 6912 * plane capabilities, or initialize this array to all formats, so the
 1f6010a9 6913 * internal drm check will succeed, and let DC implement the proper check.
e7b07cee 6914 */
d90371b0 6915static const uint32_t rgb_formats[] = {
e7b07cee
HW
6916 DRM_FORMAT_XRGB8888,
6917 DRM_FORMAT_ARGB8888,
6918 DRM_FORMAT_RGBA8888,
6919 DRM_FORMAT_XRGB2101010,
6920 DRM_FORMAT_XBGR2101010,
6921 DRM_FORMAT_ARGB2101010,
6922 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
6923 DRM_FORMAT_XBGR8888,
6924 DRM_FORMAT_ABGR8888,
46dd9ff7 6925 DRM_FORMAT_RGB565,
e7b07cee
HW
6926};
6927
0d579c7e
NK
6928static const uint32_t overlay_formats[] = {
6929 DRM_FORMAT_XRGB8888,
6930 DRM_FORMAT_ARGB8888,
6931 DRM_FORMAT_RGBA8888,
6932 DRM_FORMAT_XBGR8888,
6933 DRM_FORMAT_ABGR8888,
7267a1a9 6934 DRM_FORMAT_RGB565
e7b07cee
HW
6935};
6936
6937static const u32 cursor_formats[] = {
6938 DRM_FORMAT_ARGB8888
6939};
6940
37c6a93b
NK
6941static int get_plane_formats(const struct drm_plane *plane,
6942 const struct dc_plane_cap *plane_cap,
6943 uint32_t *formats, int max_formats)
e7b07cee 6944{
37c6a93b
NK
6945 int i, num_formats = 0;
6946
6947 /*
6948 * TODO: Query support for each group of formats directly from
6949 * DC plane caps. This will require adding more formats to the
6950 * caps list.
6951 */
e7b07cee 6952
f180b4bc 6953 switch (plane->type) {
e7b07cee 6954 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
6955 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6956 if (num_formats >= max_formats)
6957 break;
6958
6959 formats[num_formats++] = rgb_formats[i];
6960 }
6961
ea36ad34 6962 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 6963 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
6964 if (plane_cap && plane_cap->pixel_format_support.p010)
6965 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
6966 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6967 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6968 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
6969 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6970 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 6971 }
e7b07cee 6972 break;
37c6a93b 6973
e7b07cee 6974 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
6975 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6976 if (num_formats >= max_formats)
6977 break;
6978
6979 formats[num_formats++] = overlay_formats[i];
6980 }
e7b07cee 6981 break;
37c6a93b 6982
e7b07cee 6983 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
6984 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6985 if (num_formats >= max_formats)
6986 break;
6987
6988 formats[num_formats++] = cursor_formats[i];
6989 }
e7b07cee
HW
6990 break;
6991 }
6992
37c6a93b
NK
6993 return num_formats;
6994}
6995
6996static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6997 struct drm_plane *plane,
6998 unsigned long possible_crtcs,
6999 const struct dc_plane_cap *plane_cap)
7000{
7001 uint32_t formats[32];
7002 int num_formats;
7003 int res = -EPERM;
ecc874a6 7004 unsigned int supported_rotations;
faa37f54 7005 uint64_t *modifiers = NULL;
37c6a93b
NK
7006
7007 num_formats = get_plane_formats(plane, plane_cap, formats,
7008 ARRAY_SIZE(formats));
7009
faa37f54
BN
7010 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7011 if (res)
7012 return res;
7013
4a580877 7014 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7015 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
7016 modifiers, plane->type, NULL);
7017 kfree(modifiers);
37c6a93b
NK
7018 if (res)
7019 return res;
7020
cc1fec57
NK
7021 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7022 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
7023 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7024 BIT(DRM_MODE_BLEND_PREMULTI);
7025
7026 drm_plane_create_alpha_property(plane);
7027 drm_plane_create_blend_mode_property(plane, blend_caps);
7028 }
7029
fc8e5230 7030 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
7031 plane_cap &&
7032 (plane_cap->pixel_format_support.nv12 ||
7033 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
7034 /* This only affects YUV formats. */
7035 drm_plane_create_color_properties(
7036 plane,
7037 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
7038 BIT(DRM_COLOR_YCBCR_BT709) |
7039 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
7040 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7041 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7042 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7043 }
7044
ecc874a6
PLG
7045 supported_rotations =
7046 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7047 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7048
1347385f
SS
7049 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7050 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
7051 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7052 supported_rotations);
ecc874a6 7053
f180b4bc 7054 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7055
96719c54 7056 /* Create (reset) the plane state */
f180b4bc
HW
7057 if (plane->funcs->reset)
7058 plane->funcs->reset(plane);
96719c54 7059
37c6a93b 7060 return 0;
e7b07cee
HW
7061}
7062
7578ecda
AD
7063static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7064 struct drm_plane *plane,
7065 uint32_t crtc_index)
e7b07cee
HW
7066{
7067 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7068 struct drm_plane *cursor_plane;
e7b07cee
HW
7069
7070 int res = -ENOMEM;
7071
7072 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7073 if (!cursor_plane)
7074 goto fail;
7075
f180b4bc 7076 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7077 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
7078
7079 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7080 if (!acrtc)
7081 goto fail;
7082
7083 res = drm_crtc_init_with_planes(
7084 dm->ddev,
7085 &acrtc->base,
7086 plane,
f180b4bc 7087 cursor_plane,
e7b07cee
HW
7088 &amdgpu_dm_crtc_funcs, NULL);
7089
7090 if (res)
7091 goto fail;
7092
7093 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7094
96719c54
HW
7095 /* Create (reset) the plane state */
7096 if (acrtc->base.funcs->reset)
7097 acrtc->base.funcs->reset(&acrtc->base);
7098
e7b07cee
HW
7099 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7100 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7101
7102 acrtc->crtc_id = crtc_index;
7103 acrtc->base.enabled = false;
c37e2d29 7104 acrtc->otg_inst = -1;
e7b07cee
HW
7105
7106 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
7107 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7108 true, MAX_COLOR_LUT_ENTRIES);
086247a4 7109 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 7110
e7b07cee
HW
7111 return 0;
7112
7113fail:
b830ebc9
HW
7114 kfree(acrtc);
7115 kfree(cursor_plane);
e7b07cee
HW
7116 return res;
7117}
7118
7119
7120static int to_drm_connector_type(enum signal_type st)
7121{
7122 switch (st) {
7123 case SIGNAL_TYPE_HDMI_TYPE_A:
7124 return DRM_MODE_CONNECTOR_HDMIA;
7125 case SIGNAL_TYPE_EDP:
7126 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
7127 case SIGNAL_TYPE_LVDS:
7128 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
7129 case SIGNAL_TYPE_RGB:
7130 return DRM_MODE_CONNECTOR_VGA;
7131 case SIGNAL_TYPE_DISPLAY_PORT:
7132 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7133 return DRM_MODE_CONNECTOR_DisplayPort;
7134 case SIGNAL_TYPE_DVI_DUAL_LINK:
7135 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7136 return DRM_MODE_CONNECTOR_DVID;
7137 case SIGNAL_TYPE_VIRTUAL:
7138 return DRM_MODE_CONNECTOR_VIRTUAL;
7139
7140 default:
7141 return DRM_MODE_CONNECTOR_Unknown;
7142 }
7143}
7144
2b4c1c05
DV
7145static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7146{
62afb4ad
JRS
7147 struct drm_encoder *encoder;
7148
7149 /* There is only one encoder per connector */
7150 drm_connector_for_each_possible_encoder(connector, encoder)
7151 return encoder;
7152
7153 return NULL;
2b4c1c05
DV
7154}
7155
e7b07cee
HW
7156static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7157{
e7b07cee
HW
7158 struct drm_encoder *encoder;
7159 struct amdgpu_encoder *amdgpu_encoder;
7160
2b4c1c05 7161 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
7162
7163 if (encoder == NULL)
7164 return;
7165
7166 amdgpu_encoder = to_amdgpu_encoder(encoder);
7167
7168 amdgpu_encoder->native_mode.clock = 0;
7169
7170 if (!list_empty(&connector->probed_modes)) {
7171 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 7172
e7b07cee 7173 list_for_each_entry(preferred_mode,
b830ebc9
HW
7174 &connector->probed_modes,
7175 head) {
7176 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7177 amdgpu_encoder->native_mode = *preferred_mode;
7178
e7b07cee
HW
7179 break;
7180 }
7181
7182 }
7183}
7184
3ee6b26b
AD
7185static struct drm_display_mode *
7186amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7187 char *name,
7188 int hdisplay, int vdisplay)
e7b07cee
HW
7189{
7190 struct drm_device *dev = encoder->dev;
7191 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7192 struct drm_display_mode *mode = NULL;
7193 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7194
7195 mode = drm_mode_duplicate(dev, native_mode);
7196
b830ebc9 7197 if (mode == NULL)
e7b07cee
HW
7198 return NULL;
7199
7200 mode->hdisplay = hdisplay;
7201 mode->vdisplay = vdisplay;
7202 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 7203 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
7204
7205 return mode;
7206
7207}
7208
7209static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 7210 struct drm_connector *connector)
e7b07cee
HW
7211{
7212 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7213 struct drm_display_mode *mode = NULL;
7214 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
7215 struct amdgpu_dm_connector *amdgpu_dm_connector =
7216 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7217 int i;
7218 int n;
7219 struct mode_size {
7220 char name[DRM_DISPLAY_MODE_LEN];
7221 int w;
7222 int h;
b830ebc9 7223 } common_modes[] = {
e7b07cee
HW
7224 { "640x480", 640, 480},
7225 { "800x600", 800, 600},
7226 { "1024x768", 1024, 768},
7227 { "1280x720", 1280, 720},
7228 { "1280x800", 1280, 800},
7229 {"1280x1024", 1280, 1024},
7230 { "1440x900", 1440, 900},
7231 {"1680x1050", 1680, 1050},
7232 {"1600x1200", 1600, 1200},
7233 {"1920x1080", 1920, 1080},
7234 {"1920x1200", 1920, 1200}
7235 };
7236
b830ebc9 7237 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
7238
7239 for (i = 0; i < n; i++) {
7240 struct drm_display_mode *curmode = NULL;
7241 bool mode_existed = false;
7242
7243 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
7244 common_modes[i].h > native_mode->vdisplay ||
7245 (common_modes[i].w == native_mode->hdisplay &&
7246 common_modes[i].h == native_mode->vdisplay))
7247 continue;
e7b07cee
HW
7248
7249 list_for_each_entry(curmode, &connector->probed_modes, head) {
7250 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 7251 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
7252 mode_existed = true;
7253 break;
7254 }
7255 }
7256
7257 if (mode_existed)
7258 continue;
7259
7260 mode = amdgpu_dm_create_common_mode(encoder,
7261 common_modes[i].name, common_modes[i].w,
7262 common_modes[i].h);
7263 drm_mode_probed_add(connector, mode);
c84dec2f 7264 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
7265 }
7266}
7267
3ee6b26b
AD
7268static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7269 struct edid *edid)
e7b07cee 7270{
c84dec2f
HW
7271 struct amdgpu_dm_connector *amdgpu_dm_connector =
7272 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7273
7274 if (edid) {
7275 /* empty probed_modes */
7276 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 7277 amdgpu_dm_connector->num_modes =
e7b07cee
HW
7278 drm_add_edid_modes(connector, edid);
7279
f1e5e913
YMM
 7280 /* Sort the probed modes before calling
 7281 * amdgpu_dm_get_native_mode(), since an EDID can
 7282 * contain more than one preferred mode. A mode
 7283 * later in the probed list may have a higher
 7284 * resolution that is still marked preferred: for
 7285 * example, 3840x2160 in the base EDID preferred
 7286 * timing and 4096x2160 in a later DID extension block.
 7287 */
7288 drm_mode_sort(&connector->probed_modes);
e7b07cee 7289 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
7290
7291 /* Freesync capabilities are reset by calling
7292 * drm_add_edid_modes() and need to be
7293 * restored here.
7294 */
7295 amdgpu_dm_update_freesync_caps(connector, edid);
a8d8d3dc 7296 } else {
c84dec2f 7297 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 7298 }
e7b07cee
HW
7299}
7300
a85ba005
NC
7301static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7302 struct drm_display_mode *mode)
7303{
7304 struct drm_display_mode *m;
7305
 7306 list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7307 if (drm_mode_equal(m, mode))
7308 return true;
7309 }
7310
7311 return false;
7312}
7313
7314static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7315{
7316 const struct drm_display_mode *m;
7317 struct drm_display_mode *new_mode;
7318 uint i;
7319 uint32_t new_modes_count = 0;
7320
7321 /* Standard FPS values
7322 *
7323 * 23.976 - TV/NTSC
7324 * 24 - Cinema
7325 * 25 - TV/PAL
7326 * 29.97 - TV/NTSC
7327 * 30 - TV/NTSC
7328 * 48 - Cinema HFR
7329 * 50 - TV/PAL
7330 * 60 - Commonly used
7331 * 48,72,96 - Multiples of 24
7332 */
7333 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7334 48000, 50000, 60000, 72000, 96000 };
7335
7336 /*
7337 * Find mode with highest refresh rate with the same resolution
7338 * as the preferred mode. Some monitors report a preferred mode
7339 * with lower resolution than the highest refresh rate supported.
7340 */
7341
7342 m = get_highest_refresh_rate_mode(aconnector, true);
7343 if (!m)
7344 return 0;
7345
7346 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7347 uint64_t target_vtotal, target_vtotal_diff;
7348 uint64_t num, den;
7349
7350 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7351 continue;
7352
7353 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7354 common_rates[i] > aconnector->max_vfreq * 1000)
7355 continue;
7356
7357 num = (unsigned long long)m->clock * 1000 * 1000;
7358 den = common_rates[i] * (unsigned long long)m->htotal;
7359 target_vtotal = div_u64(num, den);
7360 target_vtotal_diff = target_vtotal - m->vtotal;
7361
7362 /* Check for illegal modes */
7363 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7364 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7365 m->vtotal + target_vtotal_diff < m->vsync_end)
7366 continue;
7367
7368 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7369 if (!new_mode)
7370 goto out;
7371
7372 new_mode->vtotal += (u16)target_vtotal_diff;
7373 new_mode->vsync_start += (u16)target_vtotal_diff;
7374 new_mode->vsync_end += (u16)target_vtotal_diff;
7375 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7376 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7377
7378 if (!is_duplicate_mode(aconnector, new_mode)) {
7379 drm_mode_probed_add(&aconnector->base, new_mode);
7380 new_modes_count += 1;
7381 } else
7382 drm_mode_destroy(aconnector->base.dev, new_mode);
7383 }
7384 out:
7385 return new_modes_count;
7386}
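/*
 * Worked example (illustrative): retiming 1920x1080@60 (pixel clock
 * 148500 kHz, htotal 2200, vtotal 1125) to 48 Hz keeps the clock and
 * htotal fixed and solves for vtotal:
 *
 *	target_vtotal = 148500 * 1000 * 1000 / (48000 * 2200) = 1406
 *
 * so target_vtotal_diff = 281 is added to vtotal, vsync_start and
 * vsync_end, stretching the front porch while the sync width and back
 * porch are preserved.
 */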
7387
7388static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7389 struct edid *edid)
7390{
7391 struct amdgpu_dm_connector *amdgpu_dm_connector =
7392 to_amdgpu_dm_connector(connector);
7393
7394 if (!(amdgpu_freesync_vid_mode && edid))
7395 return;
fe8858bb 7396
a85ba005
NC
7397 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7398 amdgpu_dm_connector->num_modes +=
7399 add_fs_modes(amdgpu_dm_connector);
7400}
7401
7578ecda 7402static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 7403{
c84dec2f
HW
7404 struct amdgpu_dm_connector *amdgpu_dm_connector =
7405 to_amdgpu_dm_connector(connector);
e7b07cee 7406 struct drm_encoder *encoder;
c84dec2f 7407 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 7408
2b4c1c05 7409 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 7410
5c0e6840 7411 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
7412 amdgpu_dm_connector->num_modes =
7413 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
7414 } else {
7415 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7416 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 7417 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 7418 }
3e332d3a 7419 amdgpu_dm_fbc_init(connector);
5099114b 7420
c84dec2f 7421 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
7422}
7423
3ee6b26b
AD
7424void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7425 struct amdgpu_dm_connector *aconnector,
7426 int connector_type,
7427 struct dc_link *link,
7428 int link_index)
e7b07cee 7429{
1348969a 7430 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 7431
f04bee34
NK
7432 /*
7433 * Some of the properties below require access to state, like bpc.
7434 * Allocate some default initial connector state with our reset helper.
7435 */
7436 if (aconnector->base.funcs->reset)
7437 aconnector->base.funcs->reset(&aconnector->base);
7438
e7b07cee
HW
7439 aconnector->connector_id = link_index;
7440 aconnector->dc_link = link;
7441 aconnector->base.interlace_allowed = false;
7442 aconnector->base.doublescan_allowed = false;
7443 aconnector->base.stereo_allowed = false;
7444 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7445 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 7446 aconnector->audio_inst = -1;
e7b07cee
HW
7447 mutex_init(&aconnector->hpd_lock);
7448
1f6010a9
DF
7449 /*
 7450 * Configure HPD support: connector->polled defaults to 0,
b830ebc9
HW
 7451 * which means HPD hot plug is not supported.
7452 */
e7b07cee
HW
7453 switch (connector_type) {
7454 case DRM_MODE_CONNECTOR_HDMIA:
7455 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7456 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7457 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
7458 break;
7459 case DRM_MODE_CONNECTOR_DisplayPort:
7460 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7461 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7462 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
7463 break;
7464 case DRM_MODE_CONNECTOR_DVID:
7465 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7466 break;
7467 default:
7468 break;
7469 }
7470
7471 drm_object_attach_property(&aconnector->base.base,
7472 dm->ddev->mode_config.scaling_mode_property,
7473 DRM_MODE_SCALE_NONE);
7474
7475 drm_object_attach_property(&aconnector->base.base,
7476 adev->mode_info.underscan_property,
7477 UNDERSCAN_OFF);
7478 drm_object_attach_property(&aconnector->base.base,
7479 adev->mode_info.underscan_hborder_property,
7480 0);
7481 drm_object_attach_property(&aconnector->base.base,
7482 adev->mode_info.underscan_vborder_property,
7483 0);
1825fd34 7484
8c61b31e
JFZ
7485 if (!aconnector->mst_port)
7486 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 7487
4a8ca46b
RL
7488 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7489 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7490 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 7491
c1ee92f9 7492 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 7493 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
7494 drm_object_attach_property(&aconnector->base.base,
7495 adev->mode_info.abm_level_property, 0);
7496 }
bb47de73
NK
7497
7498 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
7499 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7500 connector_type == DRM_MODE_CONNECTOR_eDP) {
88694af9
NK
7501 drm_object_attach_property(
7502 &aconnector->base.base,
7503 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7504
8c61b31e
JFZ
7505 if (!aconnector->mst_port)
7506 drm_connector_attach_vrr_capable_property(&aconnector->base);
7507
0c8620d6 7508#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 7509 if (adev->dm.hdcp_workqueue)
53e108aa 7510 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 7511#endif
bb47de73 7512 }
e7b07cee
HW
7513}
7514
7578ecda
AD
7515static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7516 struct i2c_msg *msgs, int num)
e7b07cee
HW
7517{
7518 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7519 struct ddc_service *ddc_service = i2c->ddc_service;
7520 struct i2c_command cmd;
7521 int i;
7522 int result = -EIO;
7523
b830ebc9 7524 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
7525
7526 if (!cmd.payloads)
7527 return result;
7528
7529 cmd.number_of_payloads = num;
7530 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7531 cmd.speed = 100;
7532
7533 for (i = 0; i < num; i++) {
7534 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7535 cmd.payloads[i].address = msgs[i].addr;
7536 cmd.payloads[i].length = msgs[i].len;
7537 cmd.payloads[i].data = msgs[i].buf;
7538 }
7539
c85e6e54
DF
7540 if (dc_submit_i2c(
7541 ddc_service->ctx->dc,
7542 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
7543 &cmd))
7544 result = num;
7545
7546 kfree(cmd.payloads);
7547 return result;
7548}
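/*
 * Hedged usage sketch (example_ddc_read_byte is hypothetical): once the
 * adapter is registered by create_i2c() below, any kernel I2C client can
 * reach the DDC lines through the standard API, e.g. a one-byte EDID read
 * from the conventional 0x50 address:
 */
static int example_ddc_read_byte(struct i2c_adapter *adap, u8 *out)
{
	struct i2c_msg msg = {
		.addr = 0x50,		/* assumed: standard EDID slave address */
		.flags = I2C_M_RD,
		.len = 1,
		.buf = out,
	};

	/* i2c_transfer() lands in amdgpu_dm_i2c_xfer() above */
	return i2c_transfer(adap, &msg, 1) == 1 ? 0 : -EIO;
}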
7549
7578ecda 7550static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
7551{
7552 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7553}
7554
7555static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7556 .master_xfer = amdgpu_dm_i2c_xfer,
7557 .functionality = amdgpu_dm_i2c_func,
7558};
7559
3ee6b26b
AD
7560static struct amdgpu_i2c_adapter *
7561create_i2c(struct ddc_service *ddc_service,
7562 int link_index,
7563 int *res)
e7b07cee
HW
7564{
7565 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7566 struct amdgpu_i2c_adapter *i2c;
7567
b830ebc9 7568 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
7569 if (!i2c)
7570 return NULL;
e7b07cee
HW
7571 i2c->base.owner = THIS_MODULE;
7572 i2c->base.class = I2C_CLASS_DDC;
7573 i2c->base.dev.parent = &adev->pdev->dev;
7574 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 7575 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
7576 i2c_set_adapdata(&i2c->base, i2c);
7577 i2c->ddc_service = ddc_service;
c85e6e54 7578 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
7579
7580 return i2c;
7581}
7582
89fc8d4e 7583
1f6010a9
DF
7584/*
7585 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
7586 * dc_link which will be represented by this aconnector.
7587 */
7578ecda
AD
7588static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7589 struct amdgpu_dm_connector *aconnector,
7590 uint32_t link_index,
7591 struct amdgpu_encoder *aencoder)
e7b07cee
HW
7592{
7593 int res = 0;
7594 int connector_type;
7595 struct dc *dc = dm->dc;
7596 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7597 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
7598
7599 link->priv = aconnector;
e7b07cee 7600
f1ad2f5e 7601 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
7602
7603 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
7604 if (!i2c) {
7605 DRM_ERROR("Failed to create i2c adapter data\n");
7606 return -ENOMEM;
7607 }
7608
e7b07cee
HW
7609 aconnector->i2c = i2c;
7610 res = i2c_add_adapter(&i2c->base);
7611
7612 if (res) {
7613 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7614 goto out_free;
7615 }
7616
7617 connector_type = to_drm_connector_type(link->connector_signal);
7618
17165de2 7619 res = drm_connector_init_with_ddc(
e7b07cee
HW
7620 dm->ddev,
7621 &aconnector->base,
7622 &amdgpu_dm_connector_funcs,
17165de2
AP
7623 connector_type,
7624 &i2c->base);
e7b07cee
HW
7625
7626 if (res) {
7627 DRM_ERROR("connector_init failed\n");
7628 aconnector->connector_id = -1;
7629 goto out_free;
7630 }
7631
7632 drm_connector_helper_add(
7633 &aconnector->base,
7634 &amdgpu_dm_connector_helper_funcs);
7635
7636 amdgpu_dm_connector_init_helper(
7637 dm,
7638 aconnector,
7639 connector_type,
7640 link,
7641 link_index);
7642
cde4c44d 7643 drm_connector_attach_encoder(
e7b07cee
HW
7644 &aconnector->base, &aencoder->base);
7645
e7b07cee
HW
7646 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7647 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 7648 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 7649
e7b07cee
HW
7650out_free:
7651 if (res) {
7652 kfree(i2c);
7653 aconnector->i2c = NULL;
7654 }
7655 return res;
7656}
7657
7658int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7659{
7660 switch (adev->mode_info.num_crtc) {
7661 case 1:
7662 return 0x1;
7663 case 2:
7664 return 0x3;
7665 case 3:
7666 return 0x7;
7667 case 4:
7668 return 0xf;
7669 case 5:
7670 return 0x1f;
7671 case 6:
7672 default:
7673 return 0x3f;
7674 }
7675}
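/*
 * Illustrative equivalent (not the driver's code): the switch above is just
 * a saturating low-bit mask over the CRTC count, i.e. something like:
 */
static inline int example_encoder_crtc_mask(int num_crtc)
{
	return (1 << min(num_crtc, 6)) - 1;	/* 1 -> 0x1, ..., 6+ -> 0x3f */
}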
7676
7578ecda
AD
7677static int amdgpu_dm_encoder_init(struct drm_device *dev,
7678 struct amdgpu_encoder *aencoder,
7679 uint32_t link_index)
e7b07cee 7680{
1348969a 7681 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
7682
7683 int res = drm_encoder_init(dev,
7684 &aencoder->base,
7685 &amdgpu_dm_encoder_funcs,
7686 DRM_MODE_ENCODER_TMDS,
7687 NULL);
7688
7689 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7690
7691 if (!res)
7692 aencoder->encoder_id = link_index;
7693 else
7694 aencoder->encoder_id = -1;
7695
7696 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7697
7698 return res;
7699}
7700
3ee6b26b
AD
7701static void manage_dm_interrupts(struct amdgpu_device *adev,
7702 struct amdgpu_crtc *acrtc,
7703 bool enable)
e7b07cee
HW
7704{
7705 /*
8fe684e9
NK
7706 * We have no guarantee that the frontend index maps to the same
7707 * backend index - some even map to more than one.
7708 *
7709 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
7710 */
7711 int irq_type =
734dd01d 7712 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
7713 adev,
7714 acrtc->crtc_id);
7715
7716 if (enable) {
7717 drm_crtc_vblank_on(&acrtc->base);
7718 amdgpu_irq_get(
7719 adev,
7720 &adev->pageflip_irq,
7721 irq_type);
86bc2219
WL
7722#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7723 amdgpu_irq_get(
7724 adev,
7725 &adev->vline0_irq,
7726 irq_type);
7727#endif
e7b07cee 7728 } else {
86bc2219
WL
7729#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7730 amdgpu_irq_put(
7731 adev,
7732 &adev->vline0_irq,
7733 irq_type);
7734#endif
e7b07cee
HW
7735 amdgpu_irq_put(
7736 adev,
7737 &adev->pageflip_irq,
7738 irq_type);
7739 drm_crtc_vblank_off(&acrtc->base);
7740 }
7741}
7742
8fe684e9
NK
7743static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7744 struct amdgpu_crtc *acrtc)
7745{
7746 int irq_type =
7747 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7748
 7749 /*
 7750 * This reads the current state for the IRQ and forcibly reapplies
7751 * the setting to hardware.
7752 */
7753 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7754}
7755
3ee6b26b
AD
7756static bool
7757is_scaling_state_different(const struct dm_connector_state *dm_state,
7758 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
7759{
7760 if (dm_state->scaling != old_dm_state->scaling)
7761 return true;
7762 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7763 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7764 return true;
7765 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7766 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7767 return true;
b830ebc9
HW
7768 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7769 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7770 return true;
e7b07cee
HW
7771 return false;
7772}
7773
0c8620d6
BL
7774#ifdef CONFIG_DRM_AMD_DC_HDCP
7775static bool is_content_protection_different(struct drm_connector_state *state,
7776 const struct drm_connector_state *old_state,
7777 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7778{
7779 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 7780 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 7781
31c0ed90 7782 /* Handle: Type0/1 change */
53e108aa
BL
7783 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7784 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7785 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7786 return true;
7787 }
7788
31c0ed90
BL
 7789 /* CP is being re-enabled, ignore this
7790 *
7791 * Handles: ENABLED -> DESIRED
7792 */
0c8620d6
BL
7793 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7794 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7795 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7796 return false;
7797 }
7798
31c0ed90
BL
 7799 /* S3 resume case: old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7800 *
7801 * Handles: UNDESIRED -> ENABLED
7802 */
0c8620d6
BL
7803 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7804 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7805 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7806
 7807 /* Check if something is connected or enabled; otherwise we would start HDCP
 7808 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
31c0ed90
BL
7809 *
7810 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 7811 */
97f6c917
BL
7812 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7813 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7814 dm_con_state->update_hdcp = false;
0c8620d6 7815 return true;
97f6c917 7816 }
0c8620d6 7817
31c0ed90
BL
7818 /*
7819 * Handles: UNDESIRED -> UNDESIRED
7820 * DESIRED -> DESIRED
7821 * ENABLED -> ENABLED
7822 */
0c8620d6
BL
7823 if (old_state->content_protection == state->content_protection)
7824 return false;
7825
31c0ed90
BL
7826 /*
7827 * Handles: UNDESIRED -> DESIRED
7828 * DESIRED -> UNDESIRED
7829 * ENABLED -> UNDESIRED
7830 */
97f6c917 7831 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
7832 return true;
7833
31c0ed90
BL
7834 /*
7835 * Handles: DESIRED -> ENABLED
7836 */
0c8620d6
BL
7837 return false;
7838}
7839
0c8620d6 7840#endif
3ee6b26b
AD
7841static void remove_stream(struct amdgpu_device *adev,
7842 struct amdgpu_crtc *acrtc,
7843 struct dc_stream_state *stream)
e7b07cee
HW
7844{
7845 /* this is the update mode case */
e7b07cee
HW
7846
7847 acrtc->otg_inst = -1;
7848 acrtc->enabled = false;
7849}
7850
7578ecda
AD
7851static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7852 struct dc_cursor_position *position)
2a8f6ccb 7853{
f4c2cc43 7854 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
7855 int x, y;
7856 int xorigin = 0, yorigin = 0;
7857
e371e19c 7858 if (!crtc || !plane->state->fb)
2a8f6ccb 7859 return 0;
2a8f6ccb
HW
7860
7861 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7862 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7863 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7864 __func__,
7865 plane->state->crtc_w,
7866 plane->state->crtc_h);
7867 return -EINVAL;
7868 }
7869
7870 x = plane->state->crtc_x;
7871 y = plane->state->crtc_y;
c14a005c 7872
e371e19c
NK
7873 if (x <= -amdgpu_crtc->max_cursor_width ||
7874 y <= -amdgpu_crtc->max_cursor_height)
7875 return 0;
7876
2a8f6ccb
HW
7877 if (x < 0) {
7878 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7879 x = 0;
7880 }
7881 if (y < 0) {
7882 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7883 y = 0;
7884 }
7885 position->enable = true;
d243b6ff 7886 position->translate_by_source = true;
2a8f6ccb
HW
7887 position->x = x;
7888 position->y = y;
7889 position->x_hotspot = xorigin;
7890 position->y_hotspot = yorigin;
7891
7892 return 0;
7893}
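/*
 * Worked example (illustrative): a 64x64 cursor placed at crtc_x = -10,
 * crtc_y = 5 passes the bounds checks above, then x is clamped to 0 with
 * x_hotspot = 10, so DC draws the cursor image shifted 10 pixels left
 * instead of rejecting the partially offscreen position.
 */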
7894
3ee6b26b
AD
7895static void handle_cursor_update(struct drm_plane *plane,
7896 struct drm_plane_state *old_plane_state)
e7b07cee 7897{
1348969a 7898 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
7899 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7900 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7901 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7902 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7903 uint64_t address = afb ? afb->address : 0;
6a30a929 7904 struct dc_cursor_position position = {0};
2a8f6ccb
HW
7905 struct dc_cursor_attributes attributes;
7906 int ret;
7907
e7b07cee
HW
7908 if (!plane->state->fb && !old_plane_state->fb)
7909 return;
7910
cb2318b7 7911 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
4711c033
LT
7912 __func__,
7913 amdgpu_crtc->crtc_id,
7914 plane->state->crtc_w,
7915 plane->state->crtc_h);
2a8f6ccb
HW
7916
7917 ret = get_cursor_position(plane, crtc, &position);
7918 if (ret)
7919 return;
7920
7921 if (!position.enable) {
7922 /* turn off cursor */
674e78ac
NK
7923 if (crtc_state && crtc_state->stream) {
7924 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
7925 dc_stream_set_cursor_position(crtc_state->stream,
7926 &position);
674e78ac
NK
7927 mutex_unlock(&adev->dm.dc_lock);
7928 }
2a8f6ccb 7929 return;
e7b07cee 7930 }
e7b07cee 7931
2a8f6ccb
HW
7932 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7933 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7934
c1cefe11 7935 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
7936 attributes.address.high_part = upper_32_bits(address);
7937 attributes.address.low_part = lower_32_bits(address);
7938 attributes.width = plane->state->crtc_w;
7939 attributes.height = plane->state->crtc_h;
7940 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7941 attributes.rotation_angle = 0;
7942 attributes.attribute_flags.value = 0;
7943
03a66367 7944 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 7945
886daac9 7946 if (crtc_state->stream) {
674e78ac 7947 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
7948 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7949 &attributes))
7950 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 7951
2a8f6ccb
HW
7952 if (!dc_stream_set_cursor_position(crtc_state->stream,
7953 &position))
7954 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 7955 mutex_unlock(&adev->dm.dc_lock);
886daac9 7956 }
2a8f6ccb 7957}
e7b07cee
HW
7958
7959static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7960{
7961
7962 assert_spin_locked(&acrtc->base.dev->event_lock);
7963 WARN_ON(acrtc->event);
7964
7965 acrtc->event = acrtc->base.state->event;
7966
7967 /* Set the flip status */
7968 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7969
7970 /* Mark this event as consumed */
7971 acrtc->base.state->event = NULL;
7972
cb2318b7
VL
7973 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7974 acrtc->crtc_id);
e7b07cee
HW
7975}
7976
bb47de73
NK
7977static void update_freesync_state_on_stream(
7978 struct amdgpu_display_manager *dm,
7979 struct dm_crtc_state *new_crtc_state,
180db303
NK
7980 struct dc_stream_state *new_stream,
7981 struct dc_plane_state *surface,
7982 u32 flip_timestamp_in_us)
bb47de73 7983{
09aef2c4 7984 struct mod_vrr_params vrr_params;
bb47de73 7985 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 7986 struct amdgpu_device *adev = dm->adev;
585d450c 7987 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 7988 unsigned long flags;
4cda3243 7989 bool pack_sdp_v1_3 = false;
bb47de73
NK
7990
7991 if (!new_stream)
7992 return;
7993
7994 /*
7995 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7996 * For now it's sufficient to just guard against these conditions.
7997 */
7998
7999 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8000 return;
8001
4a580877 8002 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8003 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8004
180db303
NK
8005 if (surface) {
8006 mod_freesync_handle_preflip(
8007 dm->freesync_module,
8008 surface,
8009 new_stream,
8010 flip_timestamp_in_us,
8011 &vrr_params);
09aef2c4
MK
8012
8013 if (adev->family < AMDGPU_FAMILY_AI &&
8014 amdgpu_dm_vrr_active(new_crtc_state)) {
8015 mod_freesync_handle_v_update(dm->freesync_module,
8016 new_stream, &vrr_params);
e63e2491
EB
8017
8018 /* Need to call this before the frame ends. */
8019 dc_stream_adjust_vmin_vmax(dm->dc,
8020 new_crtc_state->stream,
8021 &vrr_params.adjust);
09aef2c4 8022 }
180db303 8023 }
bb47de73
NK
8024
8025 mod_freesync_build_vrr_infopacket(
8026 dm->freesync_module,
8027 new_stream,
180db303 8028 &vrr_params,
ecd0136b
HT
8029 PACKET_TYPE_VRR,
8030 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
8031 &vrr_infopacket,
8032 pack_sdp_v1_3);
bb47de73 8033
8a48b44c 8034 new_crtc_state->freesync_timing_changed |=
585d450c 8035 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
8036 &vrr_params.adjust,
8037 sizeof(vrr_params.adjust)) != 0);
bb47de73 8038
8a48b44c 8039 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
8040 (memcmp(&new_crtc_state->vrr_infopacket,
8041 &vrr_infopacket,
8042 sizeof(vrr_infopacket)) != 0);
8043
585d450c 8044 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
8045 new_crtc_state->vrr_infopacket = vrr_infopacket;
8046
585d450c 8047 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
8048 new_stream->vrr_infopacket = vrr_infopacket;
8049
8050 if (new_crtc_state->freesync_vrr_info_changed)
8051 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8052 new_crtc_state->base.crtc->base.id,
8053 (int)new_crtc_state->base.vrr_enabled,
180db303 8054 (int)vrr_params.state);
09aef2c4 8055
4a580877 8056 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
8057}
8058
585d450c 8059static void update_stream_irq_parameters(
e854194c
MK
8060 struct amdgpu_display_manager *dm,
8061 struct dm_crtc_state *new_crtc_state)
8062{
8063 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 8064 struct mod_vrr_params vrr_params;
e854194c 8065 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 8066 struct amdgpu_device *adev = dm->adev;
585d450c 8067 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8068 unsigned long flags;
e854194c
MK
8069
8070 if (!new_stream)
8071 return;
8072
8073 /*
8074 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8075 * For now it's sufficient to just guard against these conditions.
8076 */
8077 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8078 return;
8079
4a580877 8080 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8081 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8082
e854194c
MK
8083 if (new_crtc_state->vrr_supported &&
8084 config.min_refresh_in_uhz &&
8085 config.max_refresh_in_uhz) {
a85ba005
NC
8086 /*
8087 * if freesync compatible mode was set, config.state will be set
8088 * in atomic check
8089 */
8090 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8091 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8092 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8093 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8094 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8095 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8096 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8097 } else {
8098 config.state = new_crtc_state->base.vrr_enabled ?
8099 VRR_STATE_ACTIVE_VARIABLE :
8100 VRR_STATE_INACTIVE;
8101 }
e854194c
MK
8102 } else {
8103 config.state = VRR_STATE_UNSUPPORTED;
8104 }
8105
8106 mod_freesync_build_vrr_params(dm->freesync_module,
8107 new_stream,
8108 &config, &vrr_params);
8109
8110 new_crtc_state->freesync_timing_changed |=
585d450c
AP
8111 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8112 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 8113
585d450c
AP
8114 new_crtc_state->freesync_config = config;
8115 /* Copy state for access from DM IRQ handler */
8116 acrtc->dm_irq_params.freesync_config = config;
8117 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8118 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 8119 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
8120}
8121
66b0c973
MK
8122static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8123 struct dm_crtc_state *new_state)
8124{
8125 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8126 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8127
8128 if (!old_vrr_active && new_vrr_active) {
8129 /* Transition VRR inactive -> active:
8130 * While VRR is active, we must not disable vblank irq, as a
 8131 * re-enable after disable could compute bogus vblank/pflip
 8132 * timestamps if it happened inside the display front porch.
d2574c33
MK
8133 *
8134 * We also need vupdate irq for the actual core vblank handling
8135 * at end of vblank.
66b0c973 8136 */
d2574c33 8137 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
8138 drm_crtc_vblank_get(new_state->base.crtc);
8139 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8140 __func__, new_state->base.crtc->base.id);
8141 } else if (old_vrr_active && !new_vrr_active) {
8142 /* Transition VRR active -> inactive:
8143 * Allow vblank irq disable again for fixed refresh rate.
8144 */
d2574c33 8145 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
8146 drm_crtc_vblank_put(new_state->base.crtc);
8147 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8148 __func__, new_state->base.crtc->base.id);
8149 }
8150}
8151
8ad27806
NK
8152static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8153{
8154 struct drm_plane *plane;
8155 struct drm_plane_state *old_plane_state, *new_plane_state;
8156 int i;
8157
8158 /*
8159 * TODO: Make this per-stream so we don't issue redundant updates for
8160 * commits with multiple streams.
8161 */
8162 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8163 new_plane_state, i)
8164 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8165 handle_cursor_update(plane, old_plane_state);
8166}
8167
3be5262e 8168static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 8169 struct dc_state *dc_state,
3ee6b26b
AD
8170 struct drm_device *dev,
8171 struct amdgpu_display_manager *dm,
8172 struct drm_crtc *pcrtc,
420cd472 8173 bool wait_for_vblank)
e7b07cee 8174{
efc8278e 8175 uint32_t i;
8a48b44c 8176 uint64_t timestamp_ns;
e7b07cee 8177 struct drm_plane *plane;
0bc9706d 8178 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 8179 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
8180 struct drm_crtc_state *new_pcrtc_state =
8181 drm_atomic_get_new_crtc_state(state, pcrtc);
8182 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
8183 struct dm_crtc_state *dm_old_crtc_state =
8184 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 8185 int planes_count = 0, vpos, hpos;
570c91d5 8186 long r;
e7b07cee 8187 unsigned long flags;
8a48b44c 8188 struct amdgpu_bo *abo;
fdd1fe57
MK
8189 uint32_t target_vblank, last_flip_vblank;
8190 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 8191 bool pflip_present = false;
bc7f670e
DF
8192 struct {
8193 struct dc_surface_update surface_updates[MAX_SURFACES];
8194 struct dc_plane_info plane_infos[MAX_SURFACES];
8195 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 8196 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 8197 struct dc_stream_update stream_update;
74aa7bd4 8198 } *bundle;
bc7f670e 8199
74aa7bd4 8200 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 8201
74aa7bd4
DF
8202 if (!bundle) {
8203 dm_error("Failed to allocate update bundle\n");
4b510503
NK
8204 goto cleanup;
8205 }
e7b07cee 8206
8ad27806
NK
8207 /*
8208 * Disable the cursor first if we're disabling all the planes.
8209 * It'll remain on the screen after the planes are re-enabled
8210 * if we don't.
8211 */
8212 if (acrtc_state->active_planes == 0)
8213 amdgpu_dm_commit_cursors(state);
8214
e7b07cee 8215 /* update planes when needed */
efc8278e 8216 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 8217 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 8218 struct drm_crtc_state *new_crtc_state;
0bc9706d 8219 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 8220 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 8221 bool plane_needs_flip;
c7af5f77 8222 struct dc_plane_state *dc_plane;
54d76575 8223 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 8224
80c218d5
NK
8225 /* Cursor plane is handled after stream updates */
8226 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 8227 continue;
e7b07cee 8228
f5ba60fe
DD
8229 if (!fb || !crtc || pcrtc != crtc)
8230 continue;
8231
8232 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8233 if (!new_crtc_state->active)
e7b07cee
HW
8234 continue;
8235
bc7f670e 8236 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 8237
74aa7bd4 8238 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 8239 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
8240 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8241 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 8242 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 8243 }
8a48b44c 8244
695af5f9
NK
8245 fill_dc_scaling_info(new_plane_state,
8246 &bundle->scaling_infos[planes_count]);
8a48b44c 8247
695af5f9
NK
8248 bundle->surface_updates[planes_count].scaling_info =
8249 &bundle->scaling_infos[planes_count];
8a48b44c 8250
f5031000 8251 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 8252
f5031000 8253 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 8254
f5031000
DF
8255 if (!plane_needs_flip) {
8256 planes_count += 1;
8257 continue;
8258 }
8a48b44c 8259
2fac0f53
CK
8260 abo = gem_to_amdgpu_bo(fb->obj[0]);
8261
f8308898
AG
8262 /*
8263 * Wait for all fences on this FB. Do limited wait to avoid
8264 * deadlock during GPU reset when this fence will not signal
8265 * but we hold reservation lock for the BO.
8266 */
52791eee 8267 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 8268 false,
f8308898
AG
8269 msecs_to_jiffies(5000));
8270 if (unlikely(r <= 0))
ed8a5fb2 8271 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 8272
695af5f9 8273 fill_dc_plane_info_and_addr(
8ce5d842 8274 dm->adev, new_plane_state,
6eed95b0 8275 afb->tiling_flags,
695af5f9 8276 &bundle->plane_infos[planes_count],
87b7ebc2 8277 &bundle->flip_addrs[planes_count].address,
6eed95b0 8278 afb->tmz_surface, false);
87b7ebc2 8279
4711c033 8280 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
8281 new_plane_state->plane->index,
8282 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
8283
8284 bundle->surface_updates[planes_count].plane_info =
8285 &bundle->plane_infos[planes_count];
8a48b44c 8286
caff0e66
NK
8287 /*
8288 * Only allow immediate flips for fast updates that don't
8289 * change FB pitch, DCC state, rotation or mirroring.
8290 */
f5031000 8291 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 8292 crtc->state->async_flip &&
caff0e66 8293 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 8294
f5031000
DF
8295 timestamp_ns = ktime_get_ns();
8296 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8297 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8298 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 8299
f5031000
DF
8300 if (!bundle->surface_updates[planes_count].surface) {
8301 DRM_ERROR("No surface for CRTC: id=%d\n",
8302 acrtc_attach->crtc_id);
8303 continue;
bc7f670e
DF
8304 }
8305
f5031000
DF
8306 if (plane == pcrtc->primary)
8307 update_freesync_state_on_stream(
8308 dm,
8309 acrtc_state,
8310 acrtc_state->stream,
8311 dc_plane,
8312 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 8313
4711c033 8314 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
8315 __func__,
8316 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8317 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
8318
8319 planes_count += 1;
8320
8a48b44c
DF
8321 }
8322
74aa7bd4 8323 if (pflip_present) {
634092b1
MK
8324 if (!vrr_active) {
8325 /* Use old throttling in non-vrr fixed refresh rate mode
8326 * to keep flip scheduling based on target vblank counts
8327 * working in a backwards compatible way, e.g., for
8328 * clients using the GLX_OML_sync_control extension or
8329 * DRI3/Present extension with defined target_msc.
8330 */
e3eff4b5 8331 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
8332 } else {
8334 /* For variable refresh rate mode only:
8335 * Get vblank of last completed flip to avoid > 1 vrr
8336 * flips per video frame by use of throttling, but allow
8337 * flip programming anywhere in the possibly large
8338 * variable vrr vblank interval for fine-grained flip
8339 * timing control and more opportunity to avoid stutter
8340 * on late submission of flips.
8341 */
8342 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 8343 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
8344 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8345 }
8346
fdd1fe57 8347 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
8348
8349 /*
8350 * Wait until we're out of the vertical blank period before the one
8351 * targeted by the flip
8352 */
8353 while ((acrtc_attach->enabled &&
8354 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8355 0, &vpos, &hpos, NULL,
8356 NULL, &pcrtc->hwmode)
8357 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8358 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8359 (int)(target_vblank -
e3eff4b5 8360 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
8361 usleep_range(1000, 1100);
8362 }
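/*
 * Worked example of the throttle above (editor's note, assumed values):
 * with last_flip_vblank == 1000 and wait_for_vblank == true,
 * target_vblank == 1001, so we poll in ~1 ms steps until the hardware
 * counter reaches 1001 or the scanout position leaves vblank. The
 * (int) cast keeps the comparison correct across 32-bit counter
 * wraparound: target_vblank == 2 with counter == 0xfffffffe still
 * yields a positive difference of 4, so we keep waiting.
 */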
8363
8fe684e9
NK
8364 /**
8365 * Prepare the flip event for the pageflip interrupt to handle.
8366 *
8367 * This only works in the case where we've already turned on the
8368 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8369 * from 0 -> n planes we have to skip a hardware generated event
8370 * and rely on sending it from software.
8371 */
8372 if (acrtc_attach->base.state->event &&
8373 acrtc_state->active_planes > 0) {
8a48b44c
DF
8374 drm_crtc_vblank_get(pcrtc);
8375
8376 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8377
8378 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8379 prepare_flip_isr(acrtc_attach);
8380
8381 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8382 }
8383
8384 if (acrtc_state->stream) {
8a48b44c 8385 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 8386 bundle->stream_update.vrr_infopacket =
8a48b44c 8387 &acrtc_state->stream->vrr_infopacket;
e7b07cee 8388 }
e7b07cee
HW
8389 }
8390
bc92c065 8391 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
8392 if ((planes_count || acrtc_state->active_planes == 0) &&
8393 acrtc_state->stream) {
b6e881c9 8394 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 8395 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
8396 bundle->stream_update.src = acrtc_state->stream->src;
8397 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
8398 }
8399
cf020d49
NK
8400 if (new_pcrtc_state->color_mgmt_changed) {
8401 /*
8402 * TODO: This isn't fully correct since we've actually
8403 * already modified the stream in place.
8404 */
8405 bundle->stream_update.gamut_remap =
8406 &acrtc_state->stream->gamut_remap_matrix;
8407 bundle->stream_update.output_csc_transform =
8408 &acrtc_state->stream->csc_color_matrix;
8409 bundle->stream_update.out_transfer_func =
8410 acrtc_state->stream->out_transfer_func;
8411 }
bc7f670e 8412
8a48b44c 8413 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 8414 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 8415 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 8416
e63e2491
EB
8417 /*
8418 * If FreeSync state on the stream has changed then we need to
8419 * re-adjust the min/max bounds now that DC doesn't handle this
8420 * as part of commit.
8421 */
a85ba005 8422 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
8423 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8424 dc_stream_adjust_vmin_vmax(
8425 dm->dc, acrtc_state->stream,
585d450c 8426 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
8427 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8428 }
bc7f670e 8429 mutex_lock(&dm->dc_lock);
8c322309 8430 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 8431 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
8432 amdgpu_dm_psr_disable(acrtc_state->stream);
8433
bc7f670e 8434 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 8435 bundle->surface_updates,
bc7f670e
DF
8436 planes_count,
8437 acrtc_state->stream,
efc8278e
AJ
8438 &bundle->stream_update,
8439 dc_state);
8c322309 8440
8fe684e9
NK
8441 /**
8442 * Enable or disable the interrupts on the backend.
8443 *
8444 * Most pipes are put into power gating when unused.
8445 *
8446 * When power gating is enabled on a pipe we lose the
8447 * interrupt enablement state when power gating is disabled.
8448 *
8449 * So we need to update the IRQ control state in hardware
8450 * whenever the pipe turns on (since it could be previously
8451 * power gated) or off (since some pipes can't be power gated
8452 * on some ASICs).
8453 */
8454 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
8455 dm_update_pflip_irq_state(drm_to_adev(dev),
8456 acrtc_attach);
8fe684e9 8457
8c322309 8458 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 8459 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 8460 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309
RL
8461 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8462 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
d1ebfdd8
WW
8463 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8464 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8c322309
RL
8465 amdgpu_dm_psr_enable(acrtc_state->stream);
8466 }
8467
bc7f670e 8468 mutex_unlock(&dm->dc_lock);
e7b07cee 8469 }
4b510503 8470
8ad27806
NK
8471 /*
8472 * Update cursor state *after* programming all the planes.
8473 * This avoids redundant programming in the case where we're going
8474 * to be disabling a single plane - those pipes are being disabled.
8475 */
8476 if (acrtc_state->active_planes)
8477 amdgpu_dm_commit_cursors(state);
80c218d5 8478
4b510503 8479cleanup:
74aa7bd4 8480 kfree(bundle);
e7b07cee
HW
8481}
8482
6ce8f316
NK
8483static void amdgpu_dm_commit_audio(struct drm_device *dev,
8484 struct drm_atomic_state *state)
8485{
1348969a 8486 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
8487 struct amdgpu_dm_connector *aconnector;
8488 struct drm_connector *connector;
8489 struct drm_connector_state *old_con_state, *new_con_state;
8490 struct drm_crtc_state *new_crtc_state;
8491 struct dm_crtc_state *new_dm_crtc_state;
8492 const struct dc_stream_status *status;
8493 int i, inst;
8494
8495 /* Notify device removals. */
8496 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8497 if (old_con_state->crtc != new_con_state->crtc) {
8498 /* CRTC changes require notification. */
8499 goto notify;
8500 }
8501
8502 if (!new_con_state->crtc)
8503 continue;
8504
8505 new_crtc_state = drm_atomic_get_new_crtc_state(
8506 state, new_con_state->crtc);
8507
8508 if (!new_crtc_state)
8509 continue;
8510
8511 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8512 continue;
8513
8514 notify:
8515 aconnector = to_amdgpu_dm_connector(connector);
8516
8517 mutex_lock(&adev->dm.audio_lock);
8518 inst = aconnector->audio_inst;
8519 aconnector->audio_inst = -1;
8520 mutex_unlock(&adev->dm.audio_lock);
8521
8522 amdgpu_dm_audio_eld_notify(adev, inst);
8523 }
8524
8525 /* Notify audio device additions. */
8526 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8527 if (!new_con_state->crtc)
8528 continue;
8529
8530 new_crtc_state = drm_atomic_get_new_crtc_state(
8531 state, new_con_state->crtc);
8532
8533 if (!new_crtc_state)
8534 continue;
8535
8536 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8537 continue;
8538
8539 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8540 if (!new_dm_crtc_state->stream)
8541 continue;
8542
8543 status = dc_stream_get_status(new_dm_crtc_state->stream);
8544 if (!status)
8545 continue;
8546
8547 aconnector = to_amdgpu_dm_connector(connector);
8548
8549 mutex_lock(&adev->dm.audio_lock);
8550 inst = status->audio_inst;
8551 aconnector->audio_inst = inst;
8552 mutex_unlock(&adev->dm.audio_lock);
8553
8554 amdgpu_dm_audio_eld_notify(adev, inst);
8555 }
8556}
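/*
 * Editor's note on the two passes above: removals are notified first,
 * with audio_inst forced to -1 under audio_lock, so the audio component
 * never observes two connectors claiming the same instance while a
 * stream moves between CRTCs; the second pass then publishes the
 * instance reported by the new stream status.
 */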
8557
1f6010a9 8558/*
27b3f4fc
LSL
8559 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8560 * @crtc_state: the DRM CRTC state
8561 * @stream_state: the DC stream state.
8562 *
8563 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8564 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8565 */
8566static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8567 struct dc_stream_state *stream_state)
8568{
b9952f93 8569 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 8570}
e7b07cee 8571
b8592b48
LL
8572/**
8573 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8574 * @state: The atomic state to commit
8575 *
8576 * This will tell DC to commit the constructed DC state from atomic_check,
8577 * programming the hardware. Any failures here implies a hardware failure, since
8578 * atomic check should have filtered anything non-kosher.
8579 */
7578ecda 8580static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
8581{
8582 struct drm_device *dev = state->dev;
1348969a 8583 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8584 struct amdgpu_display_manager *dm = &adev->dm;
8585 struct dm_atomic_state *dm_state;
eb3dc897 8586 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 8587 uint32_t i, j;
5cc6dcbd 8588 struct drm_crtc *crtc;
0bc9706d 8589 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
8590 unsigned long flags;
8591 bool wait_for_vblank = true;
8592 struct drm_connector *connector;
c2cea706 8593 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 8594 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 8595 int crtc_disable_count = 0;
6ee90e88 8596 bool mode_set_reset_required = false;
e7b07cee 8597
e8a98235
RS
8598 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8599
e7b07cee
HW
8600 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8601
eb3dc897
NK
8602 dm_state = dm_atomic_get_new_state(state);
8603 if (dm_state && dm_state->context) {
8604 dc_state = dm_state->context;
8605 } else {
8606 /* No state changes, retain current state. */
813d20dc 8607 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
8608 ASSERT(dc_state_temp);
8609 dc_state = dc_state_temp;
8610 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8611 }
e7b07cee 8612
6d90a208
AP
8613 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8614 new_crtc_state, i) {
8615 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8616
8617 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8618
8619 if (old_crtc_state->active &&
8620 (!new_crtc_state->active ||
8621 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8622 manage_dm_interrupts(adev, acrtc, false);
8623 dc_stream_release(dm_old_crtc_state->stream);
8624 }
8625 }
8626
8976f73b
RS
8627 drm_atomic_helper_calc_timestamping_constants(state);
8628
e7b07cee 8629 /* update changed items */
0bc9706d 8630 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 8631 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8632
54d76575
LSL
8633 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8634 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 8635
4711c033 8636 DRM_DEBUG_ATOMIC(
e7b07cee
HW
8637 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8638 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8639 "connectors_changed:%d\n",
8640 acrtc->crtc_id,
0bc9706d
LSL
8641 new_crtc_state->enable,
8642 new_crtc_state->active,
8643 new_crtc_state->planes_changed,
8644 new_crtc_state->mode_changed,
8645 new_crtc_state->active_changed,
8646 new_crtc_state->connectors_changed);
e7b07cee 8647
5c68c652
VL
8648 /* Disable cursor if disabling crtc */
8649 if (old_crtc_state->active && !new_crtc_state->active) {
8650 struct dc_cursor_position position;
8651
8652 memset(&position, 0, sizeof(position));
8653 mutex_lock(&dm->dc_lock);
8654 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8655 mutex_unlock(&dm->dc_lock);
8656 }
8657
27b3f4fc
LSL
8658 /* Copy all transient state flags into dc state */
8659 if (dm_new_crtc_state->stream) {
8660 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8661 dm_new_crtc_state->stream);
8662 }
8663
e7b07cee
HW
8664 /* handles headless hotplug case, updating new_state and
8665 * aconnector as needed
8666 */
8667
54d76575 8668 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 8669
4711c033 8670 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8671
54d76575 8672 if (!dm_new_crtc_state->stream) {
e7b07cee 8673 /*
b830ebc9
HW
8674 * This could happen because of issues with
8675 * userspace notification delivery.
8676 * In this case userspace tries to set a mode on a
1f6010a9
DF
8677 * display which is in fact disconnected.
8678 * dc_sink is NULL in this case on aconnector.
8679 * We expect a reset mode to come soon.
b830ebc9
HW
8680 *
8681 * This can also happen when an unplug is done
8682 * during the resume sequence.
8683 *
8684 * In this case, we want to pretend we still
8685 * have a sink to keep the pipe running so that
8686 * hw state is consistent with the sw state
8687 */
f1ad2f5e 8688 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
8689 __func__, acrtc->base.base.id);
8690 continue;
8691 }
8692
54d76575
LSL
8693 if (dm_old_crtc_state->stream)
8694 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 8695
97028037
LP
8696 pm_runtime_get_noresume(dev->dev);
8697
e7b07cee 8698 acrtc->enabled = true;
0bc9706d
LSL
8699 acrtc->hw_mode = new_crtc_state->mode;
8700 crtc->hwmode = new_crtc_state->mode;
6ee90e88 8701 mode_set_reset_required = true;
0bc9706d 8702 } else if (modereset_required(new_crtc_state)) {
4711c033 8703 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8704 /* i.e. reset mode */
6ee90e88 8705 if (dm_old_crtc_state->stream)
54d76575 8706 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 8707
6ee90e88 8708 mode_set_reset_required = true;
e7b07cee
HW
8709 }
8710 } /* for_each_crtc_in_state() */
8711
eb3dc897 8712 if (dc_state) {
6ee90e88 8713 /* if there is a mode set or reset, disable eDP PSR */
8714 if (mode_set_reset_required)
8715 amdgpu_dm_psr_disable_all(dm);
8716
eb3dc897 8717 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 8718 mutex_lock(&dm->dc_lock);
eb3dc897 8719 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5af50b0b
BR
8720#if defined(CONFIG_DRM_AMD_DC_DCN)
8721 /* Allow idle optimization when vblank count is 0 for display off */
8722 if (dm->active_vblank_irq_count == 0)
8723 dc_allow_idle_optimizations(dm->dc, true);
8724#endif
674e78ac 8725 mutex_unlock(&dm->dc_lock);
fa2123db 8726 }
fe8858bb 8727
0bc9706d 8728 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8729 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8730
54d76575 8731 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8732
54d76575 8733 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 8734 const struct dc_stream_status *status =
54d76575 8735 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8736
eb3dc897 8737 if (!status)
09f609c3
LL
8738 status = dc_stream_get_status_from_state(dc_state,
8739 dm_new_crtc_state->stream);
e7b07cee 8740 if (!status)
54d76575 8741 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
8742 else
8743 acrtc->otg_inst = status->primary_otg_inst;
8744 }
8745 }
0c8620d6
BL
8746#ifdef CONFIG_DRM_AMD_DC_HDCP
8747 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8748 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8749 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8750 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8751
8752 new_crtc_state = NULL;
8753
8754 if (acrtc)
8755 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8756
8757 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8758
8759 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8760 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8761 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8762 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 8763 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
8764 continue;
8765 }
8766
8767 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
8768 hdcp_update_display(
8769 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 8770 new_con_state->hdcp_content_type,
0e86d3d4 8771 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
0c8620d6
BL
8772 }
8773#endif
e7b07cee 8774
02d6a6fc 8775 /* Handle connector state changes */
c2cea706 8776 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
8777 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8778 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8779 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 8780 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 8781 struct dc_stream_update stream_update;
b232d4ed 8782 struct dc_info_packet hdr_packet;
e7b07cee 8783 struct dc_stream_status *status = NULL;
b232d4ed 8784 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 8785
efc8278e 8786 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
8787 memset(&stream_update, 0, sizeof(stream_update));
8788
44d09c6a 8789 if (acrtc) {
0bc9706d 8790 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
8791 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8792 }
0bc9706d 8793
e7b07cee 8794 /* Skip any modesets/resets */
0bc9706d 8795 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
8796 continue;
8797
54d76575 8798 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
8799 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8800
b232d4ed
NK
8801 scaling_changed = is_scaling_state_different(dm_new_con_state,
8802 dm_old_con_state);
8803
8804 abm_changed = dm_new_crtc_state->abm_level !=
8805 dm_old_crtc_state->abm_level;
8806
8807 hdr_changed =
8808 is_hdr_metadata_different(old_con_state, new_con_state);
8809
8810 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 8811 continue;
e7b07cee 8812
b6e881c9 8813 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 8814 if (scaling_changed) {
02d6a6fc 8815 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 8816 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 8817
02d6a6fc
DF
8818 stream_update.src = dm_new_crtc_state->stream->src;
8819 stream_update.dst = dm_new_crtc_state->stream->dst;
8820 }
8821
b232d4ed 8822 if (abm_changed) {
02d6a6fc
DF
8823 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8824
8825 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8826 }
70e8ffc5 8827
b232d4ed
NK
8828 if (hdr_changed) {
8829 fill_hdr_info_packet(new_con_state, &hdr_packet);
8830 stream_update.hdr_static_metadata = &hdr_packet;
8831 }
8832
54d76575 8833 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8834 WARN_ON(!status);
3be5262e 8835 WARN_ON(!status->plane_count);
e7b07cee 8836
02d6a6fc
DF
8837 /*
8838 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8839 * Here we create an empty update on each plane.
8840 * To fix this, DC should permit updating only stream properties.
8841 */
8842 for (j = 0; j < status->plane_count; j++)
efc8278e 8843 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
8844
8845
8846 mutex_lock(&dm->dc_lock);
8847 dc_commit_updates_for_stream(dm->dc,
efc8278e 8848 dummy_updates,
02d6a6fc
DF
8849 status->plane_count,
8850 dm_new_crtc_state->stream,
efc8278e
AJ
8851 &stream_update,
8852 dc_state);
02d6a6fc 8853 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
8854 }
8855
b5e83f6f 8856 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 8857 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 8858 new_crtc_state, i) {
fe2a1965
LP
8859 if (old_crtc_state->active && !new_crtc_state->active)
8860 crtc_disable_count++;
8861
54d76575 8862 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 8863 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 8864
585d450c
AP
8865 /* For freesync config update on crtc state and params for irq */
8866 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 8867
66b0c973
MK
8868 /* Handle vrr on->off / off->on transitions */
8869 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8870 dm_new_crtc_state);
e7b07cee
HW
8871 }
8872
8fe684e9
NK
8873 /**
8874 * Enable interrupts for CRTCs that are newly enabled or went through
8875 * a modeset. It was intentionally deferred until after the front end
8876 * state was modified to wait until the OTG was on and so the IRQ
8877 * handlers didn't access stale or invalid state.
8878 */
8879 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8880 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 8881#ifdef CONFIG_DEBUG_FS
86bc2219 8882 bool configure_crc = false;
8e7b6fee
WL
8883 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8884#endif
585d450c
AP
8885 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8886
8fe684e9
NK
8887 if (new_crtc_state->active &&
8888 (!old_crtc_state->active ||
8889 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
8890 dc_stream_retain(dm_new_crtc_state->stream);
8891 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 8892 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 8893
24eb9374 8894#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
8895 /**
8896 * Frontend may have changed so reapply the CRC capture
8897 * settings for the stream.
8898 */
8899 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8e7b6fee
WL
8900 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8901 cur_crc_src = acrtc->dm_irq_params.crc_src;
8902 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
c920888c 8903
8e7b6fee 8904 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
8905 configure_crc = true;
8906#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8907 if (amdgpu_dm_crc_window_is_activated(crtc))
8908 configure_crc = false;
8909#endif
e2881d6d 8910 }
c920888c 8911
86bc2219 8912 if (configure_crc)
c920888c 8913 amdgpu_dm_crtc_configure_crc_source(
86bc2219 8914 crtc, dm_new_crtc_state, cur_crc_src);
24eb9374 8915#endif
8fe684e9
NK
8916 }
8917 }
e7b07cee 8918
420cd472 8919 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 8920 if (new_crtc_state->async_flip)
420cd472
DF
8921 wait_for_vblank = false;
8922
e7b07cee 8923 /* update planes when needed per crtc*/
5cc6dcbd 8924 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 8925 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8926
54d76575 8927 if (dm_new_crtc_state->stream)
eb3dc897 8928 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 8929 dm, crtc, wait_for_vblank);
e7b07cee
HW
8930 }
8931
6ce8f316
NK
8932 /* Update audio instances for each connector. */
8933 amdgpu_dm_commit_audio(dev, state);
8934
e7b07cee
HW
8935 /*
8936 * send vblank event on all events not handled in flip and
8937 * mark consumed event for drm_atomic_helper_commit_hw_done
8938 */
4a580877 8939 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 8940 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8941
0bc9706d
LSL
8942 if (new_crtc_state->event)
8943 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 8944
0bc9706d 8945 new_crtc_state->event = NULL;
e7b07cee 8946 }
4a580877 8947 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 8948
29c8f234
LL
8949 /* Signal HW programming completion */
8950 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
8951
8952 if (wait_for_vblank)
320a1274 8953 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
8954
8955 drm_atomic_helper_cleanup_planes(dev, state);
97028037 8956
5f6fab24
AD
8957 /* return the stolen vga memory back to VRAM */
8958 if (!adev->mman.keep_stolen_vga_memory)
8959 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8960 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8961
1f6010a9
DF
8962 /*
8963 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
8964 * so we can put the GPU into runtime suspend if we're not driving any
8965 * displays anymore
8966 */
fe2a1965
LP
8967 for (i = 0; i < crtc_disable_count; i++)
8968 pm_runtime_put_autosuspend(dev->dev);
97028037 8969 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
8970
8971 if (dc_state_temp)
8972 dc_release_state(dc_state_temp);
e7b07cee
HW
8973}
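/*
 * Editor's summary of the ordering above (derived from the code, not
 * normative): disable IRQs and release streams on outgoing CRTCs ->
 * dc_commit_state() -> HDCP and connector-only stream updates ->
 * freesync/VRR bookkeeping -> re-enable IRQs (and CRC capture) on
 * incoming CRTCs -> per-CRTC plane/flip programming -> audio
 * notification -> send leftover vblank events -> commit_hw_done(),
 * optional wait_for_flip_done(), cleanup -> drop one runtime-PM
 * reference per newly disabled CRTC.
 */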
8974
8975
8976static int dm_force_atomic_commit(struct drm_connector *connector)
8977{
8978 int ret = 0;
8979 struct drm_device *ddev = connector->dev;
8980 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8981 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8982 struct drm_plane *plane = disconnected_acrtc->base.primary;
8983 struct drm_connector_state *conn_state;
8984 struct drm_crtc_state *crtc_state;
8985 struct drm_plane_state *plane_state;
8986
8987 if (!state)
8988 return -ENOMEM;
8989
8990 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8991
8992 /* Construct an atomic state to restore previous display setting */
8993
8994 /*
8995 * Attach connectors to drm_atomic_state
8996 */
8997 conn_state = drm_atomic_get_connector_state(state, connector);
8998
8999 ret = PTR_ERR_OR_ZERO(conn_state);
9000 if (ret)
2dc39051 9001 goto out;
e7b07cee
HW
9002
9003 /* Attach crtc to drm_atomic_state*/
9004 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9005
9006 ret = PTR_ERR_OR_ZERO(crtc_state);
9007 if (ret)
2dc39051 9008 goto out;
e7b07cee
HW
9009
9010 /* force a restore */
9011 crtc_state->mode_changed = true;
9012
9013 /* Attach plane to drm_atomic_state */
9014 plane_state = drm_atomic_get_plane_state(state, plane);
9015
9016 ret = PTR_ERR_OR_ZERO(plane_state);
9017 if (ret)
2dc39051 9018 goto out;
e7b07cee
HW
9019
9020 /* Call commit internally with the state we just constructed */
9021 ret = drm_atomic_commit(state);
e7b07cee 9022
2dc39051 9023out:
e7b07cee 9024 drm_atomic_state_put(state);
2dc39051
VL
9025 if (ret)
9026 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
9027
9028 return ret;
9029}
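/*
 * Usage sketch (editor's note): within this file the helper above is
 * only reached from dm_restore_drm_connector_state() below. The forced
 * crtc_state->mode_changed = true is what turns an otherwise empty
 * atomic state into a full modeset that re-programs the last mode.
 */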
9030
9031/*
1f6010a9
DF
9032 * This function handles all cases when set mode does not come upon hotplug.
9033 * This includes when a display is unplugged then plugged back into the
9034 * same port and when running without usermode desktop manager support
e7b07cee 9035 */
3ee6b26b
AD
9036void dm_restore_drm_connector_state(struct drm_device *dev,
9037 struct drm_connector *connector)
e7b07cee 9038{
c84dec2f 9039 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
9040 struct amdgpu_crtc *disconnected_acrtc;
9041 struct dm_crtc_state *acrtc_state;
9042
9043 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9044 return;
9045
9046 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
9047 if (!disconnected_acrtc)
9048 return;
e7b07cee 9049
70e8ffc5
HW
9050 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9051 if (!acrtc_state->stream)
e7b07cee
HW
9052 return;
9053
9054 /*
9055 * If the previous sink is not released and different from the current,
9056 * we deduce we are in a state where we can not rely on usermode call
9057 * to turn on the display, so we do it here
9058 */
9059 if (acrtc_state->stream->sink != aconnector->dc_sink)
9060 dm_force_atomic_commit(&aconnector->base);
9061}
9062
1f6010a9 9063/*
e7b07cee
HW
9064 * Grabs all modesetting locks to serialize against any blocking commits,
9065 * and waits for completion of all non-blocking commits.
9066 */
3ee6b26b
AD
9067static int do_aquire_global_lock(struct drm_device *dev,
9068 struct drm_atomic_state *state)
e7b07cee
HW
9069{
9070 struct drm_crtc *crtc;
9071 struct drm_crtc_commit *commit;
9072 long ret;
9073
1f6010a9
DF
9074 /*
9075 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
9076 * ensure that when the framework releases it, the
9077 * extra locks we are locking here will get released too.
9078 */
9079 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9080 if (ret)
9081 return ret;
9082
9083 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9084 spin_lock(&crtc->commit_lock);
9085 commit = list_first_entry_or_null(&crtc->commit_list,
9086 struct drm_crtc_commit, commit_entry);
9087 if (commit)
9088 drm_crtc_commit_get(commit);
9089 spin_unlock(&crtc->commit_lock);
9090
9091 if (!commit)
9092 continue;
9093
1f6010a9
DF
9094 /*
9095 * Make sure all pending HW programming completed and
e7b07cee
HW
9096 * page flips done
9097 */
9098 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9099
9100 if (ret > 0)
9101 ret = wait_for_completion_interruptible_timeout(
9102 &commit->flip_done, 10*HZ);
9103
9104 if (ret == 0)
9105 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 9106 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
9107
9108 drm_crtc_commit_put(commit);
9109 }
9110
9111 return ret < 0 ? ret : 0;
9112}
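/*
 * Editor's sketch of the pattern above: the first entry on
 * crtc->commit_list is the most recent commit; grabbing a reference
 * under commit_lock keeps it alive so we can wait (up to 10 s each) on
 * its hw_done and flip_done completions after dropping the spinlock.
 */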
9113
bb47de73
NK
9114static void get_freesync_config_for_crtc(
9115 struct dm_crtc_state *new_crtc_state,
9116 struct dm_connector_state *new_con_state)
98e6436d
AK
9117{
9118 struct mod_freesync_config config = {0};
98e6436d
AK
9119 struct amdgpu_dm_connector *aconnector =
9120 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 9121 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 9122 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 9123 bool fs_vid_mode = false;
98e6436d 9124
a057ec46 9125 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
9126 vrefresh >= aconnector->min_vfreq &&
9127 vrefresh <= aconnector->max_vfreq;
bb47de73 9128
a057ec46
IB
9129 if (new_crtc_state->vrr_supported) {
9130 new_crtc_state->stream->ignore_msa_timing_param = true;
a85ba005
NC
9131 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9132
9133 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9134 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 9135 config.vsif_supported = true;
180db303 9136 config.btr = true;
98e6436d 9137
a85ba005
NC
9138 if (fs_vid_mode) {
9139 config.state = VRR_STATE_ACTIVE_FIXED;
9140 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9141 goto out;
9142 } else if (new_crtc_state->base.vrr_enabled) {
9143 config.state = VRR_STATE_ACTIVE_VARIABLE;
9144 } else {
9145 config.state = VRR_STATE_INACTIVE;
9146 }
9147 }
9148out:
bb47de73
NK
9149 new_crtc_state->freesync_config = config;
9150}
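/*
 * Worked example (editor's note, assumed panel range 48-144 Hz):
 * aconnector->min_vfreq = 48  -> config.min_refresh_in_uhz = 48,000,000
 * aconnector->max_vfreq = 144 -> config.max_refresh_in_uhz = 144,000,000
 * A 60 Hz mode (drm_mode_vrefresh() == 60) lies inside that range, so
 * vrr_supported is set and, absent a fixed-rate video mode, the state
 * follows base.vrr_enabled (ACTIVE_VARIABLE vs INACTIVE).
 */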
98e6436d 9151
bb47de73
NK
9152static void reset_freesync_config_for_crtc(
9153 struct dm_crtc_state *new_crtc_state)
9154{
9155 new_crtc_state->vrr_supported = false;
98e6436d 9156
bb47de73
NK
9157 memset(&new_crtc_state->vrr_infopacket, 0,
9158 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
9159}
9160
a85ba005
NC
9161static bool
9162is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9163 struct drm_crtc_state *new_crtc_state)
9164{
9165 struct drm_display_mode old_mode, new_mode;
9166
9167 if (!old_crtc_state || !new_crtc_state)
9168 return false;
9169
9170 old_mode = old_crtc_state->mode;
9171 new_mode = new_crtc_state->mode;
9172
9173 if (old_mode.clock == new_mode.clock &&
9174 old_mode.hdisplay == new_mode.hdisplay &&
9175 old_mode.vdisplay == new_mode.vdisplay &&
9176 old_mode.htotal == new_mode.htotal &&
9177 old_mode.vtotal != new_mode.vtotal &&
9178 old_mode.hsync_start == new_mode.hsync_start &&
9179 old_mode.vsync_start != new_mode.vsync_start &&
9180 old_mode.hsync_end == new_mode.hsync_end &&
9181 old_mode.vsync_end != new_mode.vsync_end &&
9182 old_mode.hskew == new_mode.hskew &&
9183 old_mode.vscan == new_mode.vscan &&
9184 (old_mode.vsync_end - old_mode.vsync_start) ==
9185 (new_mode.vsync_end - new_mode.vsync_start))
9186 return true;
9187
9188 return false;
9189}
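/*
 * Example pair accepted above (editor's sketch, assumed numbers): a
 * base mode and its "freesync video" variant share clock, all
 * horizontal timing and the vsync pulse width, and differ only in the
 * vertical front porch (vtotal/vsync_start/vsync_end). At a 148.5 MHz
 * clock with htotal 2200, vtotal 1125 vs 1350 gives 60 Hz vs 50 Hz,
 * reachable without a full modeset.
 */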
9190
9191static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9192 uint64_t num, den, res;
9193 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9194
9195 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9196
9197 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9198 den = (unsigned long long)new_crtc_state->mode.htotal *
9199 (unsigned long long)new_crtc_state->mode.vtotal;
9200
9201 res = div_u64(num, den);
9202 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9203}
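/*
 * Worked example for the math above (editor's note, assumed mode
 * 1920x1080@60: clock = 148500 kHz, htotal = 2200, vtotal = 1125):
 * num = 148500 * 1000 * 1000000 = 1.485e14
 * den = 2200 * 1125 = 2,475,000
 * fixed_refresh_in_uhz = num / den = 60,000,000 uHz, i.e. 60 Hz.
 */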
9204
4b9674e5
LL
9205static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9206 struct drm_atomic_state *state,
9207 struct drm_crtc *crtc,
9208 struct drm_crtc_state *old_crtc_state,
9209 struct drm_crtc_state *new_crtc_state,
9210 bool enable,
9211 bool *lock_and_validation_needed)
e7b07cee 9212{
eb3dc897 9213 struct dm_atomic_state *dm_state = NULL;
54d76575 9214 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 9215 struct dc_stream_state *new_stream;
62f55537 9216 int ret = 0;
d4d4a645 9217
1f6010a9
DF
9218 /*
9219 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9220 * update changed items
9221 */
4b9674e5
LL
9222 struct amdgpu_crtc *acrtc = NULL;
9223 struct amdgpu_dm_connector *aconnector = NULL;
9224 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9225 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 9226
4b9674e5 9227 new_stream = NULL;
9635b754 9228
4b9674e5
LL
9229 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9230 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9231 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 9232 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 9233
4b9674e5
LL
9234 /* TODO This hack should go away */
9235 if (aconnector && enable) {
9236 /* Make sure fake sink is created in plug-in scenario */
9237 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9238 &aconnector->base);
9239 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9240 &aconnector->base);
19f89e23 9241
4b9674e5
LL
9242 if (IS_ERR(drm_new_conn_state)) {
9243 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9244 goto fail;
9245 }
19f89e23 9246
4b9674e5
LL
9247 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9248 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 9249
02d35a67
JFZ
9250 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9251 goto skip_modeset;
9252
cbd14ae7
SW
9253 new_stream = create_validate_stream_for_sink(aconnector,
9254 &new_crtc_state->mode,
9255 dm_new_conn_state,
9256 dm_old_crtc_state->stream);
19f89e23 9257
4b9674e5
LL
9258 /*
9259 * We can have no stream on ACTION_SET if a display
9260 * was disconnected during S3; in this case it is not an
9261 * error, the OS will be updated after detection and
9262 * will do the right thing on the next atomic commit.
9263 */
19f89e23 9264
4b9674e5
LL
9265 if (!new_stream) {
9266 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9267 __func__, acrtc->base.base.id);
9268 ret = -ENOMEM;
9269 goto fail;
9270 }
e7b07cee 9271
3d4e52d0
VL
9272 /*
9273 * TODO: Check VSDB bits to decide whether this should
9274 * be enabled or not.
9275 */
9276 new_stream->triggered_crtc_reset.enabled =
9277 dm->force_timing_sync;
9278
4b9674e5 9279 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 9280
88694af9
NK
9281 ret = fill_hdr_info_packet(drm_new_conn_state,
9282 &new_stream->hdr_static_metadata);
9283 if (ret)
9284 goto fail;
9285
7e930949
NK
9286 /*
9287 * If we already removed the old stream from the context
9288 * (and set the new stream to NULL) then we can't reuse
9289 * the old stream even if the stream and scaling are unchanged.
9290 * We'll hit the BUG_ON and black screen.
9291 *
9292 * TODO: Refactor this function to allow this check to work
9293 * in all conditions.
9294 */
a85ba005
NC
9295 if (amdgpu_freesync_vid_mode &&
9296 dm_new_crtc_state->stream &&
9297 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9298 goto skip_modeset;
9299
7e930949
NK
9300 if (dm_new_crtc_state->stream &&
9301 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
9302 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9303 new_crtc_state->mode_changed = false;
9304 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9305 new_crtc_state->mode_changed);
62f55537 9306 }
4b9674e5 9307 }
b830ebc9 9308
02d35a67 9309 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
9310 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9311 goto skip_modeset;
e7b07cee 9312
4711c033 9313 DRM_DEBUG_ATOMIC(
4b9674e5
LL
9314 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9315 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9316 "connectors_changed:%d\n",
9317 acrtc->crtc_id,
9318 new_crtc_state->enable,
9319 new_crtc_state->active,
9320 new_crtc_state->planes_changed,
9321 new_crtc_state->mode_changed,
9322 new_crtc_state->active_changed,
9323 new_crtc_state->connectors_changed);
62f55537 9324
4b9674e5
LL
9325 /* Remove stream for any changed/disabled CRTC */
9326 if (!enable) {
62f55537 9327
4b9674e5
LL
9328 if (!dm_old_crtc_state->stream)
9329 goto skip_modeset;
eb3dc897 9330
a85ba005
NC
9331 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9332 is_timing_unchanged_for_freesync(new_crtc_state,
9333 old_crtc_state)) {
9334 new_crtc_state->mode_changed = false;
9335 DRM_DEBUG_DRIVER(
9336 "Mode change not required for front porch change, "
9337 "setting mode_changed to %d",
9338 new_crtc_state->mode_changed);
9339
9340 set_freesync_fixed_config(dm_new_crtc_state);
9341
9342 goto skip_modeset;
9343 } else if (amdgpu_freesync_vid_mode && aconnector &&
9344 is_freesync_video_mode(&new_crtc_state->mode,
9345 aconnector)) {
9346 set_freesync_fixed_config(dm_new_crtc_state);
9347 }
9348
4b9674e5
LL
9349 ret = dm_atomic_get_state(state, &dm_state);
9350 if (ret)
9351 goto fail;
e7b07cee 9352
4b9674e5
LL
9353 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9354 crtc->base.id);
62f55537 9355
4b9674e5
LL
9356 /* i.e. reset mode */
9357 if (dc_remove_stream_from_ctx(
9358 dm->dc,
9359 dm_state->context,
9360 dm_old_crtc_state->stream) != DC_OK) {
9361 ret = -EINVAL;
9362 goto fail;
9363 }
62f55537 9364
4b9674e5
LL
9365 dc_stream_release(dm_old_crtc_state->stream);
9366 dm_new_crtc_state->stream = NULL;
bb47de73 9367
4b9674e5 9368 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 9369
4b9674e5 9370 *lock_and_validation_needed = true;
62f55537 9371
4b9674e5
LL
9372 } else {/* Add stream for any updated/enabled CRTC */
9373 /*
9374 * Quick fix to prevent a NULL pointer dereference on new_stream when
9375 * newly added MST connectors are not found in the existing crtc_state in chained (daisy-chain) mode.
9376 * TODO: need to dig out the root cause of that
9377 */
9378 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9379 goto skip_modeset;
62f55537 9380
4b9674e5
LL
9381 if (modereset_required(new_crtc_state))
9382 goto skip_modeset;
62f55537 9383
4b9674e5
LL
9384 if (modeset_required(new_crtc_state, new_stream,
9385 dm_old_crtc_state->stream)) {
62f55537 9386
4b9674e5 9387 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 9388
4b9674e5
LL
9389 ret = dm_atomic_get_state(state, &dm_state);
9390 if (ret)
9391 goto fail;
27b3f4fc 9392
4b9674e5 9393 dm_new_crtc_state->stream = new_stream;
62f55537 9394
4b9674e5 9395 dc_stream_retain(new_stream);
1dc90497 9396
4711c033
LT
9397 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9398 crtc->base.id);
1dc90497 9399
4b9674e5
LL
9400 if (dc_add_stream_to_ctx(
9401 dm->dc,
9402 dm_state->context,
9403 dm_new_crtc_state->stream) != DC_OK) {
9404 ret = -EINVAL;
9405 goto fail;
9b690ef3
BL
9406 }
9407
4b9674e5
LL
9408 *lock_and_validation_needed = true;
9409 }
9410 }
e277adc5 9411
4b9674e5
LL
9412skip_modeset:
9413 /* Release extra reference */
9414 if (new_stream)
9415 dc_stream_release(new_stream);
e277adc5 9416
4b9674e5
LL
9417 /*
9418 * We want to do dc stream updates that do not require a
9419 * full modeset below.
9420 */
2afda735 9421 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
9422 return 0;
9423 /*
9424 * Given above conditions, the dc state cannot be NULL because:
9425 * 1. We're in the process of enabling CRTCs (just been added
9426 * to the dc context, or already is on the context)
9427 * 2. Has a valid connector attached, and
9428 * 3. Is currently active and enabled.
9429 * => The dc stream state currently exists.
9430 */
9431 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 9432
4b9674e5
LL
9433 /* Scaling or underscan settings */
9434 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9435 update_stream_scaling_settings(
9436 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 9437
b05e2c5e
DF
9438 /* ABM settings */
9439 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9440
4b9674e5
LL
9441 /*
9442 * Color management settings. We also update color properties
9443 * when a modeset is needed, to ensure it gets reprogrammed.
9444 */
9445 if (dm_new_crtc_state->base.color_mgmt_changed ||
9446 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 9447 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
9448 if (ret)
9449 goto fail;
62f55537 9450 }
e7b07cee 9451
4b9674e5
LL
9452 /* Update Freesync settings. */
9453 get_freesync_config_for_crtc(dm_new_crtc_state,
9454 dm_new_conn_state);
9455
62f55537 9456 return ret;
9635b754
DS
9457
9458fail:
9459 if (new_stream)
9460 dc_stream_release(new_stream);
9461 return ret;
62f55537 9462}
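/*
 * Editor's summary of the two branches above: with enable == false the
 * old stream is removed from the DC context and the DRM-side copy is
 * dropped; with enable == true a new stream is validated against the
 * requested mode and added to the context. Either direction sets
 * *lock_and_validation_needed so the caller runs the expensive global
 * validation pass before committing.
 */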
9b690ef3 9463
f6ff2a08
NK
9464static bool should_reset_plane(struct drm_atomic_state *state,
9465 struct drm_plane *plane,
9466 struct drm_plane_state *old_plane_state,
9467 struct drm_plane_state *new_plane_state)
9468{
9469 struct drm_plane *other;
9470 struct drm_plane_state *old_other_state, *new_other_state;
9471 struct drm_crtc_state *new_crtc_state;
9472 int i;
9473
70a1efac
NK
9474 /*
9475 * TODO: Remove this hack once the checks below are sufficient
9476 * to determine when we need to reset all the planes on
9477 * the stream.
9478 */
9479 if (state->allow_modeset)
9480 return true;
9481
f6ff2a08
NK
9482 /* Exit early if we know that we're adding or removing the plane. */
9483 if (old_plane_state->crtc != new_plane_state->crtc)
9484 return true;
9485
9486 /* old crtc == new_crtc == NULL, plane not in context. */
9487 if (!new_plane_state->crtc)
9488 return false;
9489
9490 new_crtc_state =
9491 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9492
9493 if (!new_crtc_state)
9494 return true;
9495
7316c4ad
NK
9496 /* CRTC Degamma changes currently require us to recreate planes. */
9497 if (new_crtc_state->color_mgmt_changed)
9498 return true;
9499
f6ff2a08
NK
9500 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9501 return true;
9502
9503 /*
9504 * If there are any new primary or overlay planes being added or
9505 * removed then the z-order can potentially change. To ensure
9506 * correct z-order and pipe acquisition the current DC architecture
9507 * requires us to remove and recreate all existing planes.
9508 *
9509 * TODO: Come up with a more elegant solution for this.
9510 */
9511 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 9512 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
9513 if (other->type == DRM_PLANE_TYPE_CURSOR)
9514 continue;
9515
9516 if (old_other_state->crtc != new_plane_state->crtc &&
9517 new_other_state->crtc != new_plane_state->crtc)
9518 continue;
9519
9520 if (old_other_state->crtc != new_other_state->crtc)
9521 return true;
9522
dc4cb30d
NK
9523 /* Src/dst size and scaling updates. */
9524 if (old_other_state->src_w != new_other_state->src_w ||
9525 old_other_state->src_h != new_other_state->src_h ||
9526 old_other_state->crtc_w != new_other_state->crtc_w ||
9527 old_other_state->crtc_h != new_other_state->crtc_h)
9528 return true;
9529
9530 /* Rotation / mirroring updates. */
9531 if (old_other_state->rotation != new_other_state->rotation)
9532 return true;
9533
9534 /* Blending updates. */
9535 if (old_other_state->pixel_blend_mode !=
9536 new_other_state->pixel_blend_mode)
9537 return true;
9538
9539 /* Alpha updates. */
9540 if (old_other_state->alpha != new_other_state->alpha)
9541 return true;
9542
9543 /* Colorspace changes. */
9544 if (old_other_state->color_range != new_other_state->color_range ||
9545 old_other_state->color_encoding != new_other_state->color_encoding)
9546 return true;
9547
9a81cc60
NK
9548 /* Framebuffer checks fall at the end. */
9549 if (!old_other_state->fb || !new_other_state->fb)
9550 continue;
9551
9552 /* Pixel format changes can require bandwidth updates. */
9553 if (old_other_state->fb->format != new_other_state->fb->format)
9554 return true;
9555
6eed95b0
BN
9556 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9557 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
9558
9559 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
9560 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9561 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
9562 return true;
9563 }
9564
9565 return false;
9566}
9567
b0455fda
SS
9568static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9569 struct drm_plane_state *new_plane_state,
9570 struct drm_framebuffer *fb)
9571{
e72868c4
SS
9572 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9573 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 9574 unsigned int pitch;
e72868c4 9575 bool linear;
b0455fda
SS
9576
9577 if (fb->width > new_acrtc->max_cursor_width ||
9578 fb->height > new_acrtc->max_cursor_height) {
9579 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9580 new_plane_state->fb->width,
9581 new_plane_state->fb->height);
9582 return -EINVAL;
9583 }
9584 if (new_plane_state->src_w != fb->width << 16 ||
9585 new_plane_state->src_h != fb->height << 16) {
9586 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9587 return -EINVAL;
9588 }
9589
9590 /* Pitch in pixels */
9591 pitch = fb->pitches[0] / fb->format->cpp[0];
9592
9593 if (fb->width != pitch) {
9594 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9595 fb->width, pitch);
9596 return -EINVAL;
9597 }
9598
9599 switch (pitch) {
9600 case 64:
9601 case 128:
9602 case 256:
9603 /* FB pitch is supported by cursor plane */
9604 break;
9605 default:
9606 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9607 return -EINVAL;
9608 }
9609
e72868c4
SS
9610 /* Core DRM takes care of checking FB modifiers, so we only need to
9611 * check tiling flags when the FB doesn't have a modifier. */
9612 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9613 if (adev->family < AMDGPU_FAMILY_AI) {
9614 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9615 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9616 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9617 } else {
9618 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9619 }
9620 if (!linear) {
9621 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9622 return -EINVAL;
9623 }
9624 }
9625
b0455fda
SS
9626 return 0;
9627}
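/*
 * Worked example for the checks above (editor's note): a 64x64
 * ARGB8888 cursor has fb->pitches[0] = 256 bytes and cpp[0] = 4, so
 * pitch = 64 px, which matches fb->width and the supported
 * {64, 128, 256} set. A 100x100 cursor (pitch 100 px) would pass the
 * width == pitch test but fail the switch, and so be rejected.
 */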
9628
9e869063
LL
9629static int dm_update_plane_state(struct dc *dc,
9630 struct drm_atomic_state *state,
9631 struct drm_plane *plane,
9632 struct drm_plane_state *old_plane_state,
9633 struct drm_plane_state *new_plane_state,
9634 bool enable,
9635 bool *lock_and_validation_needed)
62f55537 9636{
eb3dc897
NK
9637
9638 struct dm_atomic_state *dm_state = NULL;
62f55537 9639 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 9640 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 9641 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 9642 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 9643 struct amdgpu_crtc *new_acrtc;
f6ff2a08 9644 bool needs_reset;
62f55537 9645 int ret = 0;
e7b07cee 9646
9b690ef3 9647
9e869063
LL
9648 new_plane_crtc = new_plane_state->crtc;
9649 old_plane_crtc = old_plane_state->crtc;
9650 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9651 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 9652
626bf90f
SS
9653 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9654 if (!enable || !new_plane_crtc ||
9655 drm_atomic_plane_disabling(plane->state, new_plane_state))
9656 return 0;
9657
9658 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9659
5f581248
SS
9660 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9661 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9662 return -EINVAL;
9663 }
9664
24f99d2b 9665 if (new_plane_state->fb) {
b0455fda
SS
9666 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9667 new_plane_state->fb);
9668 if (ret)
9669 return ret;
24f99d2b
SS
9670 }
9671
9e869063 9672 return 0;
626bf90f 9673 }
9b690ef3 9674
f6ff2a08
NK
9675 needs_reset = should_reset_plane(state, plane, old_plane_state,
9676 new_plane_state);
9677
9e869063
LL
9678 /* Remove any changed/removed planes */
9679 if (!enable) {
f6ff2a08 9680 if (!needs_reset)
9e869063 9681 return 0;
a7b06724 9682
9e869063
LL
9683 if (!old_plane_crtc)
9684 return 0;
62f55537 9685
9e869063
LL
9686 old_crtc_state = drm_atomic_get_old_crtc_state(
9687 state, old_plane_crtc);
9688 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 9689
9e869063
LL
9690 if (!dm_old_crtc_state->stream)
9691 return 0;
62f55537 9692
9e869063
LL
9693 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9694 plane->base.id, old_plane_crtc->base.id);
9b690ef3 9695
9e869063
LL
9696 ret = dm_atomic_get_state(state, &dm_state);
9697 if (ret)
9698 return ret;
eb3dc897 9699
9e869063
LL
9700 if (!dc_remove_plane_from_context(
9701 dc,
9702 dm_old_crtc_state->stream,
9703 dm_old_plane_state->dc_state,
9704 dm_state->context)) {
62f55537 9705
c3537613 9706 return -EINVAL;
9e869063 9707 }
e7b07cee 9708
9b690ef3 9709
9e869063
LL
9710 dc_plane_state_release(dm_old_plane_state->dc_state);
9711 dm_new_plane_state->dc_state = NULL;
1dc90497 9712
9e869063 9713 *lock_and_validation_needed = true;
1dc90497 9714
9e869063
LL
9715 } else { /* Add new planes */
9716 struct dc_plane_state *dc_new_plane_state;
1dc90497 9717
9e869063
LL
9718 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9719 return 0;
e7b07cee 9720
9e869063
LL
9721 if (!new_plane_crtc)
9722 return 0;
e7b07cee 9723
9e869063
LL
9724 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9725 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 9726
9e869063
LL
9727 if (!dm_new_crtc_state->stream)
9728 return 0;
62f55537 9729
f6ff2a08 9730 if (!needs_reset)
9e869063 9731 return 0;
62f55537 9732
8c44515b
AP
9733 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9734 if (ret)
9735 return ret;
9736
9e869063 9737 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 9738
9e869063
LL
9739 dc_new_plane_state = dc_create_plane_state(dc);
9740 if (!dc_new_plane_state)
9741 return -ENOMEM;
62f55537 9742
4711c033
LT
9743 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9744 plane->base.id, new_plane_crtc->base.id);
8c45c5db 9745
695af5f9 9746 ret = fill_dc_plane_attributes(
1348969a 9747 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
9748 dc_new_plane_state,
9749 new_plane_state,
9750 new_crtc_state);
9751 if (ret) {
9752 dc_plane_state_release(dc_new_plane_state);
9753 return ret;
9754 }
62f55537 9755
9e869063
LL
9756 ret = dm_atomic_get_state(state, &dm_state);
9757 if (ret) {
9758 dc_plane_state_release(dc_new_plane_state);
9759 return ret;
9760 }
eb3dc897 9761
9e869063
LL
9762 /*
9763 * Any atomic check errors that occur after this will
9764 * not need a release. The plane state will be attached
9765 * to the stream, and therefore part of the atomic
9766 * state. It'll be released when the atomic state is
9767 * cleaned.
9768 */
9769 if (!dc_add_plane_to_context(
9770 dc,
9771 dm_new_crtc_state->stream,
9772 dc_new_plane_state,
9773 dm_state->context)) {
62f55537 9774
9e869063
LL
9775 dc_plane_state_release(dc_new_plane_state);
9776 return -EINVAL;
9777 }
8c45c5db 9778
9e869063 9779 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 9780
9e869063
LL
9781 /* Tell DC to do a full surface update every time there
9782 * is a plane change. Inefficient, but works for now.
9783 */
9784 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9785
9786 *lock_and_validation_needed = true;
62f55537 9787 }
e7b07cee
HW
 9788
62f55537
AG
9790 return ret;
9791}
a87fa993 9792
12f4849a
SS
9793static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9794 struct drm_crtc *crtc,
9795 struct drm_crtc_state *new_crtc_state)
9796{
9797 struct drm_plane_state *new_cursor_state, *new_primary_state;
9798 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9799
 9800 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
 9801 * cursor per pipe, but it inherits the scaling and positioning from the
 9802 * underlying pipe. Check that the cursor plane's scaling matches the
 9803 * primary plane's. */
9804
9805 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9806 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
7df4ceb6
SE
9807 if (!new_cursor_state || !new_primary_state ||
9808 !new_cursor_state->fb || !new_primary_state->fb) {
12f4849a
SS
9809 return 0;
9810 }
9811
9812 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9813 (new_cursor_state->src_w >> 16);
9814 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9815 (new_cursor_state->src_h >> 16);
9816
9817 primary_scale_w = new_primary_state->crtc_w * 1000 /
9818 (new_primary_state->src_w >> 16);
9819 primary_scale_h = new_primary_state->crtc_h * 1000 /
9820 (new_primary_state->src_h >> 16);
9821
9822 if (cursor_scale_w != primary_scale_w ||
9823 cursor_scale_h != primary_scale_h) {
9824 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9825 return -EINVAL;
9826 }
9827
9828 return 0;
9829}
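/*
 * Worked example (editor's sketch): a 64x64 cursor shown 1:1 gives
 * cursor_scale_w = 64 * 1000 / 64 = 1000. If the primary plane scales a
 * 1920-pixel-wide source onto a 3840-pixel-wide CRTC, primary_scale_w is
 * 3840 * 1000 / 1920 = 2000; the ratios differ, so the check above
 * rejects the state with -EINVAL.
 */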
9830
e10517b3 9831#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
9832static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9833{
9834 struct drm_connector *connector;
9835 struct drm_connector_state *conn_state;
9836 struct amdgpu_dm_connector *aconnector = NULL;
9837 int i;
9838 for_each_new_connector_in_state(state, connector, conn_state, i) {
9839 if (conn_state->crtc != crtc)
9840 continue;
9841
9842 aconnector = to_amdgpu_dm_connector(connector);
9843 if (!aconnector->port || !aconnector->mst_port)
9844 aconnector = NULL;
9845 else
9846 break;
9847 }
9848
9849 if (!aconnector)
9850 return 0;
9851
9852 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9853}
e10517b3 9854#endif
44be939f 9855
b8592b48
LL
9856/**
9857 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9858 * @dev: The DRM device
9859 * @state: The atomic state to commit
9860 *
9861 * Validate that the given atomic state is programmable by DC into hardware.
9862 * This involves constructing a &struct dc_state reflecting the new hardware
9863 * state we wish to commit, then querying DC to see if it is programmable. It's
9864 * important not to modify the existing DC state. Otherwise, atomic_check
9865 * may unexpectedly commit hardware changes.
9866 *
9867 * When validating the DC state, it's important that the right locks are
 9868 * acquired. For the full-update case, which removes/adds/updates streams on
 9869 * one CRTC while flipping on another CRTC, acquiring the global lock
 9870 * guarantees that any such full-update commit will wait for completion of any
f6d7c7fa 9871 * outstanding flip using DRM's synchronization events.
b8592b48
LL
9872 *
9873 * Note that DM adds the affected connectors for all CRTCs in state, when that
9874 * might not seem necessary. This is because DC stream creation requires the
9875 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9876 * be possible but non-trivial - a possible TODO item.
9877 *
 9878 * Return: 0 on success, or a negative error code if validation failed.
9879 */
7578ecda
AD
9880static int amdgpu_dm_atomic_check(struct drm_device *dev,
9881 struct drm_atomic_state *state)
62f55537 9882{
1348969a 9883 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 9884 struct dm_atomic_state *dm_state = NULL;
62f55537 9885 struct dc *dc = adev->dm.dc;
62f55537 9886 struct drm_connector *connector;
c2cea706 9887 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 9888 struct drm_crtc *crtc;
fc9e9920 9889 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
9890 struct drm_plane *plane;
9891 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 9892 enum dc_status status;
1e88ad0a 9893 int ret, i;
62f55537 9894 bool lock_and_validation_needed = false;
886876ec 9895 struct dm_crtc_state *dm_old_crtc_state;
62f55537 9896
e8a98235 9897 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 9898
62f55537 9899 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
9900 if (ret)
9901 goto fail;
62f55537 9902
c5892a10
SW
9903 /* Check connector changes */
9904 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9905 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9906 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9907
9908 /* Skip connectors that are disabled or part of modeset already. */
9909 if (!old_con_state->crtc && !new_con_state->crtc)
9910 continue;
9911
9912 if (!new_con_state->crtc)
9913 continue;
9914
9915 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9916 if (IS_ERR(new_crtc_state)) {
9917 ret = PTR_ERR(new_crtc_state);
9918 goto fail;
9919 }
9920
9921 if (dm_old_con_state->abm_level !=
9922 dm_new_con_state->abm_level)
9923 new_crtc_state->connectors_changed = true;
9924 }
9925
e10517b3 9926#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 9927 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
9928 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9929 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9930 ret = add_affected_mst_dsc_crtcs(state, crtc);
9931 if (ret)
9932 goto fail;
9933 }
9934 }
9935 }
e10517b3 9936#endif
1e88ad0a 9937 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
9938 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9939
1e88ad0a 9940 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 9941 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
9942 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9943 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 9944 continue;
7bef1af3 9945
1e88ad0a
S
9946 if (!new_crtc_state->enable)
9947 continue;
fc9e9920 9948
1e88ad0a
S
9949 ret = drm_atomic_add_affected_connectors(state, crtc);
9950 if (ret)
9951 return ret;
fc9e9920 9952
1e88ad0a
S
9953 ret = drm_atomic_add_affected_planes(state, crtc);
9954 if (ret)
9955 goto fail;
115a385c 9956
cbac53f7 9957 if (dm_old_crtc_state->dsc_force_changed)
115a385c 9958 new_crtc_state->mode_changed = true;
e7b07cee
HW
9959 }
9960
2d9e6431
NK
9961 /*
9962 * Add all primary and overlay planes on the CRTC to the state
9963 * whenever a plane is enabled to maintain correct z-ordering
9964 * and to enable fast surface updates.
9965 */
9966 drm_for_each_crtc(crtc, dev) {
9967 bool modified = false;
9968
9969 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9970 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9971 continue;
9972
9973 if (new_plane_state->crtc == crtc ||
9974 old_plane_state->crtc == crtc) {
9975 modified = true;
9976 break;
9977 }
9978 }
9979
9980 if (!modified)
9981 continue;
9982
9983 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9984 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9985 continue;
9986
9987 new_plane_state =
9988 drm_atomic_get_plane_state(state, plane);
9989
9990 if (IS_ERR(new_plane_state)) {
9991 ret = PTR_ERR(new_plane_state);
9992 goto fail;
9993 }
9994 }
9995 }
9996
62f55537 9997 /* Remove existing planes if they are modified */
9e869063
LL
9998 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9999 ret = dm_update_plane_state(dc, state, plane,
10000 old_plane_state,
10001 new_plane_state,
10002 false,
10003 &lock_and_validation_needed);
10004 if (ret)
10005 goto fail;
62f55537
AG
10006 }
10007
10008 /* Disable all crtcs which require disable */
4b9674e5
LL
10009 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10010 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10011 old_crtc_state,
10012 new_crtc_state,
10013 false,
10014 &lock_and_validation_needed);
10015 if (ret)
10016 goto fail;
62f55537
AG
10017 }
10018
10019 /* Enable all crtcs which require enable */
4b9674e5
LL
10020 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10021 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10022 old_crtc_state,
10023 new_crtc_state,
10024 true,
10025 &lock_and_validation_needed);
10026 if (ret)
10027 goto fail;
62f55537
AG
10028 }
10029
10030 /* Add new/modified planes */
9e869063
LL
10031 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10032 ret = dm_update_plane_state(dc, state, plane,
10033 old_plane_state,
10034 new_plane_state,
10035 true,
10036 &lock_and_validation_needed);
10037 if (ret)
10038 goto fail;
62f55537
AG
10039 }
10040
b349f76e
ES
10041 /* Run this here since we want to validate the streams we created */
10042 ret = drm_atomic_helper_check_planes(dev, state);
10043 if (ret)
10044 goto fail;
62f55537 10045
12f4849a
SS
10046 /* Check cursor planes scaling */
10047 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10048 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10049 if (ret)
10050 goto fail;
10051 }
10052
43d10d30
NK
10053 if (state->legacy_cursor_update) {
10054 /*
10055 * This is a fast cursor update coming from the plane update
10056 * helper, check if it can be done asynchronously for better
10057 * performance.
10058 */
10059 state->async_update =
10060 !drm_atomic_helper_async_check(dev, state);
10061
10062 /*
10063 * Skip the remaining global validation if this is an async
10064 * update. Cursor updates can be done without affecting
10065 * state or bandwidth calcs and this avoids the performance
10066 * penalty of locking the private state object and
10067 * allocating a new dc_state.
10068 */
10069 if (state->async_update)
10070 return 0;
10071 }
10072
ebdd27e1 10073 /* Check scaling and underscan changes */
1f6010a9 10074 /* TODO Removed scaling changes validation due to inability to commit
e7b07cee
HW
 10075 * new stream into context without causing a full reset. Need to
10076 * decide how to handle.
10077 */
c2cea706 10078 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
10079 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10080 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10081 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
10082
10083 /* Skip any modesets/resets */
0bc9706d
LSL
10084 if (!acrtc || drm_atomic_crtc_needs_modeset(
10085 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
10086 continue;
10087
b830ebc9 10088 /* Skip anything that is not a scaling or underscan change */
54d76575 10089 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
10090 continue;
10091
10092 lock_and_validation_needed = true;
10093 }
10094
f6d7c7fa
NK
 10095 /*
10096 * Streams and planes are reset when there are changes that affect
10097 * bandwidth. Anything that affects bandwidth needs to go through
10098 * DC global validation to ensure that the configuration can be applied
10099 * to hardware.
10100 *
10101 * We have to currently stall out here in atomic_check for outstanding
10102 * commits to finish in this case because our IRQ handlers reference
10103 * DRM state directly - we can end up disabling interrupts too early
10104 * if we don't.
10105 *
10106 * TODO: Remove this stall and drop DM state private objects.
a87fa993 10107 */
f6d7c7fa 10108 if (lock_and_validation_needed) {
eb3dc897
NK
10109 ret = dm_atomic_get_state(state, &dm_state);
10110 if (ret)
10111 goto fail;
e7b07cee
HW
10112
10113 ret = do_aquire_global_lock(dev, state);
10114 if (ret)
10115 goto fail;
1dc90497 10116
d9fe1a4c 10117#if defined(CONFIG_DRM_AMD_DC_DCN)
8c20a1ed
DF
10118 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10119 goto fail;
10120
29b9ba74
ML
10121 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10122 if (ret)
10123 goto fail;
d9fe1a4c 10124#endif
29b9ba74 10125
ded58c7b
ZL
10126 /*
10127 * Perform validation of MST topology in the state:
10128 * We need to perform MST atomic check before calling
10129 * dc_validate_global_state(), or there is a chance
10130 * to get stuck in an infinite loop and hang eventually.
10131 */
10132 ret = drm_dp_mst_atomic_check(state);
10133 if (ret)
10134 goto fail;
74a16675
RS
10135 status = dc_validate_global_state(dc, dm_state->context, false);
10136 if (status != DC_OK) {
10137 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10138 dc_status_to_str(status), status);
e7b07cee
HW
10139 ret = -EINVAL;
10140 goto fail;
10141 }
bd200d19 10142 } else {
674e78ac 10143 /*
bd200d19
NK
10144 * The commit is a fast update. Fast updates shouldn't change
10145 * the DC context, affect global validation, and can have their
10146 * commit work done in parallel with other commits not touching
10147 * the same resource. If we have a new DC context as part of
10148 * the DM atomic state from validation we need to free it and
10149 * retain the existing one instead.
fde9f39a
MR
10150 *
10151 * Furthermore, since the DM atomic state only contains the DC
10152 * context and can safely be annulled, we can free the state
10153 * and clear the associated private object now to free
10154 * some memory and avoid a possible use-after-free later.
674e78ac 10155 */
bd200d19 10156
fde9f39a
MR
10157 for (i = 0; i < state->num_private_objs; i++) {
10158 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 10159
fde9f39a
MR
10160 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10161 int j = state->num_private_objs-1;
bd200d19 10162
fde9f39a
MR
10163 dm_atomic_destroy_state(obj,
10164 state->private_objs[i].state);
10165
10166 /* If i is not at the end of the array then the
10167 * last element needs to be moved to where i was
10168 * before the array can safely be truncated.
10169 */
10170 if (i != j)
10171 state->private_objs[i] =
10172 state->private_objs[j];
bd200d19 10173
fde9f39a
MR
10174 state->private_objs[j].ptr = NULL;
10175 state->private_objs[j].state = NULL;
10176 state->private_objs[j].old_state = NULL;
10177 state->private_objs[j].new_state = NULL;
10178
10179 state->num_private_objs = j;
10180 break;
10181 }
bd200d19 10182 }
e7b07cee
HW
10183 }
10184
caff0e66
NK
10185 /* Store the overall update type for use later in atomic check. */
 10186 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10187 struct dm_crtc_state *dm_new_crtc_state =
10188 to_dm_crtc_state(new_crtc_state);
10189
f6d7c7fa
NK
10190 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10191 UPDATE_TYPE_FULL :
10192 UPDATE_TYPE_FAST;
e7b07cee
HW
10193 }
10194
10195 /* Must be success */
10196 WARN_ON(ret);
e8a98235
RS
10197
10198 trace_amdgpu_dm_atomic_check_finish(state, ret);
10199
e7b07cee
HW
10200 return ret;
10201
10202fail:
10203 if (ret == -EDEADLK)
01e28f9c 10204 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 10205 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 10206 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 10207 else
01e28f9c 10208 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee 10209
e8a98235
RS
10210 trace_amdgpu_dm_atomic_check_finish(state, ret);
10211
e7b07cee
HW
10212 return ret;
10213}
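/*
 * Editor's summary (a sketch of the ordering above, not normative): the
 * atomic check tears down before it builds up, i.e. it is structured as
 *
 *	dm_update_plane_state(..., enable = false)	- remove planes
 *	dm_update_crtc_state(..., enable = false)	- disable CRTCs
 *	dm_update_crtc_state(..., enable = true)	- enable CRTCs
 *	dm_update_plane_state(..., enable = true)	- add planes
 *
 * and only runs the expensive dc_validate_global_state() slow path when
 * lock_and_validation_needed was set by one of those steps.
 */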
10214
3ee6b26b
AD
10215static bool is_dp_capable_without_timing_msa(struct dc *dc,
10216 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
10217{
10218 uint8_t dpcd_data;
10219 bool capable = false;
10220
c84dec2f 10221 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
10222 dm_helpers_dp_read_dpcd(
10223 NULL,
c84dec2f 10224 amdgpu_dm_connector->dc_link,
e7b07cee
HW
10225 DP_DOWN_STREAM_PORT_COUNT,
10226 &dpcd_data,
10227 sizeof(dpcd_data))) {
 10228 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10229 }
10230
10231 return capable;
10232}
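/*
 * Example (editor's sketch, assuming the usual drm_dp_helper.h encoding
 * where DP_MSA_TIMING_PAR_IGNORED is bit 6 of DP_DOWN_STREAM_PORT_COUNT):
 * a DPCD read returning 0x41 sets capable, since 0x41 & (1 << 6) != 0,
 * while a read returning 0x01 leaves the sink MSA-dependent.
 */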
f9b4f20c
SW
10233
10234static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10235 uint8_t *edid_ext, int len,
10236 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10237{
10238 int i;
10239 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10240 struct dc *dc = adev->dm.dc;
10241
10242 /* send extension block to DMCU for parsing */
10243 for (i = 0; i < len; i += 8) {
10244 bool res;
10245 int offset;
10246
 10247 /* send 8 bytes at a time */
10248 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10249 return false;
10250
10251 if (i+8 == len) {
 10252 /* EDID block send completed, expect result */
10253 int version, min_rate, max_rate;
10254
10255 res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10256 if (res) {
10257 /* amd vsdb found */
10258 vsdb_info->freesync_supported = 1;
10259 vsdb_info->amd_vsdb_version = version;
10260 vsdb_info->min_refresh_rate_hz = min_rate;
10261 vsdb_info->max_refresh_rate_hz = max_rate;
10262 return true;
10263 }
10264 /* not amd vsdb */
10265 return false;
10266 }
10267
 10268 /* check for ack */
10269 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10270 if (!res)
10271 return false;
10272 }
10273
10274 return false;
10275}
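/*
 * Editor's sketch of the handshake above: a 128-byte CEA extension block
 * is streamed to the parser in sixteen 8-byte chunks. Every chunk except
 * the last is acknowledged via dc_edid_parser_recv_cea_ack(); after the
 * final chunk (i + 8 == len) the result is read back with
 * dc_edid_parser_recv_amd_vsdb(), which only succeeds when the block
 * carried an AMD vendor-specific data block.
 */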
10276
7c7dd774 10277static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
f9b4f20c
SW
10278 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10279{
10280 uint8_t *edid_ext = NULL;
10281 int i;
10282 bool valid_vsdb_found = false;
10283
10284 /*----- drm_find_cea_extension() -----*/
10285 /* No EDID or EDID extensions */
10286 if (edid == NULL || edid->extensions == 0)
7c7dd774 10287 return -ENODEV;
f9b4f20c
SW
10288
10289 /* Find CEA extension */
10290 for (i = 0; i < edid->extensions; i++) {
10291 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10292 if (edid_ext[0] == CEA_EXT)
10293 break;
10294 }
10295
10296 if (i == edid->extensions)
7c7dd774 10297 return -ENODEV;
f9b4f20c
SW
10298
10299 /*----- cea_db_offsets() -----*/
10300 if (edid_ext[0] != CEA_EXT)
7c7dd774 10301 return -ENODEV;
f9b4f20c
SW
10302
10303 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
7c7dd774
AB
10304
10305 return valid_vsdb_found ? i : -ENODEV;
f9b4f20c
SW
10306}
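/*
 * Example (editor's sketch): with edid->extensions == 1 the CEA block
 * sits right after the 128-byte base block, i.e. at
 * (uint8_t *)edid + EDID_LENGTH * 1. On success the extension index
 * (0 here) is returned, which the caller reuses as an index; otherwise
 * the function returns -ENODEV.
 */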
10307
98e6436d
AK
10308void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10309 struct edid *edid)
e7b07cee 10310{
eb0709ba 10311 int i = 0;
e7b07cee
HW
10312 struct detailed_timing *timing;
10313 struct detailed_non_pixel *data;
10314 struct detailed_data_monitor_range *range;
c84dec2f
HW
10315 struct amdgpu_dm_connector *amdgpu_dm_connector =
10316 to_amdgpu_dm_connector(connector);
bb47de73 10317 struct dm_connector_state *dm_con_state = NULL;
e7b07cee
HW
10318
10319 struct drm_device *dev = connector->dev;
1348969a 10320 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 10321 bool freesync_capable = false;
f9b4f20c 10322 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
b830ebc9 10323
8218d7f1
HW
10324 if (!connector->state) {
 10325 DRM_ERROR("%s - Connector has no state\n", __func__);
bb47de73 10326 goto update;
8218d7f1
HW
10327 }
10328
98e6436d
AK
10329 if (!edid) {
10330 dm_con_state = to_dm_connector_state(connector->state);
10331
10332 amdgpu_dm_connector->min_vfreq = 0;
10333 amdgpu_dm_connector->max_vfreq = 0;
10334 amdgpu_dm_connector->pixel_clock_mhz = 0;
10335
bb47de73 10336 goto update;
98e6436d
AK
10337 }
10338
8218d7f1
HW
10339 dm_con_state = to_dm_connector_state(connector->state);
10340
c84dec2f 10341 if (!amdgpu_dm_connector->dc_sink) {
e7b07cee 10342 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
bb47de73 10343 goto update;
e7b07cee
HW
10344 }
10345 if (!adev->dm.freesync_module)
bb47de73 10346 goto update;
f9b4f20c
SW
10347
10348
10349 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10350 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10351 bool edid_check_required = false;
10352
10353 if (edid) {
e7b07cee
HW
10354 edid_check_required = is_dp_capable_without_timing_msa(
10355 adev->dm.dc,
c84dec2f 10356 amdgpu_dm_connector);
e7b07cee 10357 }
e7b07cee 10358
f9b4f20c
SW
 10359 if (edid_check_required && (edid->version > 1 ||
10360 (edid->version == 1 && edid->revision > 1))) {
10361 for (i = 0; i < 4; i++) {
e7b07cee 10362
f9b4f20c
SW
10363 timing = &edid->detailed_timings[i];
10364 data = &timing->data.other_data;
10365 range = &data->data.range;
10366 /*
10367 * Check if monitor has continuous frequency mode
10368 */
10369 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10370 continue;
10371 /*
10372 * Check for flag range limits only. If flag == 1 then
10373 * no additional timing information provided.
10374 * Default GTF, GTF Secondary curve and CVT are not
10375 * supported
10376 */
10377 if (range->flags != 1)
10378 continue;
a0ffc3fd 10379
f9b4f20c
SW
10380 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10381 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10382 amdgpu_dm_connector->pixel_clock_mhz =
10383 range->pixel_clock_mhz * 10;
a0ffc3fd 10384
f9b4f20c
SW
10385 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10386 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
e7b07cee 10387
f9b4f20c
SW
10388 break;
10389 }
98e6436d 10390
f9b4f20c
SW
10391 if (amdgpu_dm_connector->max_vfreq -
10392 amdgpu_dm_connector->min_vfreq > 10) {
98e6436d 10393
f9b4f20c
SW
10394 freesync_capable = true;
10395 }
10396 }
10397 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
7c7dd774
AB
10398 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10399 if (i >= 0 && vsdb_info.freesync_supported) {
f9b4f20c
SW
10400 timing = &edid->detailed_timings[i];
10401 data = &timing->data.other_data;
10402
10403 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10404 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10405 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10406 freesync_capable = true;
10407
10408 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10409 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
e7b07cee
HW
10410 }
10411 }
bb47de73
NK
10412
10413update:
10414 if (dm_con_state)
10415 dm_con_state->freesync_capable = freesync_capable;
10416
10417 if (connector->vrr_capable_property)
10418 drm_connector_set_vrr_capable_property(connector,
10419 freesync_capable);
e7b07cee
HW
10420}
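/*
 * Worked example (editor's sketch): an EDID range descriptor advertising
 * 48-144 Hz yields min_vfreq = 48 and max_vfreq = 144. The 96 Hz span
 * exceeds the 10 Hz threshold used above, so freesync_capable becomes
 * true and the connector's vrr_capable property is set; a fixed 60-60 Hz
 * panel stays below the threshold and is left non-capable.
 */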
10421
8c322309
RL
10422static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10423{
10424 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10425
10426 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10427 return;
10428 if (link->type == dc_connection_none)
10429 return;
10430 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10431 dpcd_data, sizeof(dpcd_data))) {
d1ebfdd8
WW
10432 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10433
10434 if (dpcd_data[0] == 0) {
1cfbbdde 10435 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
d1ebfdd8
WW
10436 link->psr_settings.psr_feature_enabled = false;
10437 } else {
1cfbbdde 10438 link->psr_settings.psr_version = DC_PSR_VERSION_1;
d1ebfdd8
WW
10439 link->psr_settings.psr_feature_enabled = true;
10440 }
10441
 10442 DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
10443 }
10444}
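/*
 * Example (editor's sketch): an eDP sink reporting 0x01 in DP_PSR_SUPPORT
 * advertises PSR version 1, so the link is marked DC_PSR_VERSION_1 with
 * psr_feature_enabled == true; a sink reporting 0x00 is marked
 * DC_PSR_VERSION_UNSUPPORTED and the feature stays off.
 */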
10445
10446/*
10447 * amdgpu_dm_link_setup_psr() - configure psr link
10448 * @stream: stream state
10449 *
10450 * Return: true if success
10451 */
10452static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10453{
10454 struct dc_link *link = NULL;
10455 struct psr_config psr_config = {0};
10456 struct psr_context psr_context = {0};
8c322309
RL
10457 bool ret = false;
10458
10459 if (stream == NULL)
10460 return false;
10461
10462 link = stream->link;
8c322309 10463
d1ebfdd8 10464 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8c322309
RL
10465
10466 if (psr_config.psr_version > 0) {
10467 psr_config.psr_exit_link_training_required = 0x1;
10468 psr_config.psr_frame_capture_indication_req = 0;
10469 psr_config.psr_rfb_setup_time = 0x37;
10470 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10471 psr_config.allow_smu_optimizations = 0x0;
10472
10473 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10474
10475 }
d1ebfdd8 10476 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
10477
10478 return ret;
10479}
10480
10481/*
10482 * amdgpu_dm_psr_enable() - enable psr f/w
10483 * @stream: stream state
10484 *
10485 * Return: true if success
10486 */
10487bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10488{
10489 struct dc_link *link = stream->link;
5b5abe95
AK
10490 unsigned int vsync_rate_hz = 0;
10491 struct dc_static_screen_params params = {0};
10492 /* Calculate number of static frames before generating interrupt to
10493 * enter PSR.
10494 */
5b5abe95
AK
 10495 /* Init fail-safe of 2 static frames */
10496 unsigned int num_frames_static = 2;
8c322309
RL
10497
10498 DRM_DEBUG_DRIVER("Enabling psr...\n");
10499
5b5abe95
AK
10500 vsync_rate_hz = div64_u64(div64_u64((
10501 stream->timing.pix_clk_100hz * 100),
10502 stream->timing.v_total),
10503 stream->timing.h_total);
10504
 10505 /* Round up:
 10506 * calculate the number of frames such that at least 30 ms of static
 10507 * time has passed.
10508 */
7aa62404
RL
10509 if (vsync_rate_hz != 0) {
10510 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
5b5abe95 10511 num_frames_static = (30000 / frame_time_microsec) + 1;
7aa62404 10512 }
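 /*
 * Worked example (editor's sketch): a 1080p60 stream has
 * pix_clk_100hz == 1485000, v_total == 1125 and h_total == 2200,
 * so vsync_rate_hz == 148500000 / 1125 / 2200 == 60,
 * frame_time_microsec == 1000000 / 60 == 16666, and
 * num_frames_static == 30000 / 16666 + 1 == 2 static frames.
 */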
5b5abe95
AK
10513
10514 params.triggers.cursor_update = true;
10515 params.triggers.overlay_update = true;
10516 params.triggers.surface_update = true;
10517 params.num_frames = num_frames_static;
8c322309 10518
5b5abe95 10519 dc_stream_set_static_screen_params(link->ctx->dc,
8c322309 10520 &stream, 1,
5b5abe95 10521 &params);
8c322309 10522
1d496907 10523 return dc_link_set_psr_allow_active(link, true, false, false);
8c322309
RL
10524}
10525
10526/*
10527 * amdgpu_dm_psr_disable() - disable psr f/w
10528 * @stream: stream state
10529 *
10530 * Return: true if success
10531 */
10532static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10533{
10535 DRM_DEBUG_DRIVER("Disabling psr...\n");
10536
1d496907 10537 return dc_link_set_psr_allow_active(stream->link, false, true, false);
8c322309 10538}
3d4e52d0 10539
6ee90e88 10540/*
 10541 * amdgpu_dm_psr_disable_all() - disable psr f/w
 10542 * if psr is enabled on any stream
10543 *
10544 * Return: true if success
10545 */
10546static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10547{
10548 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10549 return dc_set_psr_allow_active(dm->dc, false);
10550}
10551
3d4e52d0
VL
10552void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10553{
1348969a 10554 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
10555 struct dc *dc = adev->dm.dc;
10556 int i;
10557
10558 mutex_lock(&adev->dm.dc_lock);
10559 if (dc->current_state) {
10560 for (i = 0; i < dc->current_state->stream_count; ++i)
10561 dc->current_state->streams[i]
10562 ->triggered_crtc_reset.enabled =
10563 adev->dm.force_timing_sync;
10564
10565 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10566 dc_trigger_sync(dc, dc->current_state);
10567 }
10568 mutex_unlock(&adev->dm.dc_lock);
10569}
9d83722d
RS
10570
10571void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10572 uint32_t value, const char *func_name)
10573{
10574#ifdef DM_CHECK_ADDR_0
10575 if (address == 0) {
 10576 DC_ERR("invalid register write; address = 0\n");
10577 return;
10578 }
10579#endif
10580 cgs_write_register(ctx->cgs_device, address, value);
10581 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10582}
10583
10584uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10585 const char *func_name)
10586{
10587 uint32_t value;
10588#ifdef DM_CHECK_ADDR_0
10589 if (address == 0) {
10590 DC_ERR("invalid register read; address = 0\n");
10591 return 0;
10592 }
10593#endif
10594
10595 if (ctx->dmub_srv &&
10596 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10597 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10598 ASSERT(false);
10599 return 0;
10600 }
10601
10602 value = cgs_read_register(ctx->cgs_device, address);
10603
10604 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10605
10606 return value;
10607}