/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

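/*
 * Mirror the sink's DPCD dongle type onto the DRM "DP subconnector"
 * property, so userspace can tell a native DP sink from one behind an
 * active HDMI/DVI/VGA dongle.
 */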
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

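/*
 * The IP-block status/reset hooks below are stubs: the display block has no
 * soft-reset handling of its own, so it always reports idle and never
 * requests a reset.
 */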
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

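/*
 * Resolve an OTG (output timing generator) instance back to its amdgpu_crtc.
 * Falls back to CRTC 0, with a warning, when the instance is -1.
 */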
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

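/*
 * DC needs its vmin/vmax timing adjusted whenever VRR toggles between
 * active and inactive, or whenever the fixed-rate VRR state is entered.
 */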
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

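/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Fires after the end of front-porch. In VRR mode this is where core vblank
 * handling is done (and queued pageflip completions delivered), since vblank
 * timestamps are only valid once scanout has left the front-porch.
 */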
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread, and also signals the event to wake up the thread.
 */
void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev = adev->dm.ddev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	link_index = notify->link_index;

	link = adev->dm.dc->links[link_index];

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	if (hpd_aconnector)
		handle_hpd_irq_helper(hpd_aconnector);
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
				   dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

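/*
 * Worker for DMUB notifications whose handling was offloaded from the
 * outbox IRQ handler; dispatches the copied notification to the callback
 * registered for its type.
 */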
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
										      dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining pending DMUB notifications
 * and trace-buffer entries.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				if (dmub_hpd_wrk->dmub_notify)
					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}


	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}
#endif

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

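/*
 * drm_audio_component glue: lets the HDA audio driver query ELD data for a
 * given port through the component framework.
 */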
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

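/*
 * Bring up the DMUB microcontroller: validate the firmware and framebuffer
 * regions, optionally backdoor-load the firmware, program the hardware
 * parameters, and wait for the auto-load to complete.
 */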
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;
	struct dc *dc = adev->dm.dc;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->asic_type) {
	case CHIP_YELLOW_CARP:
		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
			hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
#endif
		}
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
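/*
 * Translate the GMC's view of system memory (frame buffer, AGP aperture and
 * GART page table) into the dc_phy_addr_space_config that DC consumes.
 */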
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;


	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
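/*
 * Deferred vblank work: maintains the count of CRTCs that need vblank
 * interrupts, gates idle optimizations (MALL) on that count, and toggles
 * PSR to match the OS vblank requirements.
 */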
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

#endif

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}

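/*
 * Allocate one single-threaded HPD RX IRQ offload workqueue per link, so
 * that long-running handling (link loss, automated test) happens outside
 * the interrupt path.
 */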
static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;


	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

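/*
 * Walk the zero-terminated quirk table and report whether memory stutter
 * mode must be disabled for this exact vendor/device/subsystem/revision.
 */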
static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

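/*
 * Main DM bring-up: creates the DC core, initializes DMUB, freesync, HDCP
 * and DMUB notification handling, then builds the DRM device glue.
 */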
7578ecda 1352static int amdgpu_dm_init(struct amdgpu_device *adev)
4562236b
HW
1353{
1354 struct dc_init_data init_data;
52704fca
BL
1355#ifdef CONFIG_DRM_AMD_DC_HDCP
1356 struct dc_callback_init init_params;
1357#endif
743b9786 1358 int r;
52704fca 1359
4a580877 1360 adev->dm.ddev = adev_to_drm(adev);
4562236b
HW
1361 adev->dm.adev = adev;
1362
4562236b
HW
1363 /* Zero all the fields */
1364 memset(&init_data, 0, sizeof(init_data));
52704fca
BL
1365#ifdef CONFIG_DRM_AMD_DC_HDCP
1366 memset(&init_params, 0, sizeof(init_params));
1367#endif
4562236b 1368
674e78ac 1369 mutex_init(&adev->dm.dc_lock);
6ce8f316 1370 mutex_init(&adev->dm.audio_lock);
ea3b4242
QZ
1371#if defined(CONFIG_DRM_AMD_DC_DCN)
1372 spin_lock_init(&adev->dm.vblank_lock);
1373#endif
674e78ac 1374
4562236b
HW
1375 if(amdgpu_dm_irq_init(adev)) {
1376 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1377 goto error;
1378 }
1379
1380 init_data.asic_id.chip_family = adev->family;
1381
2dc31ca1 1382 init_data.asic_id.pci_revision_id = adev->pdev->revision;
4562236b 1383 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
dae66a04 1384 init_data.asic_id.chip_id = adev->pdev->device;
4562236b 1385
770d13b1 1386 init_data.asic_id.vram_width = adev->gmc.vram_width;
4562236b
HW
1387 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1388 init_data.asic_id.atombios_base_address =
1389 adev->mode_info.atom_context->bios;
1390
1391 init_data.driver = adev;
1392
1393 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1394
1395 if (!adev->dm.cgs_device) {
1396 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1397 goto error;
1398 }
1399
1400 init_data.cgs_device = adev->dm.cgs_device;
1401
4562236b
HW
1402 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1403
60fb100b
AD
1404 switch (adev->asic_type) {
1405 case CHIP_CARRIZO:
1406 case CHIP_STONEY:
1ebcaebd
NK
1407 init_data.flags.gpu_vm_support = true;
1408 break;
60fb100b 1409 default:
1d789535 1410 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
1411 case IP_VERSION(2, 1, 0):
1412 init_data.flags.gpu_vm_support = true;
91adec9e
ML
1413 switch (adev->dm.dmcub_fw_version) {
1414 case 0: /* development */
1415 case 0x1: /* linux-firmware.git hash 6d9f399 */
1416 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1417 init_data.flags.disable_dmcu = false;
1418 break;
1419 default:
1420 init_data.flags.disable_dmcu = true;
1421 }
c08182f2 1422 break;
559f591d
AD
1423 case IP_VERSION(1, 0, 0):
1424 case IP_VERSION(1, 0, 1):
c08182f2
AD
1425 case IP_VERSION(3, 0, 1):
1426 case IP_VERSION(3, 1, 2):
1427 case IP_VERSION(3, 1, 3):
1428 init_data.flags.gpu_vm_support = true;
1429 break;
1430 case IP_VERSION(2, 0, 3):
1431 init_data.flags.disable_dmcu = true;
1432 break;
1433 default:
1434 break;
1435 }
60fb100b
AD
1436 break;
1437 }
6e227308 1438
04b94af4
AD
1439 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1440 init_data.flags.fbc_support = true;
1441
d99f38ae
AD
1442 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1443 init_data.flags.multi_mon_pp_mclk_switch = true;
1444
eaf56410
LL
1445 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1446 init_data.flags.disable_fractional_pwm = true;
a5148245
ZL
1447
1448 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1449 init_data.flags.edp_no_power_sequencing = true;
eaf56410 1450
27eaa492 1451 init_data.flags.power_down_display_on_boot = true;
78ad75f8 1452
0dd79532 1453 INIT_LIST_HEAD(&adev->dm.da_list);
4562236b
HW
1454 /* Display Core create. */
1455 adev->dm.dc = dc_create(&init_data);
1456
423788c7 1457 if (adev->dm.dc) {
76121231 1458 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
423788c7 1459 } else {
76121231 1460 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
423788c7
ES
1461 goto error;
1462 }
4562236b 1463
8a791dab
HW
1464 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1465 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1466 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1467 }
1468
f99d8762
HW
1469 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1470 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3ce51649
AD
1471 if (dm_should_disable_stutter(adev->pdev))
1472 adev->dm.dc->debug.disable_stutter = true;
f99d8762 1473
8a791dab
HW
1474 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1475 adev->dm.dc->debug.disable_stutter = true;
1476
1477 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1478 adev->dm.dc->debug.disable_dsc = true;
1479
1480 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1481 adev->dm.dc->debug.disable_clock_gate = true;
1482
743b9786
NK
1483 r = dm_dmub_hw_init(adev);
1484 if (r) {
1485 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1486 goto error;
1487 }
1488
bb6785c1
NK
1489 dc_hardware_init(adev->dm.dc);
1490
8e794421
WL
1491 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1492 if (!adev->dm.hpd_rx_offload_wq) {
1493 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1494 goto error;
1495 }
1496
0b08c54b 1497#if defined(CONFIG_DRM_AMD_DC_DCN)
3ca001af 1498 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
e6cd859d
AD
1499 struct dc_phy_addr_space_config pa_config;
1500
0b08c54b 1501 mmhub_read_system_context(adev, &pa_config);
c0fb85ae 1502
0b08c54b
YZ
1503 // Call the DC init_memory func
1504 dc_setup_system_context(adev->dm.dc, &pa_config);
1505 }
1506#endif
c0fb85ae 1507
4562236b
HW
1508 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1509 if (!adev->dm.freesync_module) {
1510 DRM_ERROR(
1511 "amdgpu: failed to initialize freesync_module.\n");
1512 } else
f1ad2f5e 1513 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
4562236b
HW
1514 adev->dm.freesync_module);
1515
e277adc5
LSL
1516 amdgpu_dm_init_color_mod();
1517
ea3b4242
QZ
1518#if defined(CONFIG_DRM_AMD_DC_DCN)
1519 if (adev->dm.dc->caps.max_links > 0) {
09a5df6c
NK
1520 adev->dm.vblank_control_workqueue =
1521 create_singlethread_workqueue("dm_vblank_control_workqueue");
1522 if (!adev->dm.vblank_control_workqueue)
ea3b4242 1523 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
ea3b4242
QZ
1524 }
1525#endif
1526
52704fca 1527#ifdef CONFIG_DRM_AMD_DC_HDCP
c08182f2 1528 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
e50dc171 1529 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
52704fca 1530
96a3b32e
BL
1531 if (!adev->dm.hdcp_workqueue)
1532 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1533 else
1534 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
52704fca 1535
96a3b32e
BL
1536 dc_init_callbacks(adev->dm.dc, &init_params);
1537 }
9a65df19
WL
1538#endif
1539#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1540 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
52704fca 1541#endif
81927e28
JS
1542 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1543 init_completion(&adev->dm.dmub_aux_transfer_done);
1544 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1545 if (!adev->dm.dmub_notify) {
1546 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1547 goto error;
1548 }
e27c41d5
JS
1549
1550 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1551 if (!adev->dm.delayed_hpd_wq) {
1552 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1553 goto error;
1554 }
1555
81927e28 1556 amdgpu_dm_outbox_init(adev);
e27c41d5
JS
1557#if defined(CONFIG_DRM_AMD_DC_DCN)
1558 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1559 dmub_aux_setconfig_callback, false)) {
1560 DRM_ERROR("amdgpu: fail to register dmub aux callback");
1561 goto error;
1562 }
1563 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1564 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1565 goto error;
1566 }
1567#endif
81927e28
JS
1568 }

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

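/**
 * amdgpu_dm_early_fini() - Early teardown of the DM device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Tears down the audio component binding ahead of the full DM teardown
 * performed later by amdgpu_dm_fini().
 */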
static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;
	}
#endif

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
		destroy_workqueue(adev->dm.delayed_hpd_wq);
		adev->dm.delayed_hpd_wq = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	if (adev->dm.hpd_rx_offload_wq) {
		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
			if (adev->dm.hpd_rx_offload_wq[i].wq) {
				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
			}
		}

		kfree(adev->dm.hpd_rx_offload_wq);
		adev->dm.hpd_rx_offload_wq = NULL;
	}

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

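/*
 * load_dmcu_fw() - Request and validate DMCU firmware for ASICs that use it.
 *
 * ASICs whose DMCU needs no separately loaded firmware return 0 without
 * doing anything, and a missing firmware file is not an error either,
 * since DMCU firmware is optional.
 */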
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			return 0;
		default:
			break;
		}
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

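/*
 * dm_dmub_sw_init() - Software-side setup of the DMUB service.
 *
 * Selects the DMUB firmware and ASIC enum from the DCE IP version, requests
 * and validates the firmware, creates the DMUB service, sizes its memory
 * regions, backs them with a VRAM buffer object, and computes the final
 * framebuffer layout the service will use.
 */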
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case IP_VERSION(3, 0, 0):
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		} else {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		}
		break;
	case IP_VERSION(3, 0, 1):
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case IP_VERSION(3, 0, 2):
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case IP_VERSION(3, 0, 3):
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}


	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

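/*
 * Walk all connectors and start MST topology management on every MST
 * branch link that has a usable AUX channel. A link whose topology fails
 * to start is downgraded to a single-display connection.
 */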
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_override = false;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;
	/* In the case where abm is implemented on dmcub,
	 * the dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu) {
		if (!dmcu_load_iram(dmcu, params))
			return -EINVAL;
	} else if (adev->dm.dc->ctx->dmub_srv) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		int edp_num;

		get_edp_links(adev->dm.dc, edp_links, &edp_num);
		for (i = 0; i < edp_num; i++) {
			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
				return -EINVAL;
		}
	}

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

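/*
 * Suspend or resume MST topology management on every root MST branch.
 * On resume, a topology that fails to come back up is torn down and a
 * hotplug event is sent so userspace can re-probe the outputs.
 */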
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, the clock settings of dcn watermarks are fixed. The
	 * settings should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates the dcn watermark clock settings within
	 * dc_create and dcn20_resource_construct,
	 * then calls the pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the window driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 0, 2):
	case IP_VERSION(2, 0, 0):
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}


static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

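/*
 * Enable or disable the pflip and vblank interrupts of every stream in
 * @state that has at least one active plane. Used to quiesce and restore
 * display interrupts around a GPU reset.
 */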
static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

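/*
 * Commit a state with no streams: duplicate the current state, strip all
 * streams (and their planes) out of the copy, validate it, and commit it.
 * This drives every pipe idle, e.g. ahead of a GPU reset.
 */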
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
{
	int i;

	if (dm->hpd_rx_offload_wq) {
		for (i = 0; i < dm->dc->caps.max_links; i++)
			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
	}
}

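/*
 * IP suspend hook. During a GPU reset the current DC state is cached and
 * all streams are committed away under dc_lock; a regular S3 suspend goes
 * through the full DRM atomic suspend path instead.
 */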
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);

#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(adev->dm.dc, false);
#endif

		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		hpd_rx_irq_work_suspend(dm);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	hpd_rx_irq_work_suspend(dm);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

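/*
 * Fake a link detection for connectors that are forced on but report no
 * physical connection: build a sink matching the connector's signal type
 * and attempt to read a local EDID through it.
 */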
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink)
		dc_sink_release(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}

static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}

static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	acrtc_state->force_dpms_off = true;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}

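/*
 * IP resume hook. After a GPU reset the DC state cached by dm_suspend is
 * re-committed; on a regular S3 resume, DMUB and DC are re-initialized,
 * connectors are re-detected, and the saved DRM atomic state is restored.
 */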
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
				dc_state->stream_status->plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}
#if defined(CONFIG_DRM_AMD_DC_DCN)
		/*
		 * Resource allocation happens for link encoders for newer ASIC in
		 * dc_validate_global_state, so we need to revalidate it.
		 *
		 * This shouldn't fail (it passed once before), so warn if it does.
		 */
		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
#endif

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.early_fini = amdgpu_dm_early_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};


/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

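/*
 * Derive the eDP backlight capabilities (AUX vs. PWM control, min/max
 * input signal) from the sink's extended DPCD caps and HDR metadata.
 * As a worked example of the math below: max_cll = 65 gives q = 65 >> 5 = 2
 * and r = 65 % 32 = 1, so max = (1 << 2) * 51 = 204, closely matching the
 * exact value 50 * 2^(65/32) ~= 204.4.
 */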
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	int i;

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	for (i = 0; i < dm->num_of_edps; i++) {
		if (link == dm->backlight_link[i])
			break;
	}
	if (i >= dm->num_of_edps)
		return;
	caps = &dm->backlight_caps[i];
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 /*||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
		caps->aux_support = true;

	if (amdgpu_backlight == 0)
		caps->aux_support = false;
	else if (amdgpu_backlight == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression would normally need floating point
	 * precision; to avoid that complexity, we take advantage of CV being
	 * divided by a constant. From Euclid's division algorithm, we know
	 * that CV can be written as: CV = 32*q + r. Replacing CV in the
	 * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
	 * to pre-compute the values of 50*2**(r/32). For pre-computing the
	 * values we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either a fake or a physical sink depending
	 * on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			dc_sink_release(aconnector->dc_sink);
		}

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}

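/*
 * Common HPD handling: re-detect the link (or emulate detection for forced
 * connectors), update the connector state, and send a hotplug event to
 * userspace. HDCP state for the affected link is reset as well.
 */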
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
	struct dm_crtc_state *dm_crtc_state = NULL;

	if (adev->dm.disable_hpd_irq)
		return;

	if (dm_con_state->base.state && dm_con_state->base.crtc)
		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
					dm_con_state->base.state,
					dm_con_state->base.crtc));
	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		if (new_connection_type == dc_connection_none &&
		    aconnector->dc_link->type == dc_connection_none &&
		    dm_crtc_state)
			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;

	handle_hpd_irq_helper(aconnector);
}

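/*
 * Drain MST sideband messages after a short-pulse HPD: read the ESI bytes
 * from the DPCD, let the MST manager handle the IRQ, ACK it back to the
 * sink, and repeat until no new IRQ is pending (bounded by a retry limit).
 */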
static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
					 union hpd_irq_data hpd_irq_data)
{
	struct hpd_rx_irq_offload_work *offload_work =
		kzalloc(sizeof(*offload_work), GFP_KERNEL);

	if (!offload_work) {
		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
		return;
	}

	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
	offload_work->data = hpd_irq_data;
	offload_work->offload_wq = offload_wq;

	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
}

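/*
 * Handle a DP short-pulse (HPD RX) interrupt. Sideband messages are
 * drained inline, while automated-test and link-loss requests are pushed
 * to the per-link offload workqueue; a downstream port status change
 * triggers re-detection and a hotplug event.
 */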
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	bool result = false;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union hpd_irq_data hpd_irq_data;
	bool link_loss = false;
	bool has_left_work = false;
	int idx = aconnector->base.index;
	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * TODO: Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	mutex_lock(&aconnector->hpd_lock);

	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
					   &link_loss, true, &has_left_work);

	if (!has_left_work)
		goto out;

	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
		goto out;
	}

	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
		    hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			dm_handle_mst_sideband_msg(aconnector);
			goto out;
		}

		if (link_loss) {
			bool skip = false;

			spin_lock(&offload_wq->offload_lock);
			skip = offload_wq->is_handling_link_loss;

			if (!skip)
				offload_wq->is_handling_link_loss = true;

			spin_unlock(&offload_wq->offload_lock);

			if (!skip)
				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);

			goto out;
		}
	}

out:
	if (result && !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif

	if (dc_link->type != dc_connection_mst_branch)
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);

	mutex_unlock(&aconnector->hpd_lock);
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);

			if (adev->dm.hpd_rx_offload_wq)
				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
					aconnector;
		}
	}
}

#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->family >= AMDGPU_FAMILY_AI)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	static const unsigned int vrtl_int_srcid[] = {
		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
	};
#endif

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use otg vertical line interrupt */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
				vrtl_int_srcid[i], &adev->vline0_irq);

		if (r) {
			DRM_ERROR("Failed to add vline0 irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
			break;
		}

		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
					- DC_IRQ_SOURCE_DC1_VLINE0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
	}
#endif

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
			i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

/* Register Outbox IRQ sources and initialize IRQ callbacks */
static int register_outbox_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r, i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
			&adev->dmub_outbox_irq);
	if (r) {
		DRM_ERROR("Failed to add outbox irq id!\n");
		return r;
	}

	if (dc->ctx->dmub_srv) {
		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.dmub_outbox_params[0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dmub_outbox1_low_irq, c_irq_params);
	}

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

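/*
 * Duplicate the DM private object state. The DC state context is
 * deep-copied via dc_copy_state() so the duplicated atomic state can be
 * validated and modified without touching the current hardware state.
 */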
static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
					    int bl_idx)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps[bl_idx].caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(&caps);
	if (caps.caps_valid) {
		dm->backlight_caps[bl_idx].caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps[bl_idx].min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps[bl_idx].max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps[bl_idx].aux_support)
		return;

	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

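/*
 * The helpers below map user brightness (0..AMDGPU_MAX_BL_LEVEL, i.e.
 * 0..255) onto the firmware range and back: AUX limits are reported in
 * nits and scaled by 1000 into the millinits the DC API expects, while
 * PWM limits are 8-bit and scaled by 0x101 to span the 16-bit PWM
 * control range.
 */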
static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned int *min, unsigned int *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned int min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned int min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}

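/*
 * Push a user brightness value to the hardware for the given eDP link,
 * using the AUX path when the panel supports it and the PWM path
 * otherwise. Returns 0 on success, 1 on failure.
 */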
static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
					 int bl_idx,
					 u32 user_brightness)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	dm->brightness[bl_idx] = user_brightness;
	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
	link = (struct dc_link *)dm->backlight_link[bl_idx];

	/* Change brightness based on AUX property */
	if (caps.aux_support) {
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
	} else {
		rc = dc_link_set_backlight_level(link, brightness, 0);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
	}

	return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);

	return 0;
}

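/*
 * Read the current brightness back from the link (average nits over
 * AUX, or the PWM level) and convert it to the 0..255 user range,
 * falling back to the last cached value if the hardware query fails.
 */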
static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
					 int bl_idx)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	if (caps.aux_support) {
		u32 avg, peak;
		bool rc;

		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
		if (!rc)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, avg);
	} else {
		int ret = dc_link_get_backlight_level(link);

		if (ret == DC_ERROR_UNEXPECTED)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, ret);
	}
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	return amdgpu_dm_backlight_get_level(dm, i);
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);

	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
								       adev_to_drm(dm->adev)->dev,
								       dm,
								       &amdgpu_dm_backlight_ops,
								       &props);

	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
#endif

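/*
 * Allocate a DRM plane of the given type, initialize it against the DC
 * plane capabilities and, when mode_info is provided (primary planes),
 * record it so the CRTC init code can find it later.
 */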
static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		if (!dm->backlight_dev[dm->num_of_edps])
			amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev[dm->num_of_edps]) {
			dm->backlight_link[dm->num_of_edps] = link;
			dm->num_of_edps++;
		}
	}
#endif
}

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;
	bool psr_feature_enabled = false;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* Use Outbox interrupt */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(2, 1, 0):
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
			      adev->ip_versions[DCE_HWIP][0]);
	}

	/* Determine whether to enable PSR support by default. */
	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			psr_feature_enabled = true;
			break;
		default:
			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
			break;
		}
	}
#endif

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);

			if (psr_feature_enabled)
				amdgpu_dm_set_psr_caps(link);
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
#if defined(CONFIG_DRM_AMD_DC_DCN)
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			if (dcn10_register_irq_handlers(dm->adev)) {
				DRM_ERROR("DM: Failed to initialize IRQ\n");
				goto fail;
			}
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
				  adev->ip_versions[DCE_HWIP][0]);
			goto fail;
		}
#endif
		break;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_atomic_private_obj_fini(&dm->atomic_obj);
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:
#if defined(CONFIG_DRM_AMD_DC_DCN)
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(3, 0, 0):
			adev->mode_info.num_crtc = 6;
			adev->mode_info.num_hpd = 6;
			adev->mode_info.num_dig = 6;
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(3, 0, 2):
			adev->mode_info.num_crtc = 5;
			adev->mode_info.num_hpd = 5;
			adev->mode_info.num_dig = 5;
			break;
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(3, 0, 3):
			adev->mode_info.num_crtc = 2;
			adev->mode_info.num_hpd = 2;
			adev->mode_info.num_dig = 2;
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			adev->mode_info.num_crtc = 4;
			adev->mode_info.num_hpd = 4;
			adev->mode_info.num_dig = 4;
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
				  adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
#endif
		break;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

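/*
 * Look up the per-format scaling limits from the DC plane caps. Limits
 * are expressed as scaling factors, where 1000 units == a factor of 1.0.
 */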
static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}

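/*
 * Translate the DRM plane state (16.16 fixed-point source rect, integer
 * destination rect) into a DC scaling_info and validate the resulting
 * scale factors against the per-format plane caps.
 */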
static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang. To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (state->fb &&
	    state->fb->format->format == DRM_FORMAT_NV12 &&
	    (scaling_info->src_rect.x != 0 ||
	     scaling_info->src_rect.y != 0))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}

static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

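/*
 * Check a plane's DCC (delta color compression) parameters against what
 * the DC hardware reports as supported for this format, swizzle mode and
 * surface size; returns -EINVAL when the combination cannot be enabled.
 */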
static int
validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

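/*
 * Helpers for decoding AMD format modifiers: DCC presence and the GFX9+
 * swizzle mode are packed into the modifier bits (DRM_FORMAT_MOD_LINEAR
 * decodes to swizzle mode 0).
 */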
static bool
modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned int
modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}

static void
fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
				    union dc_tiling_info *tiling_info,
				    uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);

	fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}

enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};

static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);
	int i;

	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow these modifiers:
	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR ||
	    modifier == DRM_FORMAT_MOD_INVALID) {
		return true;
	}

	/* Check that the modifier is on the list of the plane's supported modifiers. */
	for (i = 0; i < plane->modifier_count; i++) {
		if (modifier == plane->modifiers[i])
			break;
	}
	if (i == plane->modifier_count)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
		/*
		 * We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes.
		 */
		if (info->num_planes > 1)
			return false;
	}

	return true;
}

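/*
 * Append a modifier to the dynamically grown modifier array, doubling
 * its capacity when full. On allocation failure the array is freed and
 * *mods set to NULL, which subsequent calls treat as a no-op.
 */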
static void
add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}

static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);

	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			     AMD_FMT_MOD_SET(DCC, 1) |
			     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				     AMD_FMT_MOD_SET(RB, rb) |
				     AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			     AMD_FMT_MOD_SET(DCC, 1) |
			     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			     AMD_FMT_MOD_SET(RB, rb) |
			     AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}

5003static void
5004add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5005 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5006{
5007 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5008
5009 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5010 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5011 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5012 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5013 AMD_FMT_MOD_SET(DCC, 1) |
5014 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5015 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5016 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5017
5018 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5019 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5020 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5021 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5022 AMD_FMT_MOD_SET(DCC, 1) |
5023 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5024 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5025 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5026 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5027
5028 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5029 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5030 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5031 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5032
5033 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5034 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5035 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5036 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5037
5038
5039 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5040 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5041 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5042 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5043
5044 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5045 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5046 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5047}
5048
5049static void
5050add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5051 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5052{
5053 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5054 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5055
5056 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5057 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5058 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5059 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5060 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5061 AMD_FMT_MOD_SET(DCC, 1) |
5062 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5063 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5064 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5065 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5066
5067 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5068 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5069 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5070 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5071 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5072 AMD_FMT_MOD_SET(DCC, 1) |
5073 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5074 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5075 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5076
faa37f54
BN
5077 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5078 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5079 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5080 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5081 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5082 AMD_FMT_MOD_SET(DCC, 1) |
5083 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5084 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5085 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5086 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5087 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5088
5089 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5090 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5091 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5092 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5093 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5094 AMD_FMT_MOD_SET(DCC, 1) |
5095 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5096 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5097 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5098 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5099
5100 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5101 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5102 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5103 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5104 AMD_FMT_MOD_SET(PACKERS, pkrs));
5105
5106 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5107 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5108 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5109 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5110 AMD_FMT_MOD_SET(PACKERS, pkrs));
5111
5112 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5113 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5114 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5115 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5116
5117 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5118 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5119 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5120}
5121
5122static int
5123get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5124{
5125 uint64_t size = 0, capacity = 128;
5126 *mods = NULL;
5127
5128 /* We have not hooked up any pre-GFX9 modifiers. */
5129 if (adev->family < AMDGPU_FAMILY_AI)
5130 return 0;
5131
5132 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5133
5134 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5135 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5136 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5137 return *mods ? 0 : -ENOMEM;
5138 }
5139
5140 switch (adev->family) {
5141 case AMDGPU_FAMILY_AI:
5142 case AMDGPU_FAMILY_RV:
5143 add_gfx9_modifiers(adev, mods, &size, &capacity);
5144 break;
5145 case AMDGPU_FAMILY_NV:
5146 case AMDGPU_FAMILY_VGH:
5147 case AMDGPU_FAMILY_YC:
5148 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5149 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5150 else
5151 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5152 break;
5153 }
5154
5155 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5156
5157 /* INVALID marks the end of the list. */
5158 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5159
5160 if (!*mods)
5161 return -ENOMEM;
5162
5163 return 0;
5164}
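/*
 * Hypothetical consumer of get_plane_modifiers(), shown for illustration
 * only (the function name below is not in the driver): the returned array
 * is terminated by DRM_FORMAT_MOD_INVALID, so it can be walked without a
 * separate count. Pre-GFX9 parts return success with *mods left NULL,
 * hence the extra check.
 */
static void example_dump_plane_modifiers(struct amdgpu_device *adev)
{
	uint64_t *mods = NULL;
	int i;

	if (get_plane_modifiers(adev, DRM_PLANE_TYPE_PRIMARY, &mods) || !mods)
		return;

	for (i = 0; mods[i] != DRM_FORMAT_MOD_INVALID; i++)
		DRM_DEBUG_KMS("modifier[%d] = 0x%llx\n", i,
			      (unsigned long long)mods[i]);

	kfree(mods);
}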
5165
5166static int
5167fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5168 const struct amdgpu_framebuffer *afb,
5169 const enum surface_pixel_format format,
5170 const enum dc_rotation_angle rotation,
5171 const struct plane_size *plane_size,
5172 union dc_tiling_info *tiling_info,
5173 struct dc_plane_dcc_param *dcc,
5174 struct dc_plane_address *address,
5175 const bool force_disable_dcc)
5176{
5177 const uint64_t modifier = afb->base.modifier;
5178 int ret = 0;
5179
5180 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5181 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5182
5183 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5184 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5185 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5186 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5187
5188 dcc->enable = 1;
5189 dcc->meta_pitch = afb->base.pitches[1];
5190 dcc->independent_64b_blks = independent_64b_blks;
5191 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5192 if (independent_64b_blks && independent_128b_blks)
5193 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5194 else if (independent_128b_blks)
5195 dcc->dcc_ind_blk = hubp_ind_block_128b;
5196 else if (independent_64b_blks && !independent_128b_blks)
5197 dcc->dcc_ind_blk = hubp_ind_block_64b;
5198 else
5199 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5200 } else {
5201 if (independent_64b_blks)
5202 dcc->dcc_ind_blk = hubp_ind_block_64b;
5203 else
5204 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5205 }
5206
5207 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5208 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5209 }
5210
5211 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5212 if (ret)
5213 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5214
5215 return ret;
5216}
5217
5218static int
5219 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5220 const struct amdgpu_framebuffer *afb,
5221 const enum surface_pixel_format format,
5222 const enum dc_rotation_angle rotation,
5223 const uint64_t tiling_flags,
5224 union dc_tiling_info *tiling_info,
5225 struct plane_size *plane_size,
5226 struct dc_plane_dcc_param *dcc,
5227 struct dc_plane_address *address,
5228 bool tmz_surface,
5229 bool force_disable_dcc)
5230 {
5231 const struct drm_framebuffer *fb = &afb->base;
5232 int ret;
5233
5234 memset(tiling_info, 0, sizeof(*tiling_info));
5235 memset(plane_size, 0, sizeof(*plane_size));
5236 memset(dcc, 0, sizeof(*dcc));
5237 memset(address, 0, sizeof(*address));
5238
5239 address->tmz_surface = tmz_surface;
5240
5241 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5242 uint64_t addr = afb->address + fb->offsets[0];
5243
5244 plane_size->surface_size.x = 0;
5245 plane_size->surface_size.y = 0;
5246 plane_size->surface_size.width = fb->width;
5247 plane_size->surface_size.height = fb->height;
5248 plane_size->surface_pitch =
5249 fb->pitches[0] / fb->format->cpp[0];
5250
5251 address->type = PLN_ADDR_TYPE_GRAPHICS;
5252 address->grph.addr.low_part = lower_32_bits(addr);
5253 address->grph.addr.high_part = upper_32_bits(addr);
5254 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5255 uint64_t luma_addr = afb->address + fb->offsets[0];
5256 uint64_t chroma_addr = afb->address + fb->offsets[1];
5257
5258 plane_size->surface_size.x = 0;
5259 plane_size->surface_size.y = 0;
5260 plane_size->surface_size.width = fb->width;
5261 plane_size->surface_size.height = fb->height;
5262 plane_size->surface_pitch =
5263 fb->pitches[0] / fb->format->cpp[0];
5264
5265 plane_size->chroma_size.x = 0;
5266 plane_size->chroma_size.y = 0;
5267 /* TODO: set these based on surface format */
5268 plane_size->chroma_size.width = fb->width / 2;
5269 plane_size->chroma_size.height = fb->height / 2;
5270
5271 plane_size->chroma_pitch =
5272 fb->pitches[1] / fb->format->cpp[1];
5273
5274 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5275 address->video_progressive.luma_addr.low_part =
5276 lower_32_bits(luma_addr);
5277 address->video_progressive.luma_addr.high_part =
5278 upper_32_bits(luma_addr);
5279 address->video_progressive.chroma_addr.low_part =
5280 lower_32_bits(chroma_addr);
5281 address->video_progressive.chroma_addr.high_part =
5282 upper_32_bits(chroma_addr);
5283 }
5284
5285 if (adev->family >= AMDGPU_FAMILY_AI) {
5286 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5287 rotation, plane_size,
5288 tiling_info, dcc,
5289 address,
5290 force_disable_dcc);
5291 if (ret)
5292 return ret;
5293 } else {
5294 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5295 }
5296
5297 return 0;
5298}
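/*
 * Worked example (editorial): for a 1920x1080 NV12 framebuffer the luma
 * plane uses offsets[0]/pitches[0] with cpp[0] = 1, and the interleaved
 * CbCr plane uses offsets[1]/pitches[1] with cpp[1] = 2, so typically
 * surface_pitch = 1920 / 1 = 1920, chroma_pitch = 1920 / 2 = 960, and
 * chroma_size is 960x540, matching the width/2 x height/2 subsampling
 * assumed above.
 */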
5299
5300 static void
5301 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5302 bool *per_pixel_alpha, bool *global_alpha,
5303 int *global_alpha_value)
5304{
5305 *per_pixel_alpha = false;
5306 *global_alpha = false;
5307 *global_alpha_value = 0xff;
5308
5309 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5310 return;
5311
5312 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5313 static const uint32_t alpha_formats[] = {
5314 DRM_FORMAT_ARGB8888,
5315 DRM_FORMAT_RGBA8888,
5316 DRM_FORMAT_ABGR8888,
5317 };
5318 uint32_t format = plane_state->fb->format->format;
5319 unsigned int i;
5320
5321 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5322 if (format == alpha_formats[i]) {
5323 *per_pixel_alpha = true;
5324 break;
5325 }
5326 }
5327 }
5328
5329 if (plane_state->alpha < 0xffff) {
5330 *global_alpha = true;
5331 *global_alpha_value = plane_state->alpha >> 8;
5332 }
5333}
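/*
 * Example (editorial): drm_plane_state.alpha is 16 bit, so a userspace
 * alpha of 0x8000 (~50%) yields *global_alpha = true and a
 * *global_alpha_value of 0x80 after the >> 8 above, while the default
 * 0xffff leaves global alpha disabled.
 */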
5334
5335 static int
5336 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5337 const enum surface_pixel_format format,
5338 enum dc_color_space *color_space)
5339{
5340 bool full_range;
5341
5342 *color_space = COLOR_SPACE_SRGB;
5343
5344 /* DRM color properties only affect non-RGB formats. */
5345 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5346 return 0;
5347
5348 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5349
5350 switch (plane_state->color_encoding) {
5351 case DRM_COLOR_YCBCR_BT601:
5352 if (full_range)
5353 *color_space = COLOR_SPACE_YCBCR601;
5354 else
5355 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5356 break;
5357
5358 case DRM_COLOR_YCBCR_BT709:
5359 if (full_range)
5360 *color_space = COLOR_SPACE_YCBCR709;
5361 else
5362 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5363 break;
5364
5365 case DRM_COLOR_YCBCR_BT2020:
5366 if (full_range)
5367 *color_space = COLOR_SPACE_2020_YCBCR;
5368 else
5369 return -EINVAL;
5370 break;
5371
5372 default:
5373 return -EINVAL;
5374 }
5375
5376 return 0;
5377}
5378
5379 static int
5380 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5381 const struct drm_plane_state *plane_state,
5382 const uint64_t tiling_flags,
5383 struct dc_plane_info *plane_info,
5384 struct dc_plane_address *address,
5385 bool tmz_surface,
5386 bool force_disable_dcc)
5387{
5388 const struct drm_framebuffer *fb = plane_state->fb;
5389 const struct amdgpu_framebuffer *afb =
5390 to_amdgpu_framebuffer(plane_state->fb);
5391 int ret;
5392
5393 memset(plane_info, 0, sizeof(*plane_info));
5394
5395 switch (fb->format->format) {
5396 case DRM_FORMAT_C8:
5397 plane_info->format =
5398 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5399 break;
5400 case DRM_FORMAT_RGB565:
5401 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5402 break;
5403 case DRM_FORMAT_XRGB8888:
5404 case DRM_FORMAT_ARGB8888:
5405 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5406 break;
5407 case DRM_FORMAT_XRGB2101010:
5408 case DRM_FORMAT_ARGB2101010:
5409 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5410 break;
5411 case DRM_FORMAT_XBGR2101010:
5412 case DRM_FORMAT_ABGR2101010:
5413 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5414 break;
5415 case DRM_FORMAT_XBGR8888:
5416 case DRM_FORMAT_ABGR8888:
5417 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5418 break;
5419 case DRM_FORMAT_NV21:
5420 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5421 break;
5422 case DRM_FORMAT_NV12:
5423 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5424 break;
5425 case DRM_FORMAT_P010:
5426 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5427 break;
5428 case DRM_FORMAT_XRGB16161616F:
5429 case DRM_FORMAT_ARGB16161616F:
5430 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5431 break;
5432 case DRM_FORMAT_XBGR16161616F:
5433 case DRM_FORMAT_ABGR16161616F:
5434 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5435 break;
5436 case DRM_FORMAT_XRGB16161616:
5437 case DRM_FORMAT_ARGB16161616:
5438 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5439 break;
5440 case DRM_FORMAT_XBGR16161616:
5441 case DRM_FORMAT_ABGR16161616:
5442 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5443 break;
5444 default:
5445 DRM_ERROR(
5446 "Unsupported screen format %p4cc\n",
5447 &fb->format->format);
5448 return -EINVAL;
5449 }
5450
5451 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5452 case DRM_MODE_ROTATE_0:
5453 plane_info->rotation = ROTATION_ANGLE_0;
5454 break;
5455 case DRM_MODE_ROTATE_90:
5456 plane_info->rotation = ROTATION_ANGLE_90;
5457 break;
5458 case DRM_MODE_ROTATE_180:
5459 plane_info->rotation = ROTATION_ANGLE_180;
5460 break;
5461 case DRM_MODE_ROTATE_270:
5462 plane_info->rotation = ROTATION_ANGLE_270;
5463 break;
5464 default:
5465 plane_info->rotation = ROTATION_ANGLE_0;
5466 break;
5467 }
5468
5469 plane_info->visible = true;
5470 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5471
5472 plane_info->layer_index = 0;
5473
5474 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5475 &plane_info->color_space);
5476 if (ret)
5477 return ret;
5478
5479 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5480 plane_info->rotation, tiling_flags,
5481 &plane_info->tiling_info,
5482 &plane_info->plane_size,
5483 &plane_info->dcc, address, tmz_surface,
5484 force_disable_dcc);
5485 if (ret)
5486 return ret;
5487
5488 fill_blending_from_plane_state(
5489 plane_state, &plane_info->per_pixel_alpha,
5490 &plane_info->global_alpha, &plane_info->global_alpha_value);
5491
5492 return 0;
5493}
5494
5495static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5496 struct dc_plane_state *dc_plane_state,
5497 struct drm_plane_state *plane_state,
5498 struct drm_crtc_state *crtc_state)
5499 {
5500 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5501 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5502 struct dc_scaling_info scaling_info;
5503 struct dc_plane_info plane_info;
5504 int ret;
5505 bool force_disable_dcc = false;
5506
5507 ret = fill_dc_scaling_info(plane_state, &scaling_info);
5508 if (ret)
5509 return ret;
5510
5511 dc_plane_state->src_rect = scaling_info.src_rect;
5512 dc_plane_state->dst_rect = scaling_info.dst_rect;
5513 dc_plane_state->clip_rect = scaling_info.clip_rect;
5514 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5515
5516 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5517 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5518 afb->tiling_flags,
5519 &plane_info,
5520 &dc_plane_state->address,
5521 afb->tmz_surface,
5522 force_disable_dcc);
5523 if (ret)
5524 return ret;
5525
5526 dc_plane_state->format = plane_info.format;
5527 dc_plane_state->color_space = plane_info.color_space;
5529 dc_plane_state->plane_size = plane_info.plane_size;
5530 dc_plane_state->rotation = plane_info.rotation;
5531 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5532 dc_plane_state->stereo_format = plane_info.stereo_format;
5533 dc_plane_state->tiling_info = plane_info.tiling_info;
5534 dc_plane_state->visible = plane_info.visible;
5535 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5536 dc_plane_state->global_alpha = plane_info.global_alpha;
5537 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5538 dc_plane_state->dcc = plane_info.dcc;
5539 dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
5540 dc_plane_state->flip_int_enabled = true;
5541
5542 /*
5543 * Always set input transfer function, since plane state is refreshed
5544 * every time.
5545 */
5546 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5547 if (ret)
5548 return ret;
5549
5550 return 0;
5551}
5552
5553 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5554 const struct dm_connector_state *dm_state,
5555 struct dc_stream_state *stream)
5556{
5557 enum amdgpu_rmx_type rmx_type;
5558
5559 struct rect src = { 0 }; /* viewport in composition space */
5560 struct rect dst = { 0 }; /* stream addressable area */
5561
5562 /* no mode. nothing to be done */
5563 if (!mode)
5564 return;
5565
5566 /* Full screen scaling by default */
5567 src.width = mode->hdisplay;
5568 src.height = mode->vdisplay;
5569 dst.width = stream->timing.h_addressable;
5570 dst.height = stream->timing.v_addressable;
5571
5572 if (dm_state) {
5573 rmx_type = dm_state->scaling;
5574 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5575 if (src.width * dst.height <
5576 src.height * dst.width) {
5577 /* height needs less upscaling/more downscaling */
5578 dst.width = src.width *
5579 dst.height / src.height;
5580 } else {
5581 /* width needs less upscaling/more downscaling */
5582 dst.height = src.height *
5583 dst.width / src.width;
5584 }
5585 } else if (rmx_type == RMX_CENTER) {
5586 dst = src;
5587 }
5588
5589 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5590 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5591
5592 if (dm_state->underscan_enable) {
5593 dst.x += dm_state->underscan_hborder / 2;
5594 dst.y += dm_state->underscan_vborder / 2;
5595 dst.width -= dm_state->underscan_hborder;
5596 dst.height -= dm_state->underscan_vborder;
5597 }
5598 }
5599
5600 stream->src = src;
5601 stream->dst = dst;
5602
5603 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5604 dst.x, dst.y, dst.width, dst.height);
5605
5606}
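/*
 * Worked example (editorial): RMX_ASPECT with a 1280x1024 source on a
 * 1920x1080 addressable stream: 1280 * 1080 < 1024 * 1920, so the
 * destination width shrinks to 1280 * 1080 / 1024 = 1350, centered at
 * dst.x = (1920 - 1350) / 2 = 285, dst.y = 0.
 */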
5607
5608 static enum dc_color_depth
5609 convert_color_depth_from_display_info(const struct drm_connector *connector,
5610 bool is_y420, int requested_bpc)
5611 {
5612 uint8_t bpc;
5613
5614 if (is_y420) {
5615 bpc = 8;
5616
5617 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5618 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5619 bpc = 16;
5620 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5621 bpc = 12;
5622 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5623 bpc = 10;
5624 } else {
5625 bpc = (uint8_t)connector->display_info.bpc;
5626 /* Assume 8 bpc by default if no bpc is specified. */
5627 bpc = bpc ? bpc : 8;
5628 }
5629
5630 if (requested_bpc > 0) {
5631 /*
5632 * Cap display bpc based on the user requested value.
5633 *
5634 * The value for state->max_bpc may not be correctly updated
5635 * depending on when the connector gets added to the state
5636 * or if this was called outside of atomic check, so it
5637 * can't be used directly.
5638 */
5639 bpc = min_t(u8, bpc, requested_bpc);
5640
5641 /* Round down to the nearest even number. */
5642 bpc = bpc - (bpc & 1);
5643 }
5644
5645 switch (bpc) {
5646 case 0:
5647 /*
5648 * Temporary workaround: DRM doesn't parse color depth for
5649 * EDID revision before 1.4
5650 * TODO: Fix edid parsing
5651 */
5652 return COLOR_DEPTH_888;
5653 case 6:
5654 return COLOR_DEPTH_666;
5655 case 8:
5656 return COLOR_DEPTH_888;
5657 case 10:
5658 return COLOR_DEPTH_101010;
5659 case 12:
5660 return COLOR_DEPTH_121212;
5661 case 14:
5662 return COLOR_DEPTH_141414;
5663 case 16:
5664 return COLOR_DEPTH_161616;
5665 default:
5666 return COLOR_DEPTH_UNDEFINED;
5667 }
5668}
5669
5670static enum dc_aspect_ratio
5671get_aspect_ratio(const struct drm_display_mode *mode_in)
5672 {
5673 /* 1-1 mapping, since both enums follow the HDMI spec. */
5674 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5675}
5676
5677static enum dc_color_space
5678get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5679{
5680 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5681
5682 switch (dc_crtc_timing->pixel_encoding) {
5683 case PIXEL_ENCODING_YCBCR422:
5684 case PIXEL_ENCODING_YCBCR444:
5685 case PIXEL_ENCODING_YCBCR420:
5686 {
5687 /*
5688 * 27030khz is the separation point between HDTV and SDTV
5689 * according to HDMI spec, we use YCbCr709 and YCbCr601
5690 * respectively
5691 */
5692 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5693 if (dc_crtc_timing->flags.Y_ONLY)
5694 color_space =
5695 COLOR_SPACE_YCBCR709_LIMITED;
5696 else
5697 color_space = COLOR_SPACE_YCBCR709;
5698 } else {
5699 if (dc_crtc_timing->flags.Y_ONLY)
5700 color_space =
5701 COLOR_SPACE_YCBCR601_LIMITED;
5702 else
5703 color_space = COLOR_SPACE_YCBCR601;
5704 }
5705
5706 }
5707 break;
5708 case PIXEL_ENCODING_RGB:
5709 color_space = COLOR_SPACE_SRGB;
5710 break;
5711
5712 default:
5713 WARN_ON(1);
5714 break;
5715 }
5716
5717 return color_space;
5718}
5719
5720static bool adjust_colour_depth_from_display_info(
5721 struct dc_crtc_timing *timing_out,
5722 const struct drm_display_info *info)
5723 {
5724 enum dc_color_depth depth = timing_out->display_color_depth;
5725 int normalized_clk;
5726 do {
5727 normalized_clk = timing_out->pix_clk_100hz / 10;
5728 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5729 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5730 normalized_clk /= 2;
5731 /* Adjusting pix clock following on HDMI spec based on colour depth */
5732 switch (depth) {
5733 case COLOR_DEPTH_888:
5734 break;
5735 case COLOR_DEPTH_101010:
5736 normalized_clk = (normalized_clk * 30) / 24;
5737 break;
5738 case COLOR_DEPTH_121212:
5739 normalized_clk = (normalized_clk * 36) / 24;
5740 break;
5741 case COLOR_DEPTH_161616:
5742 normalized_clk = (normalized_clk * 48) / 24;
5743 break;
5744 default:
5745 /* The above depths are the only ones valid for HDMI. */
5746 return false;
5747 }
5748 if (normalized_clk <= info->max_tmds_clock) {
5749 timing_out->display_color_depth = depth;
5750 return true;
5751 }
5752 } while (--depth > COLOR_DEPTH_666);
5753 return false;
5754}
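/*
 * Worked example (editorial, hypothetical numbers): a 594 MHz 4:2:0
 * stream (pix_clk_100hz = 5940000) starts from normalized_clk =
 * 594000 / 2 = 297000 kHz. At 12 bpc that scales to 297000 * 36 / 24 =
 * 445500 kHz; against a 340000 kHz max_tmds_clock the loop steps down
 * through 10 bpc (371250 kHz) to 8 bpc (297000 kHz), the first depth
 * that fits.
 */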
5755
5756static void fill_stream_properties_from_drm_display_mode(
5757 struct dc_stream_state *stream,
5758 const struct drm_display_mode *mode_in,
5759 const struct drm_connector *connector,
5760 const struct drm_connector_state *connector_state,
5761 const struct dc_stream_state *old_stream,
5762 int requested_bpc)
5763{
5764 struct dc_crtc_timing *timing_out = &stream->timing;
5765 const struct drm_display_info *info = &connector->display_info;
5766 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5767 struct hdmi_vendor_infoframe hv_frame;
5768 struct hdmi_avi_infoframe avi_frame;
5769
5770 memset(&hv_frame, 0, sizeof(hv_frame));
5771 memset(&avi_frame, 0, sizeof(avi_frame));
5772
5773 timing_out->h_border_left = 0;
5774 timing_out->h_border_right = 0;
5775 timing_out->v_border_top = 0;
5776 timing_out->v_border_bottom = 0;
5777 /* TODO: un-hardcode */
5778 if (drm_mode_is_420_only(info, mode_in)
5779 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5780 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5781 else if (drm_mode_is_420_also(info, mode_in)
5782 && aconnector->force_yuv420_output)
5783 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5784 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5785 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5786 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5787 else
5788 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5789
5790 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5791 timing_out->display_color_depth = convert_color_depth_from_display_info(
5792 connector,
5793 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5794 requested_bpc);
5795 timing_out->scan_type = SCANNING_TYPE_NODATA;
5796 timing_out->hdmi_vic = 0;
5797
5798 if (old_stream) {
5799 timing_out->vic = old_stream->timing.vic;
5800 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5801 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5802 } else {
5803 timing_out->vic = drm_match_cea_mode(mode_in);
5804 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5805 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5806 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5807 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5808 }
5809
5810 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5811 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5812 timing_out->vic = avi_frame.video_code;
5813 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5814 timing_out->hdmi_vic = hv_frame.vic;
5815 }
5816
5817 if (is_freesync_video_mode(mode_in, aconnector)) {
5818 timing_out->h_addressable = mode_in->hdisplay;
5819 timing_out->h_total = mode_in->htotal;
5820 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5821 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5822 timing_out->v_total = mode_in->vtotal;
5823 timing_out->v_addressable = mode_in->vdisplay;
5824 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5825 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5826 timing_out->pix_clk_100hz = mode_in->clock * 10;
5827 } else {
5828 timing_out->h_addressable = mode_in->crtc_hdisplay;
5829 timing_out->h_total = mode_in->crtc_htotal;
5830 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5831 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5832 timing_out->v_total = mode_in->crtc_vtotal;
5833 timing_out->v_addressable = mode_in->crtc_vdisplay;
5834 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5835 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5836 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5837 }
5838
5839 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5840
5841 stream->output_color_space = get_output_color_space(timing_out);
5842
5843 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5844 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5845 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5846 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5847 drm_mode_is_420_also(info, mode_in) &&
5848 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5849 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5850 adjust_colour_depth_from_display_info(timing_out, info);
5851 }
5852 }
5853}
5854
5855static void fill_audio_info(struct audio_info *audio_info,
5856 const struct drm_connector *drm_connector,
5857 const struct dc_sink *dc_sink)
5858{
5859 int i = 0;
5860 int cea_revision = 0;
5861 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5862
5863 audio_info->manufacture_id = edid_caps->manufacturer_id;
5864 audio_info->product_id = edid_caps->product_id;
5865
5866 cea_revision = drm_connector->display_info.cea_rev;
5867
5868 strscpy(audio_info->display_name,
5869 edid_caps->display_name,
5870 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5871
5872 if (cea_revision >= 3) {
5873 audio_info->mode_count = edid_caps->audio_mode_count;
5874
5875 for (i = 0; i < audio_info->mode_count; ++i) {
5876 audio_info->modes[i].format_code =
5877 (enum audio_format_code)
5878 (edid_caps->audio_modes[i].format_code);
5879 audio_info->modes[i].channel_count =
5880 edid_caps->audio_modes[i].channel_count;
5881 audio_info->modes[i].sample_rates.all =
5882 edid_caps->audio_modes[i].sample_rate;
5883 audio_info->modes[i].sample_size =
5884 edid_caps->audio_modes[i].sample_size;
5885 }
5886 }
5887
5888 audio_info->flags.all = edid_caps->speaker_flags;
5889
5890 /* TODO: We only check for the progressive mode, check for interlace mode too */
5891 if (drm_connector->latency_present[0]) {
5892 audio_info->video_latency = drm_connector->video_latency[0];
5893 audio_info->audio_latency = drm_connector->audio_latency[0];
5894 }
5895
5896 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5897
5898}
5899
5900static void
5901copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5902 struct drm_display_mode *dst_mode)
5903{
5904 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5905 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5906 dst_mode->crtc_clock = src_mode->crtc_clock;
5907 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5908 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5909 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5910 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5911 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5912 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5913 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5914 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5915 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5916 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5917 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5918}
5919
5920static void
5921decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5922 const struct drm_display_mode *native_mode,
5923 bool scale_enabled)
5924{
5925 if (scale_enabled) {
5926 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5927 } else if (native_mode->clock == drm_mode->clock &&
5928 native_mode->htotal == drm_mode->htotal &&
5929 native_mode->vtotal == drm_mode->vtotal) {
5930 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5931 } else {
5932 /* no scaling nor amdgpu inserted, no need to patch */
5933 }
5934}
5935
5936static struct dc_sink *
5937create_fake_sink(struct amdgpu_dm_connector *aconnector)
5938 {
5939 struct dc_sink_init_data sink_init_data = { 0 };
5940 struct dc_sink *sink = NULL;
5941 sink_init_data.link = aconnector->dc_link;
5942 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5943
5944 sink = dc_sink_create(&sink_init_data);
5945 if (!sink) {
5946 DRM_ERROR("Failed to create sink!\n");
5947 return NULL;
5948 }
5949 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5950
5951 return sink;
5952}
5953
5954static void set_multisync_trigger_params(
5955 struct dc_stream_state *stream)
5956{
5957 struct dc_stream_state *master = NULL;
5958
5959 if (stream->triggered_crtc_reset.enabled) {
5960 master = stream->triggered_crtc_reset.event_source;
5961 stream->triggered_crtc_reset.event =
5962 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5963 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5964 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5965 }
5966}
5967
5968static void set_master_stream(struct dc_stream_state *stream_set[],
5969 int stream_count)
5970{
5971 int j, highest_rfr = 0, master_stream = 0;
5972
5973 for (j = 0; j < stream_count; j++) {
5974 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5975 int refresh_rate = 0;
5976
5977 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5978 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5979 if (refresh_rate > highest_rfr) {
5980 highest_rfr = refresh_rate;
5981 master_stream = j;
5982 }
5983 }
5984 }
5985 for (j = 0; j < stream_count; j++) {
5986 if (stream_set[j])
5987 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5988 }
5989}
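/*
 * Example (editorial): for a 1080p candidate with pix_clk_100hz =
 * 1485000, h_total = 2200 and v_total = 1125, the refresh expression
 * above evaluates to (1485000 * 100) / (2200 * 1125) = 60 Hz.
 */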
5990
5991static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5992{
5993 int i = 0;
5994 struct dc_stream_state *stream;
5995
5996 if (context->stream_count < 2)
5997 return;
5998 for (i = 0; i < context->stream_count ; i++) {
5999 if (!context->streams[i])
6000 continue;
6001 /*
6002 * TODO: add a function to read AMD VSDB bits and set
6003 * crtc_sync_master.multi_sync_enabled flag
6004 * For now it's set to false
6005 */
6006 }
6007
6008 set_master_stream(context->streams, context->stream_count);
6009
6010 for (i = 0; i < context->stream_count ; i++) {
6011 stream = context->streams[i];
6012
6013 if (!stream)
6014 continue;
6015
6016 set_multisync_trigger_params(stream);
6017 }
6018}
6019
6020 #if defined(CONFIG_DRM_AMD_DC_DCN)
6021static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6022 struct dc_sink *sink, struct dc_stream_state *stream,
6023 struct dsc_dec_dpcd_caps *dsc_caps)
6024{
6025 stream->timing.flags.DSC = 0;
6026
6027 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6028 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6029 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6030 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6031 dsc_caps);
6032 }
6033}
6034
6035static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6036 struct dc_sink *sink, struct dc_stream_state *stream,
6037 struct dsc_dec_dpcd_caps *dsc_caps)
6038{
6039 struct drm_connector *drm_connector = &aconnector->base;
6040 uint32_t link_bandwidth_kbps;
6041 uint32_t max_dsc_target_bpp_limit_override = 0;
6042
6043 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6044 dc_link_get_link_cap(aconnector->dc_link));
6045
6046 if (stream->link && stream->link->local_sink)
6047 max_dsc_target_bpp_limit_override =
6048 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6049
6050 /* Set DSC policy according to dsc_clock_en */
6051 dc_dsc_policy_set_enable_dsc_when_not_needed(
6052 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6053
6054 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6055
6056 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6057 dsc_caps,
6058 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6059 max_dsc_target_bpp_limit_override,
6060 link_bandwidth_kbps,
6061 &stream->timing,
6062 &stream->timing.dsc_cfg)) {
6063 stream->timing.flags.DSC = 1;
6064 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6065 }
6066 }
6067
6068 /* Overwrite the stream flag if DSC is enabled through debugfs */
6069 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6070 stream->timing.flags.DSC = 1;
6071
6072 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6073 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6074
6075 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6076 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6077
6078 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6079 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6080}
6081 #endif
6082
6083/**
6084 * DOC: FreeSync Video
6085 *
6086 * When a userspace application wants to play a video, the content follows a
6087 * standard format definition that usually specifies the FPS for that format.
6088 * The list below illustrates some video formats and their expected FPS,
6089 * respectively:
6090 *
6091 * - TV/NTSC (23.976 FPS)
6092 * - Cinema (24 FPS)
6093 * - TV/PAL (25 FPS)
6094 * - TV/NTSC (29.97 FPS)
6095 * - TV/NTSC (30 FPS)
6096 * - Cinema HFR (48 FPS)
6097 * - TV/PAL (50 FPS)
6098 * - Commonly used (60 FPS)
6099 * - Multiples of 24 (48, 72, 96, 120 FPS)
6100 *
6101 * The list of standard video formats is short and can be added to the
6102 * connector's modeset list beforehand. With that, userspace can leverage
6103 * FreeSync to extend the front porch in order to attain the target refresh
6104 * rate. Such a switch will happen seamlessly, without screen blanking or
6105 * reprogramming of the output in any other way. If the userspace requests a
6106 * modesetting change compatible with FreeSync modes that only differ in the
6107 * refresh rate, DC will skip the full update and avoid blink during the
6108 * transition. For example, the video player can change the modesetting from
6109 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6110 * causing any display blink. This same concept can be applied to a mode
6111 * setting change.
6112 */
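/*
 * Worked example (editorial, illustrative numbers): a 1920x1080 base mode
 * with h_total = 2200, v_total = 1125 and a 148.5 MHz pixel clock gives
 * 148500000 / (2200 * 1125) = 60 Hz. To present 48 FPS content at the
 * same pixel clock, the front porch is stretched until
 * v_total = 148500000 / (2200 * 48) ~= 1406; the extra ~281 lines go
 * into v_front_porch while the addressable area is unchanged.
 */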
6113static struct drm_display_mode *
6114get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6115 bool use_probed_modes)
6116{
6117 struct drm_display_mode *m, *m_pref = NULL;
6118 u16 current_refresh, highest_refresh;
6119 struct list_head *list_head = use_probed_modes ?
6120 &aconnector->base.probed_modes :
6121 &aconnector->base.modes;
6122
6123 if (aconnector->freesync_vid_base.clock != 0)
6124 return &aconnector->freesync_vid_base;
6125
6126 /* Find the preferred mode */
6127 list_for_each_entry (m, list_head, head) {
6128 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6129 m_pref = m;
6130 break;
6131 }
6132 }
6133
6134 if (!m_pref) {
6135 /* Probably an EDID with no preferred mode. Fallback to first entry */
6136 m_pref = list_first_entry_or_null(
6137 &aconnector->base.modes, struct drm_display_mode, head);
6138 if (!m_pref) {
6139 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6140 return NULL;
6141 }
6142 }
6143
6144 highest_refresh = drm_mode_vrefresh(m_pref);
6145
6146 /*
6147 * Find the mode with highest refresh rate with same resolution.
6148 * For some monitors, preferred mode is not the mode with highest
6149 * supported refresh rate.
6150 */
6151 list_for_each_entry (m, list_head, head) {
6152 current_refresh = drm_mode_vrefresh(m);
6153
6154 if (m->hdisplay == m_pref->hdisplay &&
6155 m->vdisplay == m_pref->vdisplay &&
6156 highest_refresh < current_refresh) {
6157 highest_refresh = current_refresh;
6158 m_pref = m;
6159 }
6160 }
6161
6162 aconnector->freesync_vid_base = *m_pref;
6163 return m_pref;
6164}
6165
6166 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6167 struct amdgpu_dm_connector *aconnector)
6168{
6169 struct drm_display_mode *high_mode;
6170 int timing_diff;
6171
6172 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6173 if (!high_mode || !mode)
6174 return false;
6175
6176 timing_diff = high_mode->vtotal - mode->vtotal;
6177
6178 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6179 high_mode->hdisplay != mode->hdisplay ||
6180 high_mode->vdisplay != mode->vdisplay ||
6181 high_mode->hsync_start != mode->hsync_start ||
6182 high_mode->hsync_end != mode->hsync_end ||
6183 high_mode->htotal != mode->htotal ||
6184 high_mode->hskew != mode->hskew ||
6185 high_mode->vscan != mode->vscan ||
6186 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6187 high_mode->vsync_end - mode->vsync_end != timing_diff)
6188 return false;
6189 else
6190 return true;
6191}
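/*
 * Example (editorial): with a 60 Hz base mode of vtotal = 1125 and a
 * derived 48 Hz candidate of vtotal = 1406, timing_diff above is -281;
 * the candidate only qualifies if its vsync_start and vsync_end are
 * shifted by exactly those 281 extra front-porch lines, i.e. the modes
 * differ purely in vertical blanking.
 */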
6192
6193static struct dc_stream_state *
6194create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6195 const struct drm_display_mode *drm_mode,
6196 const struct dm_connector_state *dm_state,
6197 const struct dc_stream_state *old_stream,
6198 int requested_bpc)
6199{
6200 struct drm_display_mode *preferred_mode = NULL;
6201 struct drm_connector *drm_connector;
6202 const struct drm_connector_state *con_state =
6203 dm_state ? &dm_state->base : NULL;
6204 struct dc_stream_state *stream = NULL;
6205 struct drm_display_mode mode = *drm_mode;
6206 struct drm_display_mode saved_mode;
6207 struct drm_display_mode *freesync_mode = NULL;
6208 bool native_mode_found = false;
6209 bool recalculate_timing = false;
6210 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6211 int mode_refresh;
6212 int preferred_refresh = 0;
6213 #if defined(CONFIG_DRM_AMD_DC_DCN)
6214 struct dsc_dec_dpcd_caps dsc_caps;
6215 #endif
6216 struct dc_sink *sink = NULL;
6217
6218 memset(&saved_mode, 0, sizeof(saved_mode));
6219
6220 if (aconnector == NULL) {
6221 DRM_ERROR("aconnector is NULL!\n");
6222 return stream;
6223 }
6224
6225 drm_connector = &aconnector->base;
6226
6227 if (!aconnector->dc_sink) {
6228 sink = create_fake_sink(aconnector);
6229 if (!sink)
6230 return stream;
6231 } else {
6232 sink = aconnector->dc_sink;
6233 dc_sink_retain(sink);
6234 }
6235
6236 stream = dc_create_stream_for_sink(sink);
6237
6238 if (stream == NULL) {
6239 DRM_ERROR("Failed to create stream for sink!\n");
6240 goto finish;
6241 }
6242
6243 stream->dm_stream_context = aconnector;
6244
6245 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6246 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6247
6248 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6249 /* Search for preferred mode */
6250 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6251 native_mode_found = true;
6252 break;
6253 }
6254 }
6255 if (!native_mode_found)
6256 preferred_mode = list_first_entry_or_null(
6257 &aconnector->base.modes,
6258 struct drm_display_mode,
6259 head);
6260
6261 mode_refresh = drm_mode_vrefresh(&mode);
6262
6263 if (preferred_mode == NULL) {
6264 /*
6265 * This may not be an error. The use case is when we have no
6266 * usermode calls to reset and set mode upon hotplug. In this
6267 * case, we call set mode ourselves to restore the previous mode
6268 * and the mode list may not be populated in time.
6269 */
6270 DRM_DEBUG_DRIVER("No preferred mode found\n");
6271 } else {
6272 recalculate_timing = amdgpu_freesync_vid_mode &&
6273 is_freesync_video_mode(&mode, aconnector);
6274 if (recalculate_timing) {
6275 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6276 saved_mode = mode;
6277 mode = *freesync_mode;
6278 } else {
6279 decide_crtc_timing_for_drm_display_mode(
6280 &mode, preferred_mode, scale);
6281
6282 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6283 }
6284 }
6285
6286 if (recalculate_timing)
6287 drm_mode_set_crtcinfo(&saved_mode, 0);
6288 else if (!dm_state)
6289 drm_mode_set_crtcinfo(&mode, 0);
6290
6291 /*
6292 * If scaling is enabled and the refresh rate didn't change,
6293 * we copy the vic and polarities of the old timings.
6294 */
6295 if (!scale || mode_refresh != preferred_refresh)
6296 fill_stream_properties_from_drm_display_mode(
6297 stream, &mode, &aconnector->base, con_state, NULL,
6298 requested_bpc);
6299 else
6300 fill_stream_properties_from_drm_display_mode(
6301 stream, &mode, &aconnector->base, con_state, old_stream,
6302 requested_bpc);
6303
6304 #if defined(CONFIG_DRM_AMD_DC_DCN)
6305 /* SST DSC determination policy */
6306 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6307 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6308 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6309#endif
6310
6311 update_stream_scaling_settings(&mode, dm_state, stream);
6312
6313 fill_audio_info(
6314 &stream->audio_info,
6315 drm_connector,
6316 sink);
6317
6318 update_stream_signal(stream, sink);
6319
6320 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6321 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6322
6323 if (stream->link->psr_settings.psr_feature_enabled) {
6324 /*
6325 * Decide whether the stream supports VSC SDP colorimetry
6326 * before building the VSC info packet.
6327 */
6328 stream->use_vsc_sdp_for_colorimetry = false;
6329 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6330 stream->use_vsc_sdp_for_colorimetry =
6331 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6332 } else {
6333 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6334 stream->use_vsc_sdp_for_colorimetry = true;
6335 }
6336 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6337 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6338
6339 }
6340 finish:
6341 dc_sink_release(sink);
6342
6343 return stream;
6344}
6345
6346 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6347{
6348 drm_crtc_cleanup(crtc);
6349 kfree(crtc);
6350}
6351
6352static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6353 struct drm_crtc_state *state)
6354{
6355 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6356
6357 /* TODO: Destroy dc_stream objects once the stream object is flattened */
6358 if (cur->stream)
6359 dc_stream_release(cur->stream);
6360
6361
6362 __drm_atomic_helper_crtc_destroy_state(state);
6363
6364
6365 kfree(state);
6366}
6367
6368static void dm_crtc_reset_state(struct drm_crtc *crtc)
6369{
6370 struct dm_crtc_state *state;
6371
6372 if (crtc->state)
6373 dm_crtc_destroy_state(crtc, crtc->state);
6374
6375 state = kzalloc(sizeof(*state), GFP_KERNEL);
6376 if (WARN_ON(!state))
6377 return;
6378
6379 __drm_atomic_helper_crtc_reset(crtc, &state->base);
6380}
6381
6382static struct drm_crtc_state *
6383dm_crtc_duplicate_state(struct drm_crtc *crtc)
6384{
6385 struct dm_crtc_state *state, *cur;
6386
6387 cur = to_dm_crtc_state(crtc->state);
6388
6389 if (WARN_ON(!crtc->state))
6390 return NULL;
6391
6392 state = kzalloc(sizeof(*state), GFP_KERNEL);
6393 if (!state)
6394 return NULL;
6395
6396 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6397
6398 if (cur->stream) {
6399 state->stream = cur->stream;
6400 dc_stream_retain(state->stream);
6401 }
6402
6403 state->active_planes = cur->active_planes;
6404 state->vrr_infopacket = cur->vrr_infopacket;
6405 state->abm_level = cur->abm_level;
6406 state->vrr_supported = cur->vrr_supported;
6407 state->freesync_config = cur->freesync_config;
6408 state->cm_has_degamma = cur->cm_has_degamma;
6409 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6410 state->force_dpms_off = cur->force_dpms_off;
6411 /* TODO: Duplicate dc_stream after the stream object is flattened */
6412
6413 return &state->base;
6414}
6415
6416 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6417 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6418{
6419 crtc_debugfs_init(crtc);
6420
6421 return 0;
6422}
6423#endif
6424
d2574c33
MK
6425static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6426{
6427 enum dc_irq_source irq_source;
6428 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6429 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
6430 int rc;
6431
6432 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6433
6434 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6435
6436 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6437 acrtc->crtc_id, enable ? "en" : "dis", rc);
6438 return rc;
6439}
6440
6441static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6442{
6443 enum dc_irq_source irq_source;
6444 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6445 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6446 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6447 #if defined(CONFIG_DRM_AMD_DC_DCN)
6448 struct amdgpu_display_manager *dm = &adev->dm;
6449 struct vblank_control_work *work;
6450 #endif
6451 int rc = 0;
6452
6453 if (enable) {
6454 /* vblank irq on -> Only need vupdate irq in vrr mode */
6455 if (amdgpu_dm_vrr_active(acrtc_state))
6456 rc = dm_set_vupdate_irq(crtc, true);
6457 } else {
6458 /* vblank irq off -> vupdate irq off */
6459 rc = dm_set_vupdate_irq(crtc, false);
6460 }
6461
6462 if (rc)
6463 return rc;
589d2739
HW
6464
6465 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
6466
6467 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6468 return -EBUSY;
6469
98ab5f35
BL
6470 if (amdgpu_in_reset(adev))
6471 return 0;
6472
4928b480 6473#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
6474 if (dm->vblank_control_workqueue) {
6475 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6476 if (!work)
6477 return -ENOMEM;
09a5df6c 6478
06dd1888
NK
6479 INIT_WORK(&work->work, vblank_control_worker);
6480 work->dm = dm;
6481 work->acrtc = acrtc;
6482 work->enable = enable;
09a5df6c 6483
06dd1888
NK
6484 if (acrtc_state->stream) {
6485 dc_stream_retain(acrtc_state->stream);
6486 work->stream = acrtc_state->stream;
6487 }
58aa1c50 6488
06dd1888
NK
6489 queue_work(dm->vblank_control_workqueue, &work->work);
6490 }
4928b480 6491#endif
71338cb4 6492
71338cb4 6493 return 0;
589d2739
HW
6494}
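/*
 * Editor's note, not part of the driver: the GFP_ATOMIC allocation and
 * dc_stream_retain() above exist because this path can run with IRQs
 * disabled, while the DC call itself must happen in process context.
 * A minimal sketch of the consuming side under that assumption (the
 * real vblank_control_worker lives elsewhere in this file):
 */
static void example_vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);

	/* ... act on vblank_work->enable in process context ... */

	/* Drop the reference taken in dm_set_vblank() and free the work. */
	if (vblank_work->stream)
		dc_stream_release(vblank_work->stream);
	kfree(vblank_work);
}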
6495
6496static int dm_enable_vblank(struct drm_crtc *crtc)
6497{
6498 return dm_set_vblank(crtc, true);
6499}
6500
6501static void dm_disable_vblank(struct drm_crtc *crtc)
6502{
6503 dm_set_vblank(crtc, false);
6504}
6505
e7b07cee
HW
6506 /* Implemented only the options currently available for the driver */
6507static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6508 .reset = dm_crtc_reset_state,
6509 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
6510 .set_config = drm_atomic_helper_set_config,
6511 .page_flip = drm_atomic_helper_page_flip,
6512 .atomic_duplicate_state = dm_crtc_duplicate_state,
6513 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 6514 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 6515 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 6516 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 6517 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
6518 .enable_vblank = dm_enable_vblank,
6519 .disable_vblank = dm_disable_vblank,
e3eff4b5 6520 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
86bc2219
WL
6521#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6522 .late_register = amdgpu_dm_crtc_late_register,
6523#endif
e7b07cee
HW
6524};
6525
6526static enum drm_connector_status
6527amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6528{
6529 bool connected;
c84dec2f 6530 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6531
1f6010a9
DF
6532 /*
6533 * Notes:
e7b07cee
HW
6534	 * 1. This interface is NOT called in the context of the HPD irq.
6535	 * 2. This interface *is called* in the context of a user-mode ioctl, which
1f6010a9
DF
6536 * makes it a bad place for *any* MST-related activity.
6537 */
e7b07cee 6538
8580d60b
HW
6539 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6540 !aconnector->fake_enable)
e7b07cee
HW
6541 connected = (aconnector->dc_sink != NULL);
6542 else
6543 connected = (aconnector->base.force == DRM_FORCE_ON);
6544
0f877894
OV
6545 update_subconnector_property(aconnector);
6546
e7b07cee
HW
6547 return (connected ? connector_status_connected :
6548 connector_status_disconnected);
6549}
6550
3ee6b26b
AD
6551int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6552 struct drm_connector_state *connector_state,
6553 struct drm_property *property,
6554 uint64_t val)
e7b07cee
HW
6555{
6556 struct drm_device *dev = connector->dev;
1348969a 6557 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6558 struct dm_connector_state *dm_old_state =
6559 to_dm_connector_state(connector->state);
6560 struct dm_connector_state *dm_new_state =
6561 to_dm_connector_state(connector_state);
6562
6563 int ret = -EINVAL;
6564
6565 if (property == dev->mode_config.scaling_mode_property) {
6566 enum amdgpu_rmx_type rmx_type;
6567
6568 switch (val) {
6569 case DRM_MODE_SCALE_CENTER:
6570 rmx_type = RMX_CENTER;
6571 break;
6572 case DRM_MODE_SCALE_ASPECT:
6573 rmx_type = RMX_ASPECT;
6574 break;
6575 case DRM_MODE_SCALE_FULLSCREEN:
6576 rmx_type = RMX_FULL;
6577 break;
6578 case DRM_MODE_SCALE_NONE:
6579 default:
6580 rmx_type = RMX_OFF;
6581 break;
6582 }
6583
6584 if (dm_old_state->scaling == rmx_type)
6585 return 0;
6586
6587 dm_new_state->scaling = rmx_type;
6588 ret = 0;
6589 } else if (property == adev->mode_info.underscan_hborder_property) {
6590 dm_new_state->underscan_hborder = val;
6591 ret = 0;
6592 } else if (property == adev->mode_info.underscan_vborder_property) {
6593 dm_new_state->underscan_vborder = val;
6594 ret = 0;
6595 } else if (property == adev->mode_info.underscan_property) {
6596 dm_new_state->underscan_enable = val;
6597 ret = 0;
c1ee92f9
DF
6598 } else if (property == adev->mode_info.abm_level_property) {
6599 dm_new_state->abm_level = val;
6600 ret = 0;
e7b07cee
HW
6601 }
6602
6603 return ret;
6604}
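/*
 * Editor's illustration: userspace reaches this handler through the
 * generic DRM property ioctls. A hedged libdrm sketch, assuming the
 * property id for the standard "scaling mode" property has already been
 * looked up by name:
 *
 *	// request "Full aspect" scaling on the connector
 *	drmModeObjectSetProperty(fd, connector_id,
 *				 DRM_MODE_OBJECT_CONNECTOR,
 *				 scaling_mode_prop_id,
 *				 DRM_MODE_SCALE_ASPECT);
 *
 * The switch above then maps DRM_MODE_SCALE_ASPECT to RMX_ASPECT in the
 * driver's dm_connector_state.
 */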
6605
3ee6b26b
AD
6606int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6607 const struct drm_connector_state *state,
6608 struct drm_property *property,
6609 uint64_t *val)
e7b07cee
HW
6610{
6611 struct drm_device *dev = connector->dev;
1348969a 6612 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6613 struct dm_connector_state *dm_state =
6614 to_dm_connector_state(state);
6615 int ret = -EINVAL;
6616
6617 if (property == dev->mode_config.scaling_mode_property) {
6618 switch (dm_state->scaling) {
6619 case RMX_CENTER:
6620 *val = DRM_MODE_SCALE_CENTER;
6621 break;
6622 case RMX_ASPECT:
6623 *val = DRM_MODE_SCALE_ASPECT;
6624 break;
6625 case RMX_FULL:
6626 *val = DRM_MODE_SCALE_FULLSCREEN;
6627 break;
6628 case RMX_OFF:
6629 default:
6630 *val = DRM_MODE_SCALE_NONE;
6631 break;
6632 }
6633 ret = 0;
6634 } else if (property == adev->mode_info.underscan_hborder_property) {
6635 *val = dm_state->underscan_hborder;
6636 ret = 0;
6637 } else if (property == adev->mode_info.underscan_vborder_property) {
6638 *val = dm_state->underscan_vborder;
6639 ret = 0;
6640 } else if (property == adev->mode_info.underscan_property) {
6641 *val = dm_state->underscan_enable;
6642 ret = 0;
c1ee92f9
DF
6643 } else if (property == adev->mode_info.abm_level_property) {
6644 *val = dm_state->abm_level;
6645 ret = 0;
e7b07cee 6646 }
c1ee92f9 6647
e7b07cee
HW
6648 return ret;
6649}
6650
526c654a
ED
6651static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6652{
6653 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6654
6655 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6656}
6657
7578ecda 6658static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 6659{
c84dec2f 6660 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6661 const struct dc_link *link = aconnector->dc_link;
1348969a 6662 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 6663 struct amdgpu_display_manager *dm = &adev->dm;
7fd13bae 6664 int i;
ada8ce15 6665
5dff80bd
AG
6666 /*
6667	 * Call only if mst_mgr was initialized before, since it's not done
6668 * for all connector types.
6669 */
6670 if (aconnector->mst_mgr.dev)
6671 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6672
e7b07cee
HW
6673#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6674 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
7fd13bae
AD
6675 for (i = 0; i < dm->num_of_edps; i++) {
6676 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6677 backlight_device_unregister(dm->backlight_dev[i]);
6678 dm->backlight_dev[i] = NULL;
6679 }
e7b07cee
HW
6680 }
6681#endif
dcd5fb82
MF
6682
6683 if (aconnector->dc_em_sink)
6684 dc_sink_release(aconnector->dc_em_sink);
6685 aconnector->dc_em_sink = NULL;
6686 if (aconnector->dc_sink)
6687 dc_sink_release(aconnector->dc_sink);
6688 aconnector->dc_sink = NULL;
6689
e86e8947 6690 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
6691 drm_connector_unregister(connector);
6692 drm_connector_cleanup(connector);
526c654a
ED
6693 if (aconnector->i2c) {
6694 i2c_del_adapter(&aconnector->i2c->base);
6695 kfree(aconnector->i2c);
6696 }
7daec99f 6697 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 6698
e7b07cee
HW
6699 kfree(connector);
6700}
6701
6702void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6703{
6704 struct dm_connector_state *state =
6705 to_dm_connector_state(connector->state);
6706
df099b9b
LSL
6707 if (connector->state)
6708 __drm_atomic_helper_connector_destroy_state(connector->state);
6709
e7b07cee
HW
6710 kfree(state);
6711
6712 state = kzalloc(sizeof(*state), GFP_KERNEL);
6713
6714 if (state) {
6715 state->scaling = RMX_OFF;
6716 state->underscan_enable = false;
6717 state->underscan_hborder = 0;
6718 state->underscan_vborder = 0;
01933ba4 6719 state->base.max_requested_bpc = 8;
3261e013
ML
6720 state->vcpi_slots = 0;
6721 state->pbn = 0;
c3e50f89
NK
6722 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6723 state->abm_level = amdgpu_dm_abm_level;
6724
df099b9b 6725 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
6726 }
6727}
6728
3ee6b26b
AD
6729struct drm_connector_state *
6730amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
6731{
6732 struct dm_connector_state *state =
6733 to_dm_connector_state(connector->state);
6734
6735 struct dm_connector_state *new_state =
6736 kmemdup(state, sizeof(*state), GFP_KERNEL);
6737
98e6436d
AK
6738 if (!new_state)
6739 return NULL;
e7b07cee 6740
98e6436d
AK
6741 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6742
6743 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6744 new_state->abm_level = state->abm_level;
922454c2
NK
6745 new_state->scaling = state->scaling;
6746 new_state->underscan_enable = state->underscan_enable;
6747 new_state->underscan_hborder = state->underscan_hborder;
6748 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
6749 new_state->vcpi_slots = state->vcpi_slots;
6750 new_state->pbn = state->pbn;
98e6436d 6751 return &new_state->base;
e7b07cee
HW
6752}
6753
14f04fa4
AD
6754static int
6755amdgpu_dm_connector_late_register(struct drm_connector *connector)
6756{
6757 struct amdgpu_dm_connector *amdgpu_dm_connector =
6758 to_amdgpu_dm_connector(connector);
00a8037e 6759 int r;
14f04fa4 6760
00a8037e
AD
6761 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6762 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6763 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6764 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6765 if (r)
6766 return r;
6767 }
6768
6769#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
6770 connector_debugfs_init(amdgpu_dm_connector);
6771#endif
6772
6773 return 0;
6774}
6775
e7b07cee
HW
6776static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6777 .reset = amdgpu_dm_connector_funcs_reset,
6778 .detect = amdgpu_dm_connector_detect,
6779 .fill_modes = drm_helper_probe_single_connector_modes,
6780 .destroy = amdgpu_dm_connector_destroy,
6781 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6782 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6783 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6784 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6785 .late_register = amdgpu_dm_connector_late_register,
526c654a 6786 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
6787};
6788
e7b07cee
HW
6789static int get_modes(struct drm_connector *connector)
6790{
6791 return amdgpu_dm_connector_get_modes(connector);
6792}
6793
c84dec2f 6794static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6795{
6796 struct dc_sink_init_data init_params = {
6797 .link = aconnector->dc_link,
6798 .sink_signal = SIGNAL_TYPE_VIRTUAL
6799 };
70e8ffc5 6800 struct edid *edid;
e7b07cee 6801
a89ff457 6802 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
6803 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6804 aconnector->base.name);
6805
6806 aconnector->base.force = DRM_FORCE_OFF;
6807 aconnector->base.override_edid = false;
6808 return;
6809 }
6810
70e8ffc5
HW
6811 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6812
e7b07cee
HW
6813 aconnector->edid = edid;
6814
6815 aconnector->dc_em_sink = dc_link_add_remote_sink(
6816 aconnector->dc_link,
6817 (uint8_t *)edid,
6818 (edid->extensions + 1) * EDID_LENGTH,
6819 &init_params);
6820
dcd5fb82 6821 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
6822 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6823 aconnector->dc_link->local_sink :
6824 aconnector->dc_em_sink;
dcd5fb82
MF
6825 dc_sink_retain(aconnector->dc_sink);
6826 }
e7b07cee
HW
6827}
6828
c84dec2f 6829static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6830{
6831 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6832
1f6010a9
DF
6833 /*
6834	 * In case of a headless boot with force on for a DP managed connector,
e7b07cee
HW
6835	 * these settings have to be != 0 to get an initial modeset.
6836 */
6837 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6838 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6839 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6840 }
6841
6842
6843 aconnector->base.override_edid = true;
6844 create_eml_sink(aconnector);
6845}
6846
cbd14ae7
SW
6847static struct dc_stream_state *
6848create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6849 const struct drm_display_mode *drm_mode,
6850 const struct dm_connector_state *dm_state,
6851 const struct dc_stream_state *old_stream)
6852{
6853 struct drm_connector *connector = &aconnector->base;
1348969a 6854 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 6855 struct dc_stream_state *stream;
4b7da34b
SW
6856 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6857 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
6858 enum dc_status dc_result = DC_OK;
6859
6860 do {
6861 stream = create_stream_for_sink(aconnector, drm_mode,
6862 dm_state, old_stream,
6863 requested_bpc);
6864 if (stream == NULL) {
6865 DRM_ERROR("Failed to create stream for sink!\n");
6866 break;
6867 }
6868
6869 dc_result = dc_validate_stream(adev->dm.dc, stream);
6870
6871 if (dc_result != DC_OK) {
74a16675 6872 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
6873 drm_mode->hdisplay,
6874 drm_mode->vdisplay,
6875 drm_mode->clock,
74a16675
RS
6876 dc_result,
6877 dc_status_to_str(dc_result));
cbd14ae7
SW
6878
6879 dc_stream_release(stream);
6880 stream = NULL;
6881 requested_bpc -= 2; /* lower bpc to retry validation */
6882 }
6883
6884 } while (stream == NULL && requested_bpc >= 6);
6885
68eb3ae3
WS
6886 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6887 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6888
6889 aconnector->force_yuv420_output = true;
6890 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6891 dm_state, old_stream);
6892 aconnector->force_yuv420_output = false;
6893 }
6894
cbd14ae7
SW
6895 return stream;
6896}
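/*
 * Editor's note on the fallback ladder above (behaviour read from the
 * code, not from documentation): with max_requested_bpc = 10 the loop
 * tries validation at 10, then 8, then 6 bpc before giving up, and a
 * DC_FAIL_ENC_VALIDATE result additionally triggers one recursive retry
 * of the whole ladder with force_yuv420_output = true:
 *
 *	10 bpc RGB -> 8 bpc RGB -> 6 bpc RGB -> same ladder as YCbCr 4:2:0
 */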
6897
ba9ca088 6898enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 6899 struct drm_display_mode *mode)
e7b07cee
HW
6900{
6901 int result = MODE_ERROR;
6902 struct dc_sink *dc_sink;
e7b07cee 6903 /* TODO: Unhardcode stream count */
0971c40e 6904 struct dc_stream_state *stream;
c84dec2f 6905 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
6906
6907 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6908 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6909 return result;
6910
1f6010a9
DF
6911 /*
6912	 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
6913 * EDID mgmt
6914 */
6915 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6916 !aconnector->dc_em_sink)
6917 handle_edid_mgmt(aconnector);
6918
c84dec2f 6919 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 6920
ad975f44
VL
6921 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6922 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
6923 DRM_ERROR("dc_sink is NULL!\n");
6924 goto fail;
6925 }
6926
cbd14ae7
SW
6927 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6928 if (stream) {
6929 dc_stream_release(stream);
e7b07cee 6930 result = MODE_OK;
cbd14ae7 6931 }
e7b07cee
HW
6932
6933fail:
6934	/* TODO: error handling */
6935 return result;
6936}
6937
88694af9
NK
6938static int fill_hdr_info_packet(const struct drm_connector_state *state,
6939 struct dc_info_packet *out)
6940{
6941 struct hdmi_drm_infoframe frame;
6942 unsigned char buf[30]; /* 26 + 4 */
6943 ssize_t len;
6944 int ret, i;
6945
6946 memset(out, 0, sizeof(*out));
6947
6948 if (!state->hdr_output_metadata)
6949 return 0;
6950
6951 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6952 if (ret)
6953 return ret;
6954
6955 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6956 if (len < 0)
6957 return (int)len;
6958
6959 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6960 if (len != 30)
6961 return -EINVAL;
6962
6963 /* Prepare the infopacket for DC. */
6964 switch (state->connector->connector_type) {
6965 case DRM_MODE_CONNECTOR_HDMIA:
6966 out->hb0 = 0x87; /* type */
6967 out->hb1 = 0x01; /* version */
6968 out->hb2 = 0x1A; /* length */
6969 out->sb[0] = buf[3]; /* checksum */
6970 i = 1;
6971 break;
6972
6973 case DRM_MODE_CONNECTOR_DisplayPort:
6974 case DRM_MODE_CONNECTOR_eDP:
6975 out->hb0 = 0x00; /* sdp id, zero */
6976 out->hb1 = 0x87; /* type */
6977 out->hb2 = 0x1D; /* payload len - 1 */
6978 out->hb3 = (0x13 << 2); /* sdp version */
6979 out->sb[0] = 0x01; /* version */
6980 out->sb[1] = 0x1A; /* length */
6981 i = 2;
6982 break;
6983
6984 default:
6985 return -EINVAL;
6986 }
6987
6988 memcpy(&out->sb[i], &buf[4], 26);
6989 out->valid = true;
6990
6991 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6992 sizeof(out->sb), false);
6993
6994 return 0;
6995}
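/*
 * Editor's illustration of the two header layouts produced above for the
 * 30-byte packed infoframe (values taken from the function itself, not
 * from measured hardware):
 *
 *	HDMI:   hb0=0x87 (type)  hb1=0x01 (version)  hb2=0x1A (length)
 *	        sb[0]=checksum from buf[3], sb[1..26]=payload from buf[4..29]
 *	DP/eDP: hb0=0x00 (SDP id) hb1=0x87 (type) hb2=0x1D (payload len - 1)
 *	        hb3=0x13<<2 (SDP version), sb[0]=0x01, sb[1]=0x1A,
 *	        sb[2..27]=payload from buf[4..29]
 *
 * In both cases the fixed 26 payload bytes are copied verbatim from
 * offset 4 of the packed DRM infoframe, skipping its 4-byte header.
 */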
6996
88694af9
NK
6997static int
6998amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 6999 struct drm_atomic_state *state)
88694af9 7000{
51e857af
SP
7001 struct drm_connector_state *new_con_state =
7002 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
7003 struct drm_connector_state *old_con_state =
7004 drm_atomic_get_old_connector_state(state, conn);
7005 struct drm_crtc *crtc = new_con_state->crtc;
7006 struct drm_crtc_state *new_crtc_state;
7007 int ret;
7008
e8a98235
RS
7009 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7010
88694af9
NK
7011 if (!crtc)
7012 return 0;
7013
72921cdf 7014 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
7015 struct dc_info_packet hdr_infopacket;
7016
7017 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7018 if (ret)
7019 return ret;
7020
7021 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7022 if (IS_ERR(new_crtc_state))
7023 return PTR_ERR(new_crtc_state);
7024
7025 /*
7026 * DC considers the stream backends changed if the
7027 * static metadata changes. Forcing the modeset also
7028 * gives a simple way for userspace to switch from
b232d4ed
NK
7029 * 8bpc to 10bpc when setting the metadata to enter
7030 * or exit HDR.
7031 *
7032 * Changing the static metadata after it's been
7033 * set is permissible, however. So only force a
7034 * modeset if we're entering or exiting HDR.
88694af9 7035 */
b232d4ed
NK
7036 new_crtc_state->mode_changed =
7037 !old_con_state->hdr_output_metadata ||
7038 !new_con_state->hdr_output_metadata;
88694af9
NK
7039 }
7040
7041 return 0;
7042}
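/*
 * Editor's sketch of the rule encoded above, as a standalone predicate
 * (illustrative only; the driver open-codes it in the handler). The
 * both-NULL case never reaches this point, because the metadata equality
 * check above already returns early for it:
 */
static inline bool example_hdr_needs_modeset(const struct drm_connector_state *old_s,
					     const struct drm_connector_state *new_s)
{
	/* Force a modeset only when HDR metadata appears or disappears;
	 * updating metadata that is already set does not need one. */
	return !old_s->hdr_output_metadata || !new_s->hdr_output_metadata;
}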
7043
e7b07cee
HW
7044static const struct drm_connector_helper_funcs
7045amdgpu_dm_connector_helper_funcs = {
7046 /*
1f6010a9 7047	 * If hotplugging a second, bigger display in FB console mode, higher-resolution
b830ebc9 7048	 * modes will be filtered out by drm_mode_validate_size(), and those modes
1f6010a9 7049	 * are missing after the user starts lightdm. So we need to renew the modes
b830ebc9
HW
7050	 * list in the get_modes callback, not just return the modes count.
7051 */
e7b07cee
HW
7052 .get_modes = get_modes,
7053 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 7054 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
7055};
7056
7057static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7058{
7059}
7060
d6ef9b41 7061static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
7062{
7063 struct drm_atomic_state *state = new_crtc_state->state;
7064 struct drm_plane *plane;
7065 int num_active = 0;
7066
7067 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7068 struct drm_plane_state *new_plane_state;
7069
7070 /* Cursor planes are "fake". */
7071 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7072 continue;
7073
7074 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7075
7076 if (!new_plane_state) {
7077 /*
7078			 * The plane is enabled on the CRTC and hasn't changed
7079 * state. This means that it previously passed
7080 * validation and is therefore enabled.
7081 */
7082 num_active += 1;
7083 continue;
7084 }
7085
7086 /* We need a framebuffer to be considered enabled. */
7087 num_active += (new_plane_state->fb != NULL);
7088 }
7089
d6ef9b41
NK
7090 return num_active;
7091}
7092
8fe684e9
NK
7093static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7094 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
7095{
7096 struct dm_crtc_state *dm_new_crtc_state =
7097 to_dm_crtc_state(new_crtc_state);
7098
7099 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
7100
7101 if (!dm_new_crtc_state->stream)
7102 return;
7103
7104 dm_new_crtc_state->active_planes =
7105 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
7106}
7107
3ee6b26b 7108static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 7109 struct drm_atomic_state *state)
e7b07cee 7110{
29b77ad7
MR
7111 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7112 crtc);
1348969a 7113 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 7114 struct dc *dc = adev->dm.dc;
29b77ad7 7115 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
7116 int ret = -EINVAL;
7117
5b8c5969 7118 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 7119
29b77ad7 7120 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 7121
bcd74374
ND
7122 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7123 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
e7b07cee
HW
7124 return ret;
7125 }
7126
bc92c065 7127 /*
b836a274
MD
7128 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7129 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7130 * planes are disabled, which is not supported by the hardware. And there is legacy
7131 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 7132 */
29b77ad7 7133 if (crtc_state->enable &&
ea9522f5
SS
7134 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7135 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 7136 return -EINVAL;
ea9522f5 7137 }
c14a005c 7138
b836a274
MD
7139 /* In some use cases, like reset, no stream is attached */
7140 if (!dm_crtc_state->stream)
7141 return 0;
7142
62c933f9 7143 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
7144 return 0;
7145
ea9522f5 7146 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
7147 return ret;
7148}
7149
3ee6b26b
AD
7150static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7151 const struct drm_display_mode *mode,
7152 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
7153{
7154 return true;
7155}
7156
7157static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7158 .disable = dm_crtc_helper_disable,
7159 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
7160 .mode_fixup = dm_crtc_helper_mode_fixup,
7161 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
7162};
7163
7164static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7165{
7166
7167}
7168
3261e013
ML
7169 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7170{
7171 switch (display_color_depth) {
7172 case COLOR_DEPTH_666:
7173 return 6;
7174 case COLOR_DEPTH_888:
7175 return 8;
7176 case COLOR_DEPTH_101010:
7177 return 10;
7178 case COLOR_DEPTH_121212:
7179 return 12;
7180 case COLOR_DEPTH_141414:
7181 return 14;
7182 case COLOR_DEPTH_161616:
7183 return 16;
7184 default:
7185 break;
7186 }
7187 return 0;
7188}
7189
3ee6b26b
AD
7190static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7191 struct drm_crtc_state *crtc_state,
7192 struct drm_connector_state *conn_state)
e7b07cee 7193{
3261e013
ML
7194 struct drm_atomic_state *state = crtc_state->state;
7195 struct drm_connector *connector = conn_state->connector;
7196 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7197 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7198 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7199 struct drm_dp_mst_topology_mgr *mst_mgr;
7200 struct drm_dp_mst_port *mst_port;
7201 enum dc_color_depth color_depth;
7202 int clock, bpp = 0;
1bc22f20 7203 bool is_y420 = false;
3261e013
ML
7204
7205 if (!aconnector->port || !aconnector->dc_sink)
7206 return 0;
7207
7208 mst_port = aconnector->port;
7209 mst_mgr = &aconnector->mst_port->mst_mgr;
7210
7211 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7212 return 0;
7213
7214 if (!state->duplicated) {
cbd14ae7 7215 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
7216 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7217 aconnector->force_yuv420_output;
cbd14ae7
SW
7218 color_depth = convert_color_depth_from_display_info(connector,
7219 is_y420,
7220 max_bpc);
3261e013
ML
7221 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7222 clock = adjusted_mode->clock;
dc48529f 7223 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
7224 }
7225 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7226 mst_mgr,
7227 mst_port,
1c6c1cb5 7228 dm_new_connector_state->pbn,
03ca9600 7229 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
7230 if (dm_new_connector_state->vcpi_slots < 0) {
7231 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7232 return dm_new_connector_state->vcpi_slots;
7233 }
e7b07cee
HW
7234 return 0;
7235}
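/*
 * Editor's worked example for the PBN math above (illustrative numbers,
 * not from real hardware): a 3840x2160@60 mode with a 533250 kHz pixel
 * clock at 8 bpc RGB gives bpp = 8 * 3 = 24, so
 *
 *	pbn = drm_dp_calc_pbn_mode(533250, 24, false);
 *
 * which works out to about 1908 PBN: the helper applies the DP MST
 * 64/54 symbol overhead plus a 0.6% margin on top of clock * bpp.
 */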
7236
7237const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7238 .disable = dm_encoder_helper_disable,
7239 .atomic_check = dm_encoder_helper_atomic_check
7240};
7241
d9fe1a4c 7242#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74 7243static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6513104b
HW
7244 struct dc_state *dc_state,
7245 struct dsc_mst_fairness_vars *vars)
29b9ba74
ML
7246{
7247 struct dc_stream_state *stream = NULL;
7248 struct drm_connector *connector;
5760dcb9 7249 struct drm_connector_state *new_con_state;
29b9ba74
ML
7250 struct amdgpu_dm_connector *aconnector;
7251 struct dm_connector_state *dm_conn_state;
a550bb16
HW
7252 int i, j;
7253 int vcpi, pbn_div, pbn, slot_num = 0;
29b9ba74 7254
5760dcb9 7255 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
7256
7257 aconnector = to_amdgpu_dm_connector(connector);
7258
7259 if (!aconnector->port)
7260 continue;
7261
7262 if (!new_con_state || !new_con_state->crtc)
7263 continue;
7264
7265 dm_conn_state = to_dm_connector_state(new_con_state);
7266
7267 for (j = 0; j < dc_state->stream_count; j++) {
7268 stream = dc_state->streams[j];
7269 if (!stream)
7270 continue;
7271
7272			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7273 break;
7274
7275 stream = NULL;
7276 }
7277
7278 if (!stream)
7279 continue;
7280
29b9ba74 7281 pbn_div = dm_mst_get_pbn_divider(stream->link);
6513104b
HW
7282		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7283 for (j = 0; j < dc_state->stream_count; j++) {
7284 if (vars[j].aconnector == aconnector) {
7285 pbn = vars[j].pbn;
7286 break;
7287 }
7288 }
7289
a550bb16
HW
7290 if (j == dc_state->stream_count)
7291 continue;
7292
7293 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7294
7295 if (stream->timing.flags.DSC != 1) {
7296 dm_conn_state->pbn = pbn;
7297 dm_conn_state->vcpi_slots = slot_num;
7298
7299 drm_dp_mst_atomic_enable_dsc(state,
7300 aconnector->port,
7301 dm_conn_state->pbn,
7302 0,
7303 false);
7304 continue;
7305 }
7306
29b9ba74
ML
7307 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7308 aconnector->port,
7309 pbn, pbn_div,
7310 true);
7311 if (vcpi < 0)
7312 return vcpi;
7313
7314 dm_conn_state->pbn = pbn;
7315 dm_conn_state->vcpi_slots = vcpi;
7316 }
7317 return 0;
7318}
d9fe1a4c 7319#endif
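/*
 * Editor's worked example for the slot math above (illustrative): with
 * pbn = 1908 for a stream and pbn_div = 40, roughly what
 * dm_mst_get_pbn_divider() yields for an HBR2 x4 link,
 *
 *	slot_num = DIV_ROUND_UP(1908, 40) = 48
 *
 * i.e. the stream would occupy 48 of the 64 MST time slots.
 */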
29b9ba74 7320
e7b07cee
HW
7321static void dm_drm_plane_reset(struct drm_plane *plane)
7322{
7323 struct dm_plane_state *amdgpu_state = NULL;
7324
7325 if (plane->state)
7326 plane->funcs->atomic_destroy_state(plane, plane->state);
7327
7328 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 7329 WARN_ON(amdgpu_state == NULL);
1f6010a9 7330
7ddaef96
NK
7331 if (amdgpu_state)
7332 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
7333}
7334
7335static struct drm_plane_state *
7336dm_drm_plane_duplicate_state(struct drm_plane *plane)
7337{
7338 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7339
7340 old_dm_plane_state = to_dm_plane_state(plane->state);
7341 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7342 if (!dm_plane_state)
7343 return NULL;
7344
7345 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7346
3be5262e
HW
7347 if (old_dm_plane_state->dc_state) {
7348 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7349 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
7350 }
7351
7352 return &dm_plane_state->base;
7353}
7354
dfd84d90 7355static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 7356 struct drm_plane_state *state)
e7b07cee
HW
7357{
7358 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7359
3be5262e
HW
7360 if (dm_plane_state->dc_state)
7361 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 7362
0627bbd3 7363 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
7364}
7365
7366static const struct drm_plane_funcs dm_plane_funcs = {
7367 .update_plane = drm_atomic_helper_update_plane,
7368 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 7369 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
7370 .reset = dm_drm_plane_reset,
7371 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7372 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 7373 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
7374};
7375
3ee6b26b
AD
7376static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7377 struct drm_plane_state *new_state)
e7b07cee
HW
7378{
7379 struct amdgpu_framebuffer *afb;
7380 struct drm_gem_object *obj;
5d43be0c 7381 struct amdgpu_device *adev;
e7b07cee 7382 struct amdgpu_bo *rbo;
e7b07cee 7383 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
7384 struct list_head list;
7385 struct ttm_validate_buffer tv;
7386 struct ww_acquire_ctx ticket;
5d43be0c
CK
7387 uint32_t domain;
7388 int r;
e7b07cee
HW
7389
7390 if (!new_state->fb) {
4711c033 7391 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
7392 return 0;
7393 }
7394
7395 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 7396 obj = new_state->fb->obj[0];
e7b07cee 7397 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 7398 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
7399 INIT_LIST_HEAD(&list);
7400
7401 tv.bo = &rbo->tbo;
7402 tv.num_shared = 1;
7403 list_add(&tv.head, &list);
7404
9165fb87 7405 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
7406 if (r) {
7407 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 7408 return r;
0f257b09 7409 }
e7b07cee 7410
5d43be0c 7411 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 7412 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
7413 else
7414 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 7415
7b7c6c81 7416 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 7417 if (unlikely(r != 0)) {
30b7c614
HW
7418 if (r != -ERESTARTSYS)
7419 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 7420 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
7421 return r;
7422 }
7423
bb812f1e
JZ
7424 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7425 if (unlikely(r != 0)) {
7426 amdgpu_bo_unpin(rbo);
0f257b09 7427 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7428 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
7429 return r;
7430 }
7df7e505 7431
0f257b09 7432 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7433
7b7c6c81 7434 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
7435
7436 amdgpu_bo_ref(rbo);
7437
cf322b49
NK
7438	/*
7439 * We don't do surface updates on planes that have been newly created,
7440 * but we also don't have the afb->address during atomic check.
7441 *
7442 * Fill in buffer attributes depending on the address here, but only on
7443 * newly created planes since they're not being used by DC yet and this
7444 * won't modify global state.
7445 */
7446 dm_plane_state_old = to_dm_plane_state(plane->state);
7447 dm_plane_state_new = to_dm_plane_state(new_state);
7448
3be5262e 7449 if (dm_plane_state_new->dc_state &&
cf322b49
NK
7450 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7451 struct dc_plane_state *plane_state =
7452 dm_plane_state_new->dc_state;
7453 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 7454
320932bf 7455 fill_plane_buffer_attributes(
695af5f9 7456 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 7457 afb->tiling_flags,
cf322b49
NK
7458 &plane_state->tiling_info, &plane_state->plane_size,
7459 &plane_state->dcc, &plane_state->address,
6eed95b0 7460 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
7461 }
7462
e7b07cee
HW
7463 return 0;
7464}
7465
3ee6b26b
AD
7466static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7467 struct drm_plane_state *old_state)
e7b07cee
HW
7468{
7469 struct amdgpu_bo *rbo;
e7b07cee
HW
7470 int r;
7471
7472 if (!old_state->fb)
7473 return;
7474
e68d14dd 7475 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
7476 r = amdgpu_bo_reserve(rbo, false);
7477 if (unlikely(r)) {
7478 DRM_ERROR("failed to reserve rbo before unpin\n");
7479 return;
b830ebc9
HW
7480 }
7481
7482 amdgpu_bo_unpin(rbo);
7483 amdgpu_bo_unreserve(rbo);
7484 amdgpu_bo_unref(&rbo);
e7b07cee
HW
7485}
7486
8c44515b
AP
7487static int dm_plane_helper_check_state(struct drm_plane_state *state,
7488 struct drm_crtc_state *new_crtc_state)
7489{
6300b3bd
MK
7490 struct drm_framebuffer *fb = state->fb;
7491 int min_downscale, max_upscale;
7492 int min_scale = 0;
7493 int max_scale = INT_MAX;
7494
40d916a2 7495 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 7496 if (fb && state->crtc) {
40d916a2
NC
7497 /* Validate viewport to cover the case when only the position changes */
7498 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7499 int viewport_width = state->crtc_w;
7500 int viewport_height = state->crtc_h;
7501
7502 if (state->crtc_x < 0)
7503 viewport_width += state->crtc_x;
7504 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7505 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7506
7507 if (state->crtc_y < 0)
7508 viewport_height += state->crtc_y;
7509 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7510 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7511
4abdb72b
NC
7512 if (viewport_width < 0 || viewport_height < 0) {
7513 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7514 return -EINVAL;
7515 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7516 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 7517 return -EINVAL;
4abdb72b
NC
7518 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7519 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 7520 return -EINVAL;
4abdb72b
NC
7521 }
7522
40d916a2
NC
7523 }
7524
7525 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
7526 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7527 &min_downscale, &max_upscale);
7528 /*
7529 * Convert to drm convention: 16.16 fixed point, instead of dc's
7530 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7531 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7532 */
7533 min_scale = (1000 << 16) / max_upscale;
7534 max_scale = (1000 << 16) / min_downscale;
7535 }
8c44515b 7536
8c44515b 7537 return drm_atomic_helper_check_plane_state(
6300b3bd 7538 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
7539}
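/*
 * Editor's worked example of the fixed-point conversion above
 * (illustrative caps): if a plane reports max_upscale = 16000 and
 * min_downscale = 250 in DC's 1.0 == 1000 convention (16x up, 1/4 down),
 * then in DRM's 16.16 src/dst convention:
 *
 *	min_scale = (1000 << 16) / 16000 = 0x01000 (1/16)
 *	max_scale = (1000 << 16) /   250 = 0x40000 (4x)
 *
 * Note the inversion: because DRM scale is src/dst, DC's largest
 * upscale becomes DRM's smallest permitted scale factor.
 */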
7540
7578ecda 7541static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 7542 struct drm_atomic_state *state)
cbd19488 7543{
7c11b99a
MR
7544 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7545 plane);
1348969a 7546 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 7547 struct dc *dc = adev->dm.dc;
78171832 7548 struct dm_plane_state *dm_plane_state;
695af5f9 7549 struct dc_scaling_info scaling_info;
8c44515b 7550 struct drm_crtc_state *new_crtc_state;
695af5f9 7551 int ret;
78171832 7552
ba5c1649 7553 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 7554
ba5c1649 7555 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 7556
3be5262e 7557 if (!dm_plane_state->dc_state)
9a3329b1 7558 return 0;
cbd19488 7559
8c44515b 7560 new_crtc_state =
dec92020 7561 drm_atomic_get_new_crtc_state(state,
ba5c1649 7562 new_plane_state->crtc);
8c44515b
AP
7563 if (!new_crtc_state)
7564 return -EINVAL;
7565
ba5c1649 7566 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
7567 if (ret)
7568 return ret;
7569
ba5c1649 7570 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
695af5f9
NK
7571 if (ret)
7572 return ret;
a05bcff1 7573
62c933f9 7574 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
7575 return 0;
7576
7577 return -EINVAL;
7578}
7579
674e78ac 7580static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7581 struct drm_atomic_state *state)
674e78ac
NK
7582{
7583 /* Only support async updates on cursor planes. */
7584 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7585 return -EINVAL;
7586
7587 return 0;
7588}
7589
7590static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7591 struct drm_atomic_state *state)
674e78ac 7592{
5ddb0bd4
MR
7593 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7594 plane);
674e78ac 7595 struct drm_plane_state *old_state =
5ddb0bd4 7596 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7597
e8a98235
RS
7598 trace_amdgpu_dm_atomic_update_cursor(new_state);
7599
332af874 7600 swap(plane->state->fb, new_state->fb);
674e78ac
NK
7601
7602 plane->state->src_x = new_state->src_x;
7603 plane->state->src_y = new_state->src_y;
7604 plane->state->src_w = new_state->src_w;
7605 plane->state->src_h = new_state->src_h;
7606 plane->state->crtc_x = new_state->crtc_x;
7607 plane->state->crtc_y = new_state->crtc_y;
7608 plane->state->crtc_w = new_state->crtc_w;
7609 plane->state->crtc_h = new_state->crtc_h;
7610
7611 handle_cursor_update(plane, old_state);
7612}
7613
e7b07cee
HW
7614static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7615 .prepare_fb = dm_plane_helper_prepare_fb,
7616 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7617 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
7618 .atomic_async_check = dm_plane_atomic_async_check,
7619 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
7620};
7621
7622 /*
7623 * TODO: these are currently initialized to rgb formats only.
7624 * For future use cases we should either initialize them dynamically based on
7625 * plane capabilities, or initialize this array to all formats, so internal drm
1f6010a9 7626 * check will succeed, and let DC implement the proper checks.
e7b07cee 7627 */
d90371b0 7628static const uint32_t rgb_formats[] = {
e7b07cee
HW
7629 DRM_FORMAT_XRGB8888,
7630 DRM_FORMAT_ARGB8888,
7631 DRM_FORMAT_RGBA8888,
7632 DRM_FORMAT_XRGB2101010,
7633 DRM_FORMAT_XBGR2101010,
7634 DRM_FORMAT_ARGB2101010,
7635 DRM_FORMAT_ABGR2101010,
58020403
MK
7636 DRM_FORMAT_XRGB16161616,
7637 DRM_FORMAT_XBGR16161616,
7638 DRM_FORMAT_ARGB16161616,
7639 DRM_FORMAT_ABGR16161616,
bcd47f60
MR
7640 DRM_FORMAT_XBGR8888,
7641 DRM_FORMAT_ABGR8888,
46dd9ff7 7642 DRM_FORMAT_RGB565,
e7b07cee
HW
7643};
7644
0d579c7e
NK
7645static const uint32_t overlay_formats[] = {
7646 DRM_FORMAT_XRGB8888,
7647 DRM_FORMAT_ARGB8888,
7648 DRM_FORMAT_RGBA8888,
7649 DRM_FORMAT_XBGR8888,
7650 DRM_FORMAT_ABGR8888,
7267a1a9 7651 DRM_FORMAT_RGB565
e7b07cee
HW
7652};
7653
7654static const u32 cursor_formats[] = {
7655 DRM_FORMAT_ARGB8888
7656};
7657
37c6a93b
NK
7658static int get_plane_formats(const struct drm_plane *plane,
7659 const struct dc_plane_cap *plane_cap,
7660 uint32_t *formats, int max_formats)
e7b07cee 7661{
37c6a93b
NK
7662 int i, num_formats = 0;
7663
7664 /*
7665 * TODO: Query support for each group of formats directly from
7666 * DC plane caps. This will require adding more formats to the
7667 * caps list.
7668 */
e7b07cee 7669
f180b4bc 7670 switch (plane->type) {
e7b07cee 7671 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
7672 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7673 if (num_formats >= max_formats)
7674 break;
7675
7676 formats[num_formats++] = rgb_formats[i];
7677 }
7678
ea36ad34 7679 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 7680 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
7681 if (plane_cap && plane_cap->pixel_format_support.p010)
7682 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
7683 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7684 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7685 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
7686 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7687 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 7688 }
e7b07cee 7689 break;
37c6a93b 7690
e7b07cee 7691 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
7692 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7693 if (num_formats >= max_formats)
7694 break;
7695
7696 formats[num_formats++] = overlay_formats[i];
7697 }
e7b07cee 7698 break;
37c6a93b 7699
e7b07cee 7700 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
7701 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7702 if (num_formats >= max_formats)
7703 break;
7704
7705 formats[num_formats++] = cursor_formats[i];
7706 }
e7b07cee
HW
7707 break;
7708 }
7709
37c6a93b
NK
7710 return num_formats;
7711}
7712
7713static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7714 struct drm_plane *plane,
7715 unsigned long possible_crtcs,
7716 const struct dc_plane_cap *plane_cap)
7717{
7718 uint32_t formats[32];
7719 int num_formats;
7720 int res = -EPERM;
ecc874a6 7721 unsigned int supported_rotations;
faa37f54 7722 uint64_t *modifiers = NULL;
37c6a93b
NK
7723
7724 num_formats = get_plane_formats(plane, plane_cap, formats,
7725 ARRAY_SIZE(formats));
7726
faa37f54
BN
7727 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7728 if (res)
7729 return res;
7730
4a580877 7731 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7732 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
7733 modifiers, plane->type, NULL);
7734 kfree(modifiers);
37c6a93b
NK
7735 if (res)
7736 return res;
7737
cc1fec57
NK
7738 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7739 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
7740 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7741 BIT(DRM_MODE_BLEND_PREMULTI);
7742
7743 drm_plane_create_alpha_property(plane);
7744 drm_plane_create_blend_mode_property(plane, blend_caps);
7745 }
7746
fc8e5230 7747 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
7748 plane_cap &&
7749 (plane_cap->pixel_format_support.nv12 ||
7750 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
7751 /* This only affects YUV formats. */
7752 drm_plane_create_color_properties(
7753 plane,
7754 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
7755 BIT(DRM_COLOR_YCBCR_BT709) |
7756 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
7757 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7758 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7759 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7760 }
7761
ecc874a6
PLG
7762 supported_rotations =
7763 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7764 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7765
1347385f
SS
7766 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7767 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
7768 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7769 supported_rotations);
ecc874a6 7770
f180b4bc 7771 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7772
96719c54 7773 /* Create (reset) the plane state */
f180b4bc
HW
7774 if (plane->funcs->reset)
7775 plane->funcs->reset(plane);
96719c54 7776
37c6a93b 7777 return 0;
e7b07cee
HW
7778}
7779
7578ecda
AD
7780static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7781 struct drm_plane *plane,
7782 uint32_t crtc_index)
e7b07cee
HW
7783{
7784 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7785 struct drm_plane *cursor_plane;
e7b07cee
HW
7786
7787 int res = -ENOMEM;
7788
7789 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7790 if (!cursor_plane)
7791 goto fail;
7792
f180b4bc 7793 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7794 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
7795
7796 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7797 if (!acrtc)
7798 goto fail;
7799
7800 res = drm_crtc_init_with_planes(
7801 dm->ddev,
7802 &acrtc->base,
7803 plane,
f180b4bc 7804 cursor_plane,
e7b07cee
HW
7805 &amdgpu_dm_crtc_funcs, NULL);
7806
7807 if (res)
7808 goto fail;
7809
7810 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7811
96719c54
HW
7812 /* Create (reset) the plane state */
7813 if (acrtc->base.funcs->reset)
7814 acrtc->base.funcs->reset(&acrtc->base);
7815
e7b07cee
HW
7816 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7817 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7818
7819 acrtc->crtc_id = crtc_index;
7820 acrtc->base.enabled = false;
c37e2d29 7821 acrtc->otg_inst = -1;
e7b07cee
HW
7822
7823 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
7824 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7825 true, MAX_COLOR_LUT_ENTRIES);
086247a4 7826 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 7827
e7b07cee
HW
7828 return 0;
7829
7830fail:
b830ebc9
HW
7831 kfree(acrtc);
7832 kfree(cursor_plane);
e7b07cee
HW
7833 return res;
7834}
7835
7836
7837static int to_drm_connector_type(enum signal_type st)
7838{
7839 switch (st) {
7840 case SIGNAL_TYPE_HDMI_TYPE_A:
7841 return DRM_MODE_CONNECTOR_HDMIA;
7842 case SIGNAL_TYPE_EDP:
7843 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
7844 case SIGNAL_TYPE_LVDS:
7845 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
7846 case SIGNAL_TYPE_RGB:
7847 return DRM_MODE_CONNECTOR_VGA;
7848 case SIGNAL_TYPE_DISPLAY_PORT:
7849 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7850 return DRM_MODE_CONNECTOR_DisplayPort;
7851 case SIGNAL_TYPE_DVI_DUAL_LINK:
7852 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7853 return DRM_MODE_CONNECTOR_DVID;
7854 case SIGNAL_TYPE_VIRTUAL:
7855 return DRM_MODE_CONNECTOR_VIRTUAL;
7856
7857 default:
7858 return DRM_MODE_CONNECTOR_Unknown;
7859 }
7860}
7861
2b4c1c05
DV
7862static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7863{
62afb4ad
JRS
7864 struct drm_encoder *encoder;
7865
7866 /* There is only one encoder per connector */
7867 drm_connector_for_each_possible_encoder(connector, encoder)
7868 return encoder;
7869
7870 return NULL;
2b4c1c05
DV
7871}
7872
e7b07cee
HW
7873static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7874{
e7b07cee
HW
7875 struct drm_encoder *encoder;
7876 struct amdgpu_encoder *amdgpu_encoder;
7877
2b4c1c05 7878 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
7879
7880 if (encoder == NULL)
7881 return;
7882
7883 amdgpu_encoder = to_amdgpu_encoder(encoder);
7884
7885 amdgpu_encoder->native_mode.clock = 0;
7886
7887 if (!list_empty(&connector->probed_modes)) {
7888 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 7889
e7b07cee 7890 list_for_each_entry(preferred_mode,
b830ebc9
HW
7891 &connector->probed_modes,
7892 head) {
7893 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7894 amdgpu_encoder->native_mode = *preferred_mode;
7895
e7b07cee
HW
7896 break;
7897 }
7898
7899 }
7900}
7901
3ee6b26b
AD
7902static struct drm_display_mode *
7903amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7904 char *name,
7905 int hdisplay, int vdisplay)
e7b07cee
HW
7906{
7907 struct drm_device *dev = encoder->dev;
7908 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7909 struct drm_display_mode *mode = NULL;
7910 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7911
7912 mode = drm_mode_duplicate(dev, native_mode);
7913
b830ebc9 7914 if (mode == NULL)
e7b07cee
HW
7915 return NULL;
7916
7917 mode->hdisplay = hdisplay;
7918 mode->vdisplay = vdisplay;
7919 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 7920 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
7921
7922 return mode;
7923
7924}
7925
7926static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 7927 struct drm_connector *connector)
e7b07cee
HW
7928{
7929 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7930 struct drm_display_mode *mode = NULL;
7931 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
7932 struct amdgpu_dm_connector *amdgpu_dm_connector =
7933 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7934 int i;
7935 int n;
7936 struct mode_size {
7937 char name[DRM_DISPLAY_MODE_LEN];
7938 int w;
7939 int h;
b830ebc9 7940 } common_modes[] = {
e7b07cee
HW
7941 { "640x480", 640, 480},
7942 { "800x600", 800, 600},
7943 { "1024x768", 1024, 768},
7944 { "1280x720", 1280, 720},
7945 { "1280x800", 1280, 800},
7946 {"1280x1024", 1280, 1024},
7947 { "1440x900", 1440, 900},
7948 {"1680x1050", 1680, 1050},
7949 {"1600x1200", 1600, 1200},
7950 {"1920x1080", 1920, 1080},
7951 {"1920x1200", 1920, 1200}
7952 };
7953
b830ebc9 7954 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
7955
7956 for (i = 0; i < n; i++) {
7957 struct drm_display_mode *curmode = NULL;
7958 bool mode_existed = false;
7959
7960 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
7961 common_modes[i].h > native_mode->vdisplay ||
7962 (common_modes[i].w == native_mode->hdisplay &&
7963 common_modes[i].h == native_mode->vdisplay))
7964 continue;
e7b07cee
HW
7965
7966 list_for_each_entry(curmode, &connector->probed_modes, head) {
7967 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 7968 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
7969 mode_existed = true;
7970 break;
7971 }
7972 }
7973
7974 if (mode_existed)
7975 continue;
7976
7977 mode = amdgpu_dm_create_common_mode(encoder,
7978 common_modes[i].name, common_modes[i].w,
7979 common_modes[i].h);
7980 drm_mode_probed_add(connector, mode);
c84dec2f 7981 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
7982 }
7983}
7984
d77de788
SS
7985static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7986{
7987 struct drm_encoder *encoder;
7988 struct amdgpu_encoder *amdgpu_encoder;
7989 const struct drm_display_mode *native_mode;
7990
7991 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7992 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7993 return;
7994
7995 encoder = amdgpu_dm_connector_to_encoder(connector);
7996 if (!encoder)
7997 return;
7998
7999 amdgpu_encoder = to_amdgpu_encoder(encoder);
8000
8001 native_mode = &amdgpu_encoder->native_mode;
8002 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8003 return;
8004
8005 drm_connector_set_panel_orientation_with_quirk(connector,
8006 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8007 native_mode->hdisplay,
8008 native_mode->vdisplay);
8009}
8010
3ee6b26b
AD
8011static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8012 struct edid *edid)
e7b07cee 8013{
c84dec2f
HW
8014 struct amdgpu_dm_connector *amdgpu_dm_connector =
8015 to_amdgpu_dm_connector(connector);
e7b07cee
HW
8016
8017 if (edid) {
8018 /* empty probed_modes */
8019 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 8020 amdgpu_dm_connector->num_modes =
e7b07cee
HW
8021 drm_add_edid_modes(connector, edid);
8022
f1e5e913
YMM
8023		/* Sort the probed modes before calling
8024		 * amdgpu_dm_get_native_mode(), since an EDID can have
8025 * more than one preferred mode. The modes that are
8026 * later in the probed mode list could be of higher
8027 * and preferred resolution. For example, 3840x2160
8028 * resolution in base EDID preferred timing and 4096x2160
8029 * preferred resolution in DID extension block later.
8030 */
8031 drm_mode_sort(&connector->probed_modes);
e7b07cee 8032 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
8033
8034 /* Freesync capabilities are reset by calling
8035 * drm_add_edid_modes() and need to be
8036 * restored here.
8037 */
8038 amdgpu_dm_update_freesync_caps(connector, edid);
d77de788
SS
8039
8040 amdgpu_set_panel_orientation(connector);
a8d8d3dc 8041 } else {
c84dec2f 8042 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 8043 }
e7b07cee
HW
8044}
8045
a85ba005
NC
8046static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8047 struct drm_display_mode *mode)
8048{
8049 struct drm_display_mode *m;
8050
8051	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8052 if (drm_mode_equal(m, mode))
8053 return true;
8054 }
8055
8056 return false;
8057}
8058
8059static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8060{
8061 const struct drm_display_mode *m;
8062 struct drm_display_mode *new_mode;
8063 uint i;
8064 uint32_t new_modes_count = 0;
8065
8066 /* Standard FPS values
8067 *
12cdff6b
SC
8068 * 23.976 - TV/NTSC
8069 * 24 - Cinema
8070 * 25 - TV/PAL
8071 * 29.97 - TV/NTSC
8072 * 30 - TV/NTSC
8073 * 48 - Cinema HFR
8074 * 50 - TV/PAL
8075 * 60 - Commonly used
8076 * 48,72,96,120 - Multiples of 24
a85ba005 8077 */
9ce5ed6e
CIK
8078 static const uint32_t common_rates[] = {
8079 23976, 24000, 25000, 29970, 30000,
12cdff6b 8080 48000, 50000, 60000, 72000, 96000, 120000
9ce5ed6e 8081 };
a85ba005
NC
8082
8083 /*
8084 * Find mode with highest refresh rate with the same resolution
8085	 * as the preferred mode. Some monitors report a preferred mode whose
8086	 * refresh rate is lower than the highest one they support.
8087 */
8088
8089 m = get_highest_refresh_rate_mode(aconnector, true);
8090 if (!m)
8091 return 0;
8092
8093 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8094 uint64_t target_vtotal, target_vtotal_diff;
8095 uint64_t num, den;
8096
8097 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8098 continue;
8099
8100 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8101 common_rates[i] > aconnector->max_vfreq * 1000)
8102 continue;
8103
8104 num = (unsigned long long)m->clock * 1000 * 1000;
8105 den = common_rates[i] * (unsigned long long)m->htotal;
8106 target_vtotal = div_u64(num, den);
8107 target_vtotal_diff = target_vtotal - m->vtotal;
8108
8109 /* Check for illegal modes */
8110 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8111 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8112 m->vtotal + target_vtotal_diff < m->vsync_end)
8113 continue;
8114
8115 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8116 if (!new_mode)
8117 goto out;
8118
8119 new_mode->vtotal += (u16)target_vtotal_diff;
8120 new_mode->vsync_start += (u16)target_vtotal_diff;
8121 new_mode->vsync_end += (u16)target_vtotal_diff;
8122 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8123 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8124
8125 if (!is_duplicate_mode(aconnector, new_mode)) {
8126 drm_mode_probed_add(&aconnector->base, new_mode);
8127 new_modes_count += 1;
8128 } else
8129 drm_mode_destroy(aconnector->base.dev, new_mode);
8130 }
8131 out:
8132 return new_modes_count;
8133}
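/*
 * Illustration of the retiming math above (hypothetical numbers, not
 * from any real EDID): take a 4K mode with clock = 594000 kHz,
 * htotal = 4400 and vtotal = 2250, i.e. 60 Hz. Retargeting it to
 * 48 Hz (48000 in the mHz units of common_rates[]):
 *
 *   target_vtotal      = 594000 * 1000 * 1000 / (48000 * 4400) = 2812
 *   target_vtotal_diff = 2812 - 2250 = 562
 *
 * Stretching vtotal/vsync_start/vsync_end by 562 lines keeps the pixel
 * clock unchanged and only lengthens the vertical blank, giving an
 * actual refresh of 594000000 / (4400 * 2812) ~= 48.01 Hz.
 */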
8134
8135static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8136 struct edid *edid)
8137{
8138 struct amdgpu_dm_connector *amdgpu_dm_connector =
8139 to_amdgpu_dm_connector(connector);
8140
8141 if (!(amdgpu_freesync_vid_mode && edid))
8142 return;
fe8858bb 8143
a85ba005
NC
8144 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8145 amdgpu_dm_connector->num_modes +=
8146 add_fs_modes(amdgpu_dm_connector);
8147}
8148
7578ecda 8149static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 8150{
c84dec2f
HW
8151 struct amdgpu_dm_connector *amdgpu_dm_connector =
8152 to_amdgpu_dm_connector(connector);
e7b07cee 8153 struct drm_encoder *encoder;
c84dec2f 8154 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 8155
2b4c1c05 8156 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 8157
5c0e6840 8158 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
8159 amdgpu_dm_connector->num_modes =
8160 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
8161 } else {
8162 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8163 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 8164 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 8165 }
3e332d3a 8166 amdgpu_dm_fbc_init(connector);
5099114b 8167
c84dec2f 8168 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
8169}
8170
3ee6b26b
AD
8171void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8172 struct amdgpu_dm_connector *aconnector,
8173 int connector_type,
8174 struct dc_link *link,
8175 int link_index)
e7b07cee 8176{
1348969a 8177 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 8178
f04bee34
NK
8179 /*
8180 * Some of the properties below require access to state, like bpc.
8181 * Allocate some default initial connector state with our reset helper.
8182 */
8183 if (aconnector->base.funcs->reset)
8184 aconnector->base.funcs->reset(&aconnector->base);
8185
e7b07cee
HW
8186 aconnector->connector_id = link_index;
8187 aconnector->dc_link = link;
8188 aconnector->base.interlace_allowed = false;
8189 aconnector->base.doublescan_allowed = false;
8190 aconnector->base.stereo_allowed = false;
8191 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8192 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 8193 aconnector->audio_inst = -1;
e7b07cee
HW
8194 mutex_init(&aconnector->hpd_lock);
8195
1f6010a9
DF
8196 /*
8197 * Configure HPD hot-plug support: connector->polled defaults to 0,
b830ebc9
HW
8198 * which means HPD hot plug is not supported.
8199 */
e7b07cee
HW
8200 switch (connector_type) {
8201 case DRM_MODE_CONNECTOR_HDMIA:
8202 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 8203 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8204 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
8205 break;
8206 case DRM_MODE_CONNECTOR_DisplayPort:
8207 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
f6e03f80
JS
8208 if (link->is_dig_mapping_flexible &&
8209 link->dc->res_pool->funcs->link_encs_assign) {
8210 link->link_enc =
8211 link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8212 if (!link->link_enc)
8213 link->link_enc =
8214 link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8215 }
8216
8217 if (link->link_enc)
8218 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8219 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
8220 break;
8221 case DRM_MODE_CONNECTOR_DVID:
8222 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8223 break;
8224 default:
8225 break;
8226 }
8227
8228 drm_object_attach_property(&aconnector->base.base,
8229 dm->ddev->mode_config.scaling_mode_property,
8230 DRM_MODE_SCALE_NONE);
8231
8232 drm_object_attach_property(&aconnector->base.base,
8233 adev->mode_info.underscan_property,
8234 UNDERSCAN_OFF);
8235 drm_object_attach_property(&aconnector->base.base,
8236 adev->mode_info.underscan_hborder_property,
8237 0);
8238 drm_object_attach_property(&aconnector->base.base,
8239 adev->mode_info.underscan_vborder_property,
8240 0);
1825fd34 8241
8c61b31e
JFZ
8242 if (!aconnector->mst_port)
8243 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 8244
4a8ca46b
RL
8245 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8246 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8247 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 8248
c1ee92f9 8249 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 8250 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
8251 drm_object_attach_property(&aconnector->base.base,
8252 adev->mode_info.abm_level_property, 0);
8253 }
bb47de73
NK
8254
8255 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
8256 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8257 connector_type == DRM_MODE_CONNECTOR_eDP) {
e057b52c 8258 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
88694af9 8259
8c61b31e
JFZ
8260 if (!aconnector->mst_port)
8261 drm_connector_attach_vrr_capable_property(&aconnector->base);
8262
0c8620d6 8263#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 8264 if (adev->dm.hdcp_workqueue)
53e108aa 8265 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 8266#endif
bb47de73 8267 }
e7b07cee
HW
8268}
8269
7578ecda
AD
8270static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8271 struct i2c_msg *msgs, int num)
e7b07cee
HW
8272{
8273 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8274 struct ddc_service *ddc_service = i2c->ddc_service;
8275 struct i2c_command cmd;
8276 int i;
8277 int result = -EIO;
8278
b830ebc9 8279 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
8280
8281 if (!cmd.payloads)
8282 return result;
8283
8284 cmd.number_of_payloads = num;
8285 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8286 cmd.speed = 100;
8287
8288 for (i = 0; i < num; i++) {
8289 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8290 cmd.payloads[i].address = msgs[i].addr;
8291 cmd.payloads[i].length = msgs[i].len;
8292 cmd.payloads[i].data = msgs[i].buf;
8293 }
8294
c85e6e54
DF
8295 if (dc_submit_i2c(
8296 ddc_service->ctx->dc,
8297 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
8298 &cmd))
8299 result = num;
8300
8301 kfree(cmd.payloads);
8302 return result;
8303}
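/*
 * Note on the transfer above: each struct i2c_msg is mapped 1:1 onto a
 * DC i2c_payload (a cleared I2C_M_RD flag means a write), and the whole
 * set is submitted to DC as one command at a speed of 100 (standard
 * mode, presumably kHz). Per the i2c-algorithm contract, the number of
 * messages processed is returned on success and -EIO on any DC failure.
 */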
8304
7578ecda 8305static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
8306{
8307 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8308}
8309
8310static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8311 .master_xfer = amdgpu_dm_i2c_xfer,
8312 .functionality = amdgpu_dm_i2c_func,
8313};
8314
3ee6b26b
AD
8315static struct amdgpu_i2c_adapter *
8316create_i2c(struct ddc_service *ddc_service,
8317 int link_index,
8318 int *res)
e7b07cee
HW
8319{
8320 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8321 struct amdgpu_i2c_adapter *i2c;
8322
b830ebc9 8323 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
8324 if (!i2c)
8325 return NULL;
e7b07cee
HW
8326 i2c->base.owner = THIS_MODULE;
8327 i2c->base.class = I2C_CLASS_DDC;
8328 i2c->base.dev.parent = &adev->pdev->dev;
8329 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 8330 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
8331 i2c_set_adapdata(&i2c->base, i2c);
8332 i2c->ddc_service = ddc_service;
f6e03f80
JS
8333 if (i2c->ddc_service->ddc_pin)
8334 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
8335
8336 return i2c;
8337}
8338
89fc8d4e 8339
1f6010a9
DF
8340/*
8341 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
8342 * dc_link which will be represented by this aconnector.
8343 */
7578ecda
AD
8344static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8345 struct amdgpu_dm_connector *aconnector,
8346 uint32_t link_index,
8347 struct amdgpu_encoder *aencoder)
e7b07cee
HW
8348{
8349 int res = 0;
8350 int connector_type;
8351 struct dc *dc = dm->dc;
8352 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8353 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
8354
8355 link->priv = aconnector;
e7b07cee 8356
f1ad2f5e 8357 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
8358
8359 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
8360 if (!i2c) {
8361 DRM_ERROR("Failed to create i2c adapter data\n");
8362 return -ENOMEM;
8363 }
8364
e7b07cee
HW
8365 aconnector->i2c = i2c;
8366 res = i2c_add_adapter(&i2c->base);
8367
8368 if (res) {
8369 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8370 goto out_free;
8371 }
8372
8373 connector_type = to_drm_connector_type(link->connector_signal);
8374
17165de2 8375 res = drm_connector_init_with_ddc(
e7b07cee
HW
8376 dm->ddev,
8377 &aconnector->base,
8378 &amdgpu_dm_connector_funcs,
17165de2
AP
8379 connector_type,
8380 &i2c->base);
e7b07cee
HW
8381
8382 if (res) {
8383 DRM_ERROR("connector_init failed\n");
8384 aconnector->connector_id = -1;
8385 goto out_free;
8386 }
8387
8388 drm_connector_helper_add(
8389 &aconnector->base,
8390 &amdgpu_dm_connector_helper_funcs);
8391
8392 amdgpu_dm_connector_init_helper(
8393 dm,
8394 aconnector,
8395 connector_type,
8396 link,
8397 link_index);
8398
cde4c44d 8399 drm_connector_attach_encoder(
e7b07cee
HW
8400 &aconnector->base, &aencoder->base);
8401
e7b07cee
HW
8402 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8403 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 8404 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 8405
e7b07cee
HW
8406out_free:
8407 if (res) {
8408 kfree(i2c);
8409 aconnector->i2c = NULL;
8410 }
8411 return res;
8412}
8413
8414int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8415{
8416 switch (adev->mode_info.num_crtc) {
8417 case 1:
8418 return 0x1;
8419 case 2:
8420 return 0x3;
8421 case 3:
8422 return 0x7;
8423 case 4:
8424 return 0xf;
8425 case 5:
8426 return 0x1f;
8427 case 6:
8428 default:
8429 return 0x3f;
8430 }
8431}
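/*
 * For the 1-6 CRTC counts handled above, the switch behaves like the
 * sketch below (shown for illustration only; the driver keeps the
 * explicit switch):
 *
 *   return (1u << min(adev->mode_info.num_crtc, 6)) - 1;
 */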
8432
7578ecda
AD
8433static int amdgpu_dm_encoder_init(struct drm_device *dev,
8434 struct amdgpu_encoder *aencoder,
8435 uint32_t link_index)
e7b07cee 8436{
1348969a 8437 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8438
8439 int res = drm_encoder_init(dev,
8440 &aencoder->base,
8441 &amdgpu_dm_encoder_funcs,
8442 DRM_MODE_ENCODER_TMDS,
8443 NULL);
8444
8445 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8446
8447 if (!res)
8448 aencoder->encoder_id = link_index;
8449 else
8450 aencoder->encoder_id = -1;
8451
8452 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8453
8454 return res;
8455}
8456
3ee6b26b
AD
8457static void manage_dm_interrupts(struct amdgpu_device *adev,
8458 struct amdgpu_crtc *acrtc,
8459 bool enable)
e7b07cee
HW
8460{
8461 /*
8fe684e9
NK
8462 * We have no guarantee that the frontend index maps to the same
8463 * backend index - some even map to more than one.
8464 *
8465 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
8466 */
8467 int irq_type =
734dd01d 8468 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
8469 adev,
8470 acrtc->crtc_id);
8471
8472 if (enable) {
8473 drm_crtc_vblank_on(&acrtc->base);
8474 amdgpu_irq_get(
8475 adev,
8476 &adev->pageflip_irq,
8477 irq_type);
86bc2219
WL
8478#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8479 amdgpu_irq_get(
8480 adev,
8481 &adev->vline0_irq,
8482 irq_type);
8483#endif
e7b07cee 8484 } else {
86bc2219
WL
8485#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8486 amdgpu_irq_put(
8487 adev,
8488 &adev->vline0_irq,
8489 irq_type);
8490#endif
e7b07cee
HW
8491 amdgpu_irq_put(
8492 adev,
8493 &adev->pageflip_irq,
8494 irq_type);
8495 drm_crtc_vblank_off(&acrtc->base);
8496 }
8497}
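/*
 * Ordering note for the helper above: the enable path turns the DRM
 * vblank machinery on before taking the pageflip (and, with secure
 * display, vline0) IRQ references, and the disable path drops those
 * references in reverse order before turning vblank off, so the IRQ
 * references are always released before vblank handling is disabled.
 */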
8498
8fe684e9
NK
8499static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8500 struct amdgpu_crtc *acrtc)
8501{
8502 int irq_type =
8503 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8504
8505 /**
8506 * This reads the current state for the IRQ and forcibly reapplies
8507 * the setting to hardware.
8508 */
8509 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8510}
8511
3ee6b26b
AD
8512static bool
8513is_scaling_state_different(const struct dm_connector_state *dm_state,
8514 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
8515{
8516 if (dm_state->scaling != old_dm_state->scaling)
8517 return true;
8518 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8519 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8520 return true;
8521 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8522 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8523 return true;
b830ebc9
HW
8524 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8525 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8526 return true;
e7b07cee
HW
8527 return false;
8528}
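/*
 * Recap of the predicate above: scaling is considered different if the
 * scale mode changed, if underscan was turned off while both old
 * borders were non-zero, if it was turned on while both new borders
 * are non-zero, or, with the enable state unchanged, if either border
 * width changed.
 */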
8529
0c8620d6
BL
8530#ifdef CONFIG_DRM_AMD_DC_HDCP
8531static bool is_content_protection_different(struct drm_connector_state *state,
8532 const struct drm_connector_state *old_state,
8533 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8534{
8535 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 8536 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 8537
31c0ed90 8538 /* Handle: Type0/1 change */
53e108aa
BL
8539 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8540 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8541 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8542 return true;
8543 }
8544
31c0ed90
BL
8545 /* CP is being re-enabled; ignore this.
8546 *
8547 * Handles: ENABLED -> DESIRED
8548 */
0c8620d6
BL
8549 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8550 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8551 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8552 return false;
8553 }
8554
31c0ed90
BL
8555 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8556 *
8557 * Handles: UNDESIRED -> ENABLED
8558 */
0c8620d6
BL
8559 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8560 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8561 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8562
0d9a947b
QZ
8563 /* Stream removed and re-enabled
8564 *
8565 * Can sometimes overlap with the HPD case,
8566 * thus set update_hdcp to false to avoid
8567 * setting HDCP multiple times.
8568 *
8569 * Handles: DESIRED -> DESIRED (Special case)
8570 */
8571 if (!(old_state->crtc && old_state->crtc->enabled) &&
8572 state->crtc && state->crtc->enabled &&
8573 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8574 dm_con_state->update_hdcp = false;
8575 return true;
8576 }
8577
8578 /* Hot-plug, headless s3, dpms
8579 *
8580 * Only start HDCP if the display is connected/enabled.
8581 * update_hdcp flag will be set to false until the next
8582 * HPD comes in.
31c0ed90
BL
8583 *
8584 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 8585 */
97f6c917
BL
8586 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8587 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8588 dm_con_state->update_hdcp = false;
0c8620d6 8589 return true;
97f6c917 8590 }
0c8620d6 8591
31c0ed90
BL
8592 /*
8593 * Handles: UNDESIRED -> UNDESIRED
8594 * DESIRED -> DESIRED
8595 * ENABLED -> ENABLED
8596 */
0c8620d6
BL
8597 if (old_state->content_protection == state->content_protection)
8598 return false;
8599
31c0ed90
BL
8600 /*
8601 * Handles: UNDESIRED -> DESIRED
8602 * DESIRED -> UNDESIRED
8603 * ENABLED -> UNDESIRED
8604 */
97f6c917 8605 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
8606 return true;
8607
31c0ed90
BL
8608 /*
8609 * Handles: DESIRED -> ENABLED
8610 */
0c8620d6
BL
8611 return false;
8612}
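/*
 * Transition summary for the checks above ("true" asks the HDCP
 * workqueue to (re)evaluate the link, "false" leaves it alone):
 *
 *   Type0/1 change while not UNDESIRED      -> true  (forced to DESIRED)
 *   ENABLED   -> DESIRED                    -> false (kept ENABLED)
 *   UNDESIRED -> ENABLED (S3 resume)        -> demoted to DESIRED, falls
 *                                              through to the checks below
 *   DESIRED   -> DESIRED, stream re-enabled -> true  (update_hdcp cleared)
 *   DESIRED   -> DESIRED, hotplug/dpms on   -> true  (update_hdcp cleared)
 *   no state change                         -> false
 *   any change away from ENABLED            -> true
 *   DESIRED   -> ENABLED                    -> false
 */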
8613
0c8620d6 8614#endif
3ee6b26b
AD
8615static void remove_stream(struct amdgpu_device *adev,
8616 struct amdgpu_crtc *acrtc,
8617 struct dc_stream_state *stream)
e7b07cee
HW
8618{
8619 /* this is the update mode case */
e7b07cee
HW
8620
8621 acrtc->otg_inst = -1;
8622 acrtc->enabled = false;
8623}
8624
7578ecda
AD
8625static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8626 struct dc_cursor_position *position)
2a8f6ccb 8627{
f4c2cc43 8628 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
8629 int x, y;
8630 int xorigin = 0, yorigin = 0;
8631
e371e19c 8632 if (!crtc || !plane->state->fb)
2a8f6ccb 8633 return 0;
2a8f6ccb
HW
8634
8635 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8636 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8637 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8638 __func__,
8639 plane->state->crtc_w,
8640 plane->state->crtc_h);
8641 return -EINVAL;
8642 }
8643
8644 x = plane->state->crtc_x;
8645 y = plane->state->crtc_y;
c14a005c 8646
e371e19c
NK
8647 if (x <= -amdgpu_crtc->max_cursor_width ||
8648 y <= -amdgpu_crtc->max_cursor_height)
8649 return 0;
8650
2a8f6ccb
HW
8651 if (x < 0) {
8652 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8653 x = 0;
8654 }
8655 if (y < 0) {
8656 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8657 y = 0;
8658 }
8659 position->enable = true;
d243b6ff 8660 position->translate_by_source = true;
2a8f6ccb
HW
8661 position->x = x;
8662 position->y = y;
8663 position->x_hotspot = xorigin;
8664 position->y_hotspot = yorigin;
8665
8666 return 0;
8667}
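/*
 * Example of the clamping above: a cursor placed at crtc_x = -10,
 * crtc_y = 20 (with a sufficiently large max cursor size) yields
 * x = 0, x_hotspot = 10, y = 20, y_hotspot = 0, so DC draws the cursor
 * at the left screen edge with its first 10 columns hidden, as expected
 * for a partially off-screen cursor.
 */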
8668
3ee6b26b
AD
8669static void handle_cursor_update(struct drm_plane *plane,
8670 struct drm_plane_state *old_plane_state)
e7b07cee 8671{
1348969a 8672 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
8673 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8674 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8675 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8676 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8677 uint64_t address = afb ? afb->address : 0;
6a30a929 8678 struct dc_cursor_position position = {0};
2a8f6ccb
HW
8679 struct dc_cursor_attributes attributes;
8680 int ret;
8681
e7b07cee
HW
8682 if (!plane->state->fb && !old_plane_state->fb)
8683 return;
8684
cb2318b7 8685 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
4711c033
LT
8686 __func__,
8687 amdgpu_crtc->crtc_id,
8688 plane->state->crtc_w,
8689 plane->state->crtc_h);
2a8f6ccb
HW
8690
8691 ret = get_cursor_position(plane, crtc, &position);
8692 if (ret)
8693 return;
8694
8695 if (!position.enable) {
8696 /* turn off cursor */
674e78ac
NK
8697 if (crtc_state && crtc_state->stream) {
8698 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
8699 dc_stream_set_cursor_position(crtc_state->stream,
8700 &position);
674e78ac
NK
8701 mutex_unlock(&adev->dm.dc_lock);
8702 }
2a8f6ccb 8703 return;
e7b07cee 8704 }
e7b07cee 8705
2a8f6ccb
HW
8706 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8707 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8708
c1cefe11 8709 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
8710 attributes.address.high_part = upper_32_bits(address);
8711 attributes.address.low_part = lower_32_bits(address);
8712 attributes.width = plane->state->crtc_w;
8713 attributes.height = plane->state->crtc_h;
8714 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8715 attributes.rotation_angle = 0;
8716 attributes.attribute_flags.value = 0;
8717
03a66367 8718 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 8719
886daac9 8720 if (crtc_state->stream) {
674e78ac 8721 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
8722 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8723 &attributes))
8724 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 8725
2a8f6ccb
HW
8726 if (!dc_stream_set_cursor_position(crtc_state->stream,
8727 &position))
8728 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 8729 mutex_unlock(&adev->dm.dc_lock);
886daac9 8730 }
2a8f6ccb 8731}
e7b07cee
HW
8732
8733static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8734{
8735
8736 assert_spin_locked(&acrtc->base.dev->event_lock);
8737 WARN_ON(acrtc->event);
8738
8739 acrtc->event = acrtc->base.state->event;
8740
8741 /* Set the flip status */
8742 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8743
8744 /* Mark this event as consumed */
8745 acrtc->base.state->event = NULL;
8746
cb2318b7
VL
8747 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8748 acrtc->crtc_id);
e7b07cee
HW
8749}
8750
bb47de73
NK
8751static void update_freesync_state_on_stream(
8752 struct amdgpu_display_manager *dm,
8753 struct dm_crtc_state *new_crtc_state,
180db303
NK
8754 struct dc_stream_state *new_stream,
8755 struct dc_plane_state *surface,
8756 u32 flip_timestamp_in_us)
bb47de73 8757{
09aef2c4 8758 struct mod_vrr_params vrr_params;
bb47de73 8759 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 8760 struct amdgpu_device *adev = dm->adev;
585d450c 8761 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8762 unsigned long flags;
4cda3243 8763 bool pack_sdp_v1_3 = false;
bb47de73
NK
8764
8765 if (!new_stream)
8766 return;
8767
8768 /*
8769 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8770 * For now it's sufficient to just guard against these conditions.
8771 */
8772
8773 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8774 return;
8775
4a580877 8776 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8777 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8778
180db303
NK
8779 if (surface) {
8780 mod_freesync_handle_preflip(
8781 dm->freesync_module,
8782 surface,
8783 new_stream,
8784 flip_timestamp_in_us,
8785 &vrr_params);
09aef2c4
MK
8786
8787 if (adev->family < AMDGPU_FAMILY_AI &&
8788 amdgpu_dm_vrr_active(new_crtc_state)) {
8789 mod_freesync_handle_v_update(dm->freesync_module,
8790 new_stream, &vrr_params);
e63e2491
EB
8791
8792 /* Need to call this before the frame ends. */
8793 dc_stream_adjust_vmin_vmax(dm->dc,
8794 new_crtc_state->stream,
8795 &vrr_params.adjust);
09aef2c4 8796 }
180db303 8797 }
bb47de73
NK
8798
8799 mod_freesync_build_vrr_infopacket(
8800 dm->freesync_module,
8801 new_stream,
180db303 8802 &vrr_params,
ecd0136b
HT
8803 PACKET_TYPE_VRR,
8804 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
8805 &vrr_infopacket,
8806 pack_sdp_v1_3);
bb47de73 8807
8a48b44c 8808 new_crtc_state->freesync_timing_changed |=
585d450c 8809 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
8810 &vrr_params.adjust,
8811 sizeof(vrr_params.adjust)) != 0);
bb47de73 8812
8a48b44c 8813 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
8814 (memcmp(&new_crtc_state->vrr_infopacket,
8815 &vrr_infopacket,
8816 sizeof(vrr_infopacket)) != 0);
8817
585d450c 8818 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
8819 new_crtc_state->vrr_infopacket = vrr_infopacket;
8820
585d450c 8821 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
8822 new_stream->vrr_infopacket = vrr_infopacket;
8823
8824 if (new_crtc_state->freesync_vrr_info_changed)
8825 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8826 new_crtc_state->base.crtc->base.id,
8827 (int)new_crtc_state->base.vrr_enabled,
180db303 8828 (int)vrr_params.state);
09aef2c4 8829
4a580877 8830 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
8831}
8832
585d450c 8833static void update_stream_irq_parameters(
e854194c
MK
8834 struct amdgpu_display_manager *dm,
8835 struct dm_crtc_state *new_crtc_state)
8836{
8837 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 8838 struct mod_vrr_params vrr_params;
e854194c 8839 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 8840 struct amdgpu_device *adev = dm->adev;
585d450c 8841 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8842 unsigned long flags;
e854194c
MK
8843
8844 if (!new_stream)
8845 return;
8846
8847 /*
8848 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8849 * For now it's sufficient to just guard against these conditions.
8850 */
8851 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8852 return;
8853
4a580877 8854 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8855 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8856
e854194c
MK
8857 if (new_crtc_state->vrr_supported &&
8858 config.min_refresh_in_uhz &&
8859 config.max_refresh_in_uhz) {
a85ba005
NC
8860 /*
8861 * if freesync compatible mode was set, config.state will be set
8862 * in atomic check
8863 */
8864 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8865 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8866 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8867 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8868 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8869 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8870 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8871 } else {
8872 config.state = new_crtc_state->base.vrr_enabled ?
8873 VRR_STATE_ACTIVE_VARIABLE :
8874 VRR_STATE_INACTIVE;
8875 }
e854194c
MK
8876 } else {
8877 config.state = VRR_STATE_UNSUPPORTED;
8878 }
8879
8880 mod_freesync_build_vrr_params(dm->freesync_module,
8881 new_stream,
8882 &config, &vrr_params);
8883
8884 new_crtc_state->freesync_timing_changed |=
585d450c
AP
8885 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8886 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 8887
585d450c
AP
8888 new_crtc_state->freesync_config = config;
8889 /* Copy state for access from DM IRQ handler */
8890 acrtc->dm_irq_params.freesync_config = config;
8891 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8892 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 8893 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
8894}
8895
66b0c973
MK
8896static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8897 struct dm_crtc_state *new_state)
8898{
8899 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8900 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8901
8902 if (!old_vrr_active && new_vrr_active) {
8903 /* Transition VRR inactive -> active:
8904 * While VRR is active, we must not disable vblank irq, as a
8905 * re-enable after disable would compute bogus vblank/pflip
8906 * timestamps if it happened inside the display front porch.
d2574c33
MK
8907 *
8908 * We also need vupdate irq for the actual core vblank handling
8909 * at end of vblank.
66b0c973 8910 */
d2574c33 8911 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
8912 drm_crtc_vblank_get(new_state->base.crtc);
8913 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8914 __func__, new_state->base.crtc->base.id);
8915 } else if (old_vrr_active && !new_vrr_active) {
8916 /* Transition VRR active -> inactive:
8917 * Allow vblank irq disable again for fixed refresh rate.
8918 */
d2574c33 8919 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
8920 drm_crtc_vblank_put(new_state->base.crtc);
8921 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8922 __func__, new_state->base.crtc->base.id);
8923 }
8924}
8925
8ad27806
NK
8926static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8927{
8928 struct drm_plane *plane;
5760dcb9 8929 struct drm_plane_state *old_plane_state;
8ad27806
NK
8930 int i;
8931
8932 /*
8933 * TODO: Make this per-stream so we don't issue redundant updates for
8934 * commits with multiple streams.
8935 */
5760dcb9 8936 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8ad27806
NK
8937 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8938 handle_cursor_update(plane, old_plane_state);
8939}
8940
3be5262e 8941static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 8942 struct dc_state *dc_state,
3ee6b26b
AD
8943 struct drm_device *dev,
8944 struct amdgpu_display_manager *dm,
8945 struct drm_crtc *pcrtc,
420cd472 8946 bool wait_for_vblank)
e7b07cee 8947{
efc8278e 8948 uint32_t i;
8a48b44c 8949 uint64_t timestamp_ns;
e7b07cee 8950 struct drm_plane *plane;
0bc9706d 8951 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 8952 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
8953 struct drm_crtc_state *new_pcrtc_state =
8954 drm_atomic_get_new_crtc_state(state, pcrtc);
8955 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
8956 struct dm_crtc_state *dm_old_crtc_state =
8957 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 8958 int planes_count = 0, vpos, hpos;
570c91d5 8959 long r;
e7b07cee 8960 unsigned long flags;
8a48b44c 8961 struct amdgpu_bo *abo;
fdd1fe57
MK
8962 uint32_t target_vblank, last_flip_vblank;
8963 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 8964 bool pflip_present = false;
bc7f670e
DF
8965 struct {
8966 struct dc_surface_update surface_updates[MAX_SURFACES];
8967 struct dc_plane_info plane_infos[MAX_SURFACES];
8968 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 8969 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 8970 struct dc_stream_update stream_update;
74aa7bd4 8971 } *bundle;
bc7f670e 8972
74aa7bd4 8973 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 8974
74aa7bd4
DF
8975 if (!bundle) {
8976 dm_error("Failed to allocate update bundle\n");
4b510503
NK
8977 goto cleanup;
8978 }
e7b07cee 8979
8ad27806
NK
8980 /*
8981 * Disable the cursor first if we're disabling all the planes.
8982 * It'll remain on the screen after the planes are re-enabled
8983 * if we don't.
8984 */
8985 if (acrtc_state->active_planes == 0)
8986 amdgpu_dm_commit_cursors(state);
8987
e7b07cee 8988 /* update planes when needed */
efc8278e 8989 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 8990 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 8991 struct drm_crtc_state *new_crtc_state;
0bc9706d 8992 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 8993 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 8994 bool plane_needs_flip;
c7af5f77 8995 struct dc_plane_state *dc_plane;
54d76575 8996 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 8997
80c218d5
NK
8998 /* Cursor plane is handled after stream updates */
8999 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 9000 continue;
e7b07cee 9001
f5ba60fe
DD
9002 if (!fb || !crtc || pcrtc != crtc)
9003 continue;
9004
9005 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9006 if (!new_crtc_state->active)
e7b07cee
HW
9007 continue;
9008
bc7f670e 9009 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 9010
74aa7bd4 9011 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 9012 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
9013 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9014 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 9015 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 9016 }
8a48b44c 9017
695af5f9
NK
9018 fill_dc_scaling_info(new_plane_state,
9019 &bundle->scaling_infos[planes_count]);
8a48b44c 9020
695af5f9
NK
9021 bundle->surface_updates[planes_count].scaling_info =
9022 &bundle->scaling_infos[planes_count];
8a48b44c 9023
f5031000 9024 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 9025
f5031000 9026 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 9027
f5031000
DF
9028 if (!plane_needs_flip) {
9029 planes_count += 1;
9030 continue;
9031 }
8a48b44c 9032
2fac0f53
CK
9033 abo = gem_to_amdgpu_bo(fb->obj[0]);
9034
f8308898
AG
9035 /*
9036 * Wait for all fences on this FB. Do limited wait to avoid
9037 * deadlock during GPU reset when this fence will not signal
9038 * but we hold reservation lock for the BO.
9039 */
d3fae3b3
CK
9040 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9041 msecs_to_jiffies(5000));
f8308898 9042 if (unlikely(r <= 0))
ed8a5fb2 9043 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 9044
695af5f9 9045 fill_dc_plane_info_and_addr(
8ce5d842 9046 dm->adev, new_plane_state,
6eed95b0 9047 afb->tiling_flags,
695af5f9 9048 &bundle->plane_infos[planes_count],
87b7ebc2 9049 &bundle->flip_addrs[planes_count].address,
6eed95b0 9050 afb->tmz_surface, false);
87b7ebc2 9051
4711c033 9052 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
9053 new_plane_state->plane->index,
9054 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
9055
9056 bundle->surface_updates[planes_count].plane_info =
9057 &bundle->plane_infos[planes_count];
8a48b44c 9058
caff0e66
NK
9059 /*
9060 * Only allow immediate flips for fast updates that don't
9061 * change FB pitch, DCC state, rotation or mirroring.
9062 */
f5031000 9063 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 9064 crtc->state->async_flip &&
caff0e66 9065 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 9066
f5031000
DF
9067 timestamp_ns = ktime_get_ns();
9068 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9069 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9070 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 9071
f5031000
DF
9072 if (!bundle->surface_updates[planes_count].surface) {
9073 DRM_ERROR("No surface for CRTC: id=%d\n",
9074 acrtc_attach->crtc_id);
9075 continue;
bc7f670e
DF
9076 }
9077
f5031000
DF
9078 if (plane == pcrtc->primary)
9079 update_freesync_state_on_stream(
9080 dm,
9081 acrtc_state,
9082 acrtc_state->stream,
9083 dc_plane,
9084 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 9085
4711c033 9086 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
9087 __func__,
9088 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9089 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
9090
9091 planes_count += 1;
9092
8a48b44c
DF
9093 }
9094
74aa7bd4 9095 if (pflip_present) {
634092b1
MK
9096 if (!vrr_active) {
9097 /* Use old throttling in non-vrr fixed refresh rate mode
9098 * to keep flip scheduling based on target vblank counts
9099 * working in a backwards compatible way, e.g., for
9100 * clients using the GLX_OML_sync_control extension or
9101 * DRI3/Present extension with defined target_msc.
9102 */
e3eff4b5 9103 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
9104 } else {
9106 /* For variable refresh rate mode only:
9107 * Get vblank of last completed flip to avoid > 1 vrr
9108 * flips per video frame by use of throttling, but allow
9109 * flip programming anywhere in the possibly large
9110 * variable vrr vblank interval for fine-grained flip
9111 * timing control and more opportunity to avoid stutter
9112 * on late submission of flips.
9113 */
9114 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 9115 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
9116 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9117 }
9118
fdd1fe57 9119 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
9120
9121 /*
9122 * Wait until we're out of the vertical blank period before the one
9123 * targeted by the flip
9124 */
9125 while ((acrtc_attach->enabled &&
9126 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9127 0, &vpos, &hpos, NULL,
9128 NULL, &pcrtc->hwmode)
9129 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9130 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9131 (int)(target_vblank -
e3eff4b5 9132 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
9133 usleep_range(1000, 1100);
9134 }
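/*
 * Illustration of the throttle above: with last_flip_vblank = 1000 and
 * wait_for_vblank true, target_vblank is 1001; the loop sleeps ~1 ms
 * per iteration while the scanout position is still inside vblank and
 * the frame counter has not yet reached 1001. The signed cast in
 * (int)(target_vblank - counter) keeps the comparison correct across
 * counter wraparound.
 */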
9135
8fe684e9
NK
9136 /**
9137 * Prepare the flip event for the pageflip interrupt to handle.
9138 *
9139 * This only works in the case where we've already turned on the
9140 * appropriate hardware blocks (eg. HUBP) so in the transition case
9141 * from 0 -> n planes we have to skip a hardware generated event
9142 * and rely on sending it from software.
9143 */
9144 if (acrtc_attach->base.state->event &&
035f5496
AP
9145 acrtc_state->active_planes > 0 &&
9146 !acrtc_state->force_dpms_off) {
8a48b44c
DF
9147 drm_crtc_vblank_get(pcrtc);
9148
9149 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9150
9151 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9152 prepare_flip_isr(acrtc_attach);
9153
9154 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9155 }
9156
9157 if (acrtc_state->stream) {
8a48b44c 9158 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 9159 bundle->stream_update.vrr_infopacket =
8a48b44c 9160 &acrtc_state->stream->vrr_infopacket;
e7b07cee 9161 }
e7b07cee
HW
9162 }
9163
bc92c065 9164 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
9165 if ((planes_count || acrtc_state->active_planes == 0) &&
9166 acrtc_state->stream) {
96160687 9167#if defined(CONFIG_DRM_AMD_DC_DCN)
58aa1c50
NK
9168 /*
9169 * If PSR or idle optimizations are enabled then flush out
9170 * any pending work before hardware programming.
9171 */
06dd1888
NK
9172 if (dm->vblank_control_workqueue)
9173 flush_workqueue(dm->vblank_control_workqueue);
96160687 9174#endif
58aa1c50 9175
b6e881c9 9176 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 9177 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
9178 bundle->stream_update.src = acrtc_state->stream->src;
9179 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
9180 }
9181
cf020d49
NK
9182 if (new_pcrtc_state->color_mgmt_changed) {
9183 /*
9184 * TODO: This isn't fully correct since we've actually
9185 * already modified the stream in place.
9186 */
9187 bundle->stream_update.gamut_remap =
9188 &acrtc_state->stream->gamut_remap_matrix;
9189 bundle->stream_update.output_csc_transform =
9190 &acrtc_state->stream->csc_color_matrix;
9191 bundle->stream_update.out_transfer_func =
9192 acrtc_state->stream->out_transfer_func;
9193 }
bc7f670e 9194
8a48b44c 9195 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 9196 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 9197 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 9198
e63e2491
EB
9199 /*
9200 * If FreeSync state on the stream has changed then we need to
9201 * re-adjust the min/max bounds now that DC doesn't handle this
9202 * as part of commit.
9203 */
a85ba005 9204 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
9205 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9206 dc_stream_adjust_vmin_vmax(
9207 dm->dc, acrtc_state->stream,
585d450c 9208 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
9209 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9210 }
bc7f670e 9211 mutex_lock(&dm->dc_lock);
8c322309 9212 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 9213 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
9214 amdgpu_dm_psr_disable(acrtc_state->stream);
9215
bc7f670e 9216 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 9217 bundle->surface_updates,
bc7f670e
DF
9218 planes_count,
9219 acrtc_state->stream,
efc8278e
AJ
9220 &bundle->stream_update,
9221 dc_state);
8c322309 9222
8fe684e9
NK
9223 /**
9224 * Enable or disable the interrupts on the backend.
9225 *
9226 * Most pipes are put into power gating when unused.
9227 *
9228 * When power gating is enabled on a pipe we lose the
9229 * interrupt enablement state when power gating is disabled.
9230 *
9231 * So we need to update the IRQ control state in hardware
9232 * whenever the pipe turns on (since it could be previously
9233 * power gated) or off (since some pipes can't be power gated
9234 * on some ASICs).
9235 */
9236 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
9237 dm_update_pflip_irq_state(drm_to_adev(dev),
9238 acrtc_attach);
8fe684e9 9239
8c322309 9240 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 9241 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 9242 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309 9243 amdgpu_dm_link_setup_psr(acrtc_state->stream);
58aa1c50
NK
9244
9245 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9246 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9247 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9248 struct amdgpu_dm_connector *aconn =
9249 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
1a365683
RL
9250
9251 if (aconn->psr_skip_count > 0)
9252 aconn->psr_skip_count--;
58aa1c50
NK
9253
9254 /* Allow PSR when skip count is 0. */
9255 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9256 } else {
9257 acrtc_attach->dm_irq_params.allow_psr_entry = false;
8c322309
RL
9258 }
9259
bc7f670e 9260 mutex_unlock(&dm->dc_lock);
e7b07cee 9261 }
4b510503 9262
8ad27806
NK
9263 /*
9264 * Update cursor state *after* programming all the planes.
9265 * This avoids redundant programming in the case where we're going
9266 * to be disabling a single plane - those pipes are being disabled.
9267 */
9268 if (acrtc_state->active_planes)
9269 amdgpu_dm_commit_cursors(state);
80c218d5 9270
4b510503 9271cleanup:
74aa7bd4 9272 kfree(bundle);
e7b07cee
HW
9273}
9274
6ce8f316
NK
9275static void amdgpu_dm_commit_audio(struct drm_device *dev,
9276 struct drm_atomic_state *state)
9277{
1348969a 9278 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
9279 struct amdgpu_dm_connector *aconnector;
9280 struct drm_connector *connector;
9281 struct drm_connector_state *old_con_state, *new_con_state;
9282 struct drm_crtc_state *new_crtc_state;
9283 struct dm_crtc_state *new_dm_crtc_state;
9284 const struct dc_stream_status *status;
9285 int i, inst;
9286
9287 /* Notify device removals. */
9288 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9289 if (old_con_state->crtc != new_con_state->crtc) {
9290 /* CRTC changes require notification. */
9291 goto notify;
9292 }
9293
9294 if (!new_con_state->crtc)
9295 continue;
9296
9297 new_crtc_state = drm_atomic_get_new_crtc_state(
9298 state, new_con_state->crtc);
9299
9300 if (!new_crtc_state)
9301 continue;
9302
9303 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9304 continue;
9305
9306 notify:
9307 aconnector = to_amdgpu_dm_connector(connector);
9308
9309 mutex_lock(&adev->dm.audio_lock);
9310 inst = aconnector->audio_inst;
9311 aconnector->audio_inst = -1;
9312 mutex_unlock(&adev->dm.audio_lock);
9313
9314 amdgpu_dm_audio_eld_notify(adev, inst);
9315 }
9316
9317 /* Notify audio device additions. */
9318 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9319 if (!new_con_state->crtc)
9320 continue;
9321
9322 new_crtc_state = drm_atomic_get_new_crtc_state(
9323 state, new_con_state->crtc);
9324
9325 if (!new_crtc_state)
9326 continue;
9327
9328 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9329 continue;
9330
9331 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9332 if (!new_dm_crtc_state->stream)
9333 continue;
9334
9335 status = dc_stream_get_status(new_dm_crtc_state->stream);
9336 if (!status)
9337 continue;
9338
9339 aconnector = to_amdgpu_dm_connector(connector);
9340
9341 mutex_lock(&adev->dm.audio_lock);
9342 inst = status->audio_inst;
9343 aconnector->audio_inst = inst;
9344 mutex_unlock(&adev->dm.audio_lock);
9345
9346 amdgpu_dm_audio_eld_notify(adev, inst);
9347 }
9348}
9349
1f6010a9 9350/*
27b3f4fc
LSL
9351 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9352 * @crtc_state: the DRM CRTC state
9353 * @stream_state: the DC stream state.
9354 *
9355 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9356 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9357 */
9358static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9359 struct dc_stream_state *stream_state)
9360{
b9952f93 9361 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 9362}
e7b07cee 9363
b8592b48
LL
9364/**
9365 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9366 * @state: The atomic state to commit
9367 *
9368 * This will tell DC to commit the constructed DC state from atomic_check,
9369 * programming the hardware. Any failures here implies a hardware failure, since
9370 * atomic check should have filtered anything non-kosher.
9371 */
7578ecda 9372static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
9373{
9374 struct drm_device *dev = state->dev;
1348969a 9375 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
9376 struct amdgpu_display_manager *dm = &adev->dm;
9377 struct dm_atomic_state *dm_state;
eb3dc897 9378 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 9379 uint32_t i, j;
5cc6dcbd 9380 struct drm_crtc *crtc;
0bc9706d 9381 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
9382 unsigned long flags;
9383 bool wait_for_vblank = true;
9384 struct drm_connector *connector;
c2cea706 9385 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 9386 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 9387 int crtc_disable_count = 0;
6ee90e88 9388 bool mode_set_reset_required = false;
e7b07cee 9389
e8a98235
RS
9390 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9391
e7b07cee
HW
9392 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9393
eb3dc897
NK
9394 dm_state = dm_atomic_get_new_state(state);
9395 if (dm_state && dm_state->context) {
9396 dc_state = dm_state->context;
9397 } else {
9398 /* No state changes, retain current state. */
813d20dc 9399 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
9400 ASSERT(dc_state_temp);
9401 dc_state = dc_state_temp;
9402 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9403 }
e7b07cee 9404
6d90a208
AP
9405 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9406 new_crtc_state, i) {
9407 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9408
9409 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9410
9411 if (old_crtc_state->active &&
9412 (!new_crtc_state->active ||
9413 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9414 manage_dm_interrupts(adev, acrtc, false);
9415 dc_stream_release(dm_old_crtc_state->stream);
9416 }
9417 }
9418
8976f73b
RS
9419 drm_atomic_helper_calc_timestamping_constants(state);
9420
e7b07cee 9421 /* update changed items */
0bc9706d 9422 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 9423 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9424
54d76575
LSL
9425 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9426 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 9427
4711c033 9428 DRM_DEBUG_ATOMIC(
e7b07cee
HW
9429 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9430 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9431 "connectors_changed:%d\n",
9432 acrtc->crtc_id,
0bc9706d
LSL
9433 new_crtc_state->enable,
9434 new_crtc_state->active,
9435 new_crtc_state->planes_changed,
9436 new_crtc_state->mode_changed,
9437 new_crtc_state->active_changed,
9438 new_crtc_state->connectors_changed);
e7b07cee 9439
5c68c652
VL
9440 /* Disable cursor if disabling crtc */
9441 if (old_crtc_state->active && !new_crtc_state->active) {
9442 struct dc_cursor_position position;
9443
9444 memset(&position, 0, sizeof(position));
9445 mutex_lock(&dm->dc_lock);
9446 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9447 mutex_unlock(&dm->dc_lock);
9448 }
9449
27b3f4fc
LSL
9450 /* Copy all transient state flags into dc state */
9451 if (dm_new_crtc_state->stream) {
9452 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9453 dm_new_crtc_state->stream);
9454 }
9455
e7b07cee
HW
9456 /* handles headless hotplug case, updating new_state and
9457 * aconnector as needed
9458 */
9459
54d76575 9460 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 9461
4711c033 9462 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9463
54d76575 9464 if (!dm_new_crtc_state->stream) {
e7b07cee 9465 /*
b830ebc9
HW
9466 * This could happen because of issues with
9467 * userspace notification delivery.
9468 * In this case userspace tries to set a mode on a
1f6010a9
DF
9469 * display which is in fact disconnected.
9470 * dc_sink is NULL on the aconnector in this case.
b830ebc9
HW
9471 * We expect a mode reset to come soon.
9472 *
9473 * This can also happen when an unplug is done
9474 * during the resume sequence.
9475 *
9476 * In this case, we want to pretend we still
9477 * have a sink to keep the pipe running so that
9478 * hw state is consistent with the sw state
9479 */
f1ad2f5e 9480 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
9481 __func__, acrtc->base.base.id);
9482 continue;
9483 }
9484
54d76575
LSL
9485 if (dm_old_crtc_state->stream)
9486 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 9487
97028037
LP
9488 pm_runtime_get_noresume(dev->dev);
9489
e7b07cee 9490 acrtc->enabled = true;
0bc9706d
LSL
9491 acrtc->hw_mode = new_crtc_state->mode;
9492 crtc->hwmode = new_crtc_state->mode;
6ee90e88 9493 mode_set_reset_required = true;
0bc9706d 9494 } else if (modereset_required(new_crtc_state)) {
4711c033 9495 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9496 /* i.e. reset mode */
6ee90e88 9497 if (dm_old_crtc_state->stream)
54d76575 9498 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 9499
6ee90e88 9500 mode_set_reset_required = true;
e7b07cee
HW
9501 }
9502 } /* for_each_crtc_in_state() */
9503
eb3dc897 9504 if (dc_state) {
6ee90e88 9505 /* if there was a mode set or reset, disable eDP PSR */
58aa1c50 9506 if (mode_set_reset_required) {
96160687 9507#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
9508 if (dm->vblank_control_workqueue)
9509 flush_workqueue(dm->vblank_control_workqueue);
96160687 9510#endif
6ee90e88 9511 amdgpu_dm_psr_disable_all(dm);
58aa1c50 9512 }
6ee90e88 9513
eb3dc897 9514 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 9515 mutex_lock(&dm->dc_lock);
eb3dc897 9516 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5af50b0b
BR
9517#if defined(CONFIG_DRM_AMD_DC_DCN)
9518 /* Allow idle optimization when vblank count is 0 for display off */
9519 if (dm->active_vblank_irq_count == 0)
9520 dc_allow_idle_optimizations(dm->dc, true);
9521#endif
674e78ac 9522 mutex_unlock(&dm->dc_lock);
fa2123db 9523 }
fe8858bb 9524
0bc9706d 9525 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9526 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9527
54d76575 9528 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9529
54d76575 9530 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 9531 const struct dc_stream_status *status =
54d76575 9532 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 9533
eb3dc897 9534 if (!status)
09f609c3
LL
9535 status = dc_stream_get_status_from_state(dc_state,
9536 dm_new_crtc_state->stream);
e7b07cee 9537 if (!status)
54d76575 9538 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
9539 else
9540 acrtc->otg_inst = status->primary_otg_inst;
9541 }
9542 }
0c8620d6
BL
9543#ifdef CONFIG_DRM_AMD_DC_HDCP
9544 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9545 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9546 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9547 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9548
9549 new_crtc_state = NULL;
9550
9551 if (acrtc)
9552 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9553
9554 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9555
9556 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9557 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9558 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9559 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 9560 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
9561 continue;
9562 }
9563
9564 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
9565 hdcp_update_display(
9566 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 9567 new_con_state->hdcp_content_type,
0e86d3d4 9568 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
0c8620d6
BL
9569 }
9570#endif
e7b07cee 9571
02d6a6fc 9572 /* Handle connector state changes */
c2cea706 9573 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9574 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9575 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9576 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 9577 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 9578 struct dc_stream_update stream_update;
b232d4ed 9579 struct dc_info_packet hdr_packet;
e7b07cee 9580 struct dc_stream_status *status = NULL;
b232d4ed 9581 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 9582
efc8278e 9583 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
9584 memset(&stream_update, 0, sizeof(stream_update));
9585
44d09c6a 9586 if (acrtc) {
0bc9706d 9587 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
9588 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9589 }
0bc9706d 9590
e7b07cee 9591 /* Skip any modesets/resets */
0bc9706d 9592 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
9593 continue;
9594
54d76575 9595 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
9596 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9597
b232d4ed
NK
9598 scaling_changed = is_scaling_state_different(dm_new_con_state,
9599 dm_old_con_state);
9600
9601 abm_changed = dm_new_crtc_state->abm_level !=
9602 dm_old_crtc_state->abm_level;
9603
9604 hdr_changed =
72921cdf 9605 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
b232d4ed
NK
9606
9607 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 9608 continue;
e7b07cee 9609
b6e881c9 9610 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 9611 if (scaling_changed) {
02d6a6fc 9612 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 9613 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 9614
02d6a6fc
DF
9615 stream_update.src = dm_new_crtc_state->stream->src;
9616 stream_update.dst = dm_new_crtc_state->stream->dst;
9617 }
9618
b232d4ed 9619 if (abm_changed) {
02d6a6fc
DF
9620 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9621
9622 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9623 }
70e8ffc5 9624
b232d4ed
NK
9625 if (hdr_changed) {
9626 fill_hdr_info_packet(new_con_state, &hdr_packet);
9627 stream_update.hdr_static_metadata = &hdr_packet;
9628 }
9629
54d76575 9630 status = dc_stream_get_status(dm_new_crtc_state->stream);
57738ae4
ND
9631
9632 if (WARN_ON(!status))
9633 continue;
9634
3be5262e 9635 WARN_ON(!status->plane_count);
e7b07cee 9636
02d6a6fc
DF
9637 /*
9638 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9639 * Here we create an empty update on each plane.
9640 * To fix this, DC should permit updating only stream properties.
9641 */
9642 for (j = 0; j < status->plane_count; j++)
efc8278e 9643 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
9644
9645
9646 mutex_lock(&dm->dc_lock);
9647 dc_commit_updates_for_stream(dm->dc,
efc8278e 9648 dummy_updates,
02d6a6fc
DF
9649 status->plane_count,
9650 dm_new_crtc_state->stream,
efc8278e
AJ
9651 &stream_update,
9652 dc_state);
02d6a6fc 9653 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
9654 }
9655
b5e83f6f 9656 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 9657 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 9658 new_crtc_state, i) {
fe2a1965
LP
9659 if (old_crtc_state->active && !new_crtc_state->active)
9660 crtc_disable_count++;
9661
54d76575 9662 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 9663 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 9664
585d450c
AP
9665 /* Update freesync config on the crtc state and the params used by the vblank irq */
9666 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 9667
66b0c973
MK
9668 /* Handle vrr on->off / off->on transitions */
9669 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9670 dm_new_crtc_state);
e7b07cee
HW
9671 }
9672
8fe684e9
NK
9673 /*
9674 * Enable interrupts for CRTCs that are newly enabled or went through
9675 * a modeset. It was intentionally deferred until after the front end
9676 * state was modified to wait until the OTG was on and so the IRQ
9677 * handlers didn't access stale or invalid state.
9678 */
9679 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9680 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 9681#ifdef CONFIG_DEBUG_FS
86bc2219 9682 bool configure_crc = false;
8e7b6fee 9683 enum amdgpu_dm_pipe_crc_source cur_crc_src;
d98af272
WL
9684#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9685 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9686#endif
9687 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9688 cur_crc_src = acrtc->dm_irq_params.crc_src;
9689 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 9690#endif
585d450c
AP
9691 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9692
8fe684e9
NK
9693 if (new_crtc_state->active &&
9694 (!old_crtc_state->active ||
9695 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
9696 dc_stream_retain(dm_new_crtc_state->stream);
9697 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 9698 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 9699
24eb9374 9700#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
9701 /*
9702 * Frontend may have changed so reapply the CRC capture
9703 * settings for the stream.
9704 */
9705 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 9706
8e7b6fee 9707 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
9708 configure_crc = true;
9709#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
9710 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9711 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9712 acrtc->dm_irq_params.crc_window.update_win = true;
9713 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9714 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9715 crc_rd_wrk->crtc = crtc;
9716 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9717 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9718 }
86bc2219 9719#endif
e2881d6d 9720 }
c920888c 9721
86bc2219 9722 if (configure_crc)
bbc49fc0
WL
9723 if (amdgpu_dm_crtc_configure_crc_source(
9724 crtc, dm_new_crtc_state, cur_crc_src))
9725 DRM_DEBUG_DRIVER("Failed to configure crc source");
24eb9374 9726#endif
8fe684e9
NK
9727 }
9728 }
e7b07cee 9729
420cd472 9730 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 9731 if (new_crtc_state->async_flip)
420cd472
DF
9732 wait_for_vblank = false;
9733
e7b07cee 9734 /* update planes when needed per crtc */
5cc6dcbd 9735 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 9736 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9737
54d76575 9738 if (dm_new_crtc_state->stream)
eb3dc897 9739 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 9740 dm, crtc, wait_for_vblank);
e7b07cee
HW
9741 }
9742
6ce8f316
NK
9743 /* Update audio instances for each connector. */
9744 amdgpu_dm_commit_audio(dev, state);
9745
7230362c
AD
9746#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9747 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9748 /* restore the backlight level */
7fd13bae
AD
9749 for (i = 0; i < dm->num_of_edps; i++) {
9750 if (dm->backlight_dev[i] &&
9751 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9752 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9753 }
7230362c 9754#endif
e7b07cee
HW
9755 /*
9756 * Send vblank events for anything not handled in the flip path and
9757 * mark the events consumed for drm_atomic_helper_commit_hw_done
9758 */
4a580877 9759 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 9760 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9761
0bc9706d
LSL
9762 if (new_crtc_state->event)
9763 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 9764
0bc9706d 9765 new_crtc_state->event = NULL;
e7b07cee 9766 }
4a580877 9767 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 9768
29c8f234
LL
9769 /* Signal HW programming completion */
9770 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
9771
9772 if (wait_for_vblank)
320a1274 9773 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
9774
9775 drm_atomic_helper_cleanup_planes(dev, state);
97028037 9776
5f6fab24
AD
9777 /* return the stolen vga memory back to VRAM */
9778 if (!adev->mman.keep_stolen_vga_memory)
9779 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9780 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9781
1f6010a9
DF
9782 /*
9783 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
9784 * so we can put the GPU into runtime suspend if we're not driving any
9785 * displays anymore
9786 */
fe2a1965
LP
9787 for (i = 0; i < crtc_disable_count; i++)
9788 pm_runtime_put_autosuspend(dev->dev);
97028037 9789 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
9790
9791 if (dc_state_temp)
9792 dc_release_state(dc_state_temp);
e7b07cee
HW
9793}
9794
9795
9796static int dm_force_atomic_commit(struct drm_connector *connector)
9797{
9798 int ret = 0;
9799 struct drm_device *ddev = connector->dev;
9800 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9801 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9802 struct drm_plane *plane = disconnected_acrtc->base.primary;
9803 struct drm_connector_state *conn_state;
9804 struct drm_crtc_state *crtc_state;
9805 struct drm_plane_state *plane_state;
9806
9807 if (!state)
9808 return -ENOMEM;
9809
9810 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9811
9812 /* Construct an atomic state to restore previous display setting */
9813
9814 /*
9815 * Attach connectors to drm_atomic_state
9816 */
9817 conn_state = drm_atomic_get_connector_state(state, connector);
9818
9819 ret = PTR_ERR_OR_ZERO(conn_state);
9820 if (ret)
2dc39051 9821 goto out;
e7b07cee
HW
9822
9823 /* Attach crtc to drm_atomic_state*/
9824 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9825
9826 ret = PTR_ERR_OR_ZERO(crtc_state);
9827 if (ret)
2dc39051 9828 goto out;
e7b07cee
HW
9829
9830 /* force a restore */
9831 crtc_state->mode_changed = true;
9832
9833 /* Attach plane to drm_atomic_state */
9834 plane_state = drm_atomic_get_plane_state(state, plane);
9835
9836 ret = PTR_ERR_OR_ZERO(plane_state);
9837 if (ret)
2dc39051 9838 goto out;
e7b07cee
HW
9839
9840 /* Call commit internally with the state we just constructed */
9841 ret = drm_atomic_commit(state);
e7b07cee 9842
2dc39051 9843out:
e7b07cee 9844 drm_atomic_state_put(state);
2dc39051
VL
9845 if (ret)
9846 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
9847
9848 return ret;
9849}
9850
9851/*
1f6010a9
DF
9852 * This function handles all cases where a set mode does not come upon hotplug.
9853 * This includes when a display is unplugged then plugged back into the
9854 * same port and when running without usermode desktop manager support
e7b07cee 9855 */
3ee6b26b
AD
9856void dm_restore_drm_connector_state(struct drm_device *dev,
9857 struct drm_connector *connector)
e7b07cee 9858{
c84dec2f 9859 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
9860 struct amdgpu_crtc *disconnected_acrtc;
9861 struct dm_crtc_state *acrtc_state;
9862
9863 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9864 return;
9865
9866 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
9867 if (!disconnected_acrtc)
9868 return;
e7b07cee 9869
70e8ffc5
HW
9870 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9871 if (!acrtc_state->stream)
e7b07cee
HW
9872 return;
9873
9874 /*
9875 * If the previous sink is not released and differs from the current one,
9876 * we deduce we are in a state where we cannot rely on a usermode call
9877 * to turn on the display, so we do it here
9878 */
9879 if (acrtc_state->stream->sink != aconnector->dc_sink)
9880 dm_force_atomic_commit(&aconnector->base);
9881}
9882
1f6010a9 9883/*
e7b07cee
HW
9884 * Grabs all modesetting locks to serialize against any blocking commits
9885 * and waits for completion of all non-blocking commits.
9886 */
3ee6b26b
AD
9887 static int do_acquire_global_lock(struct drm_device *dev,
9888 struct drm_atomic_state *state)
e7b07cee
HW
9889{
9890 struct drm_crtc *crtc;
9891 struct drm_crtc_commit *commit;
9892 long ret;
9893
1f6010a9
DF
9894 /*
9895 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
9896 * ensure that when the framework releases it, the
9897 * extra locks we are taking here will also get released
9898 */
9899 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9900 if (ret)
9901 return ret;
9902
9903 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9904 spin_lock(&crtc->commit_lock);
9905 commit = list_first_entry_or_null(&crtc->commit_list,
9906 struct drm_crtc_commit, commit_entry);
9907 if (commit)
9908 drm_crtc_commit_get(commit);
9909 spin_unlock(&crtc->commit_lock);
9910
9911 if (!commit)
9912 continue;
9913
1f6010a9
DF
9914 /*
9915 * Make sure all pending HW programming completed and
e7b07cee
HW
9916 * page flips done
9917 */
9918 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9919
9920 if (ret > 0)
9921 ret = wait_for_completion_interruptible_timeout(
9922 &commit->flip_done, 10*HZ);
9923
9924 if (ret == 0)
9925 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
b830ebc9 9926 crtc->base.id, crtc->name);
e7b07cee
HW
9927
9928 drm_crtc_commit_put(commit);
9929 }
9930
9931 return ret < 0 ? ret : 0;
9932}
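/*
 * Rough timeline of the fencing above, assuming one blocking commit is
 * still in flight on a CRTC (10*HZ is the timeout used for each wait):
 *
 *   commit thread : ... -> hw_done -> flip_done
 *   this function :        wait(hw_done)   wait(flip_done)   -> return 0
 *
 * Only once both completions have signalled (or 10s elapse and an error
 * is logged) does the caller proceed with global validation.
 */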
9933
bb47de73
NK
9934static void get_freesync_config_for_crtc(
9935 struct dm_crtc_state *new_crtc_state,
9936 struct dm_connector_state *new_con_state)
98e6436d
AK
9937{
9938 struct mod_freesync_config config = {0};
98e6436d
AK
9939 struct amdgpu_dm_connector *aconnector =
9940 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 9941 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 9942 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 9943 bool fs_vid_mode = false;
98e6436d 9944
a057ec46 9945 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
9946 vrefresh >= aconnector->min_vfreq &&
9947 vrefresh <= aconnector->max_vfreq;
bb47de73 9948
a057ec46
IB
9949 if (new_crtc_state->vrr_supported) {
9950 new_crtc_state->stream->ignore_msa_timing_param = true;
a85ba005
NC
9951 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9952
9953 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9954 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 9955 config.vsif_supported = true;
180db303 9956 config.btr = true;
98e6436d 9957
a85ba005
NC
9958 if (fs_vid_mode) {
9959 config.state = VRR_STATE_ACTIVE_FIXED;
9960 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9961 goto out;
9962 } else if (new_crtc_state->base.vrr_enabled) {
9963 config.state = VRR_STATE_ACTIVE_VARIABLE;
9964 } else {
9965 config.state = VRR_STATE_INACTIVE;
9966 }
9967 }
9968out:
bb47de73
NK
9969 new_crtc_state->freesync_config = config;
9970}
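/*
 * Worked example with assumed panel limits: min_vfreq = 48 and
 * max_vfreq = 144 produce config.min_refresh_in_uhz = 48,000,000 and
 * config.max_refresh_in_uhz = 144,000,000, since mod_freesync expects
 * refresh rates in micro-Hz.
 */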
98e6436d 9971
bb47de73
NK
9972static void reset_freesync_config_for_crtc(
9973 struct dm_crtc_state *new_crtc_state)
9974{
9975 new_crtc_state->vrr_supported = false;
98e6436d 9976
bb47de73
NK
9977 memset(&new_crtc_state->vrr_infopacket, 0,
9978 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
9979}
9980
a85ba005
NC
9981static bool
9982is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9983 struct drm_crtc_state *new_crtc_state)
9984{
9985 struct drm_display_mode old_mode, new_mode;
9986
9987 if (!old_crtc_state || !new_crtc_state)
9988 return false;
9989
9990 old_mode = old_crtc_state->mode;
9991 new_mode = new_crtc_state->mode;
9992
9993 if (old_mode.clock == new_mode.clock &&
9994 old_mode.hdisplay == new_mode.hdisplay &&
9995 old_mode.vdisplay == new_mode.vdisplay &&
9996 old_mode.htotal == new_mode.htotal &&
9997 old_mode.vtotal != new_mode.vtotal &&
9998 old_mode.hsync_start == new_mode.hsync_start &&
9999 old_mode.vsync_start != new_mode.vsync_start &&
10000 old_mode.hsync_end == new_mode.hsync_end &&
10001 old_mode.vsync_end != new_mode.vsync_end &&
10002 old_mode.hskew == new_mode.hskew &&
10003 old_mode.vscan == new_mode.vscan &&
10004 (old_mode.vsync_end - old_mode.vsync_start) ==
10005 (new_mode.vsync_end - new_mode.vsync_start))
10006 return true;
10007
10008 return false;
10009}
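/*
 * Illustrative example (timings assumed, not taken from the code): two
 * freesync video modes derived from the same 1920x1080 base share the
 * pixel clock, htotal and sync widths, and differ only in the vertical
 * front porch, e.g. vtotal = 1125 (~60Hz) vs vtotal = 1406 (~48Hz) at a
 * 148.5MHz clock. Such a pair passes the check above, so switching
 * between them can skip the full modeset.
 */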
10010
10011 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10012 uint64_t num, den, res;
10013 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10014
10015 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10016
10017 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10018 den = (unsigned long long)new_crtc_state->mode.htotal *
10019 (unsigned long long)new_crtc_state->mode.vtotal;
10020
10021 res = div_u64(num, den);
10022 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10023}
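/*
 * Worked example with assumed 1080p60 CEA timings: mode.clock = 148500
 * (kHz), htotal = 2200, vtotal = 1125:
 *   num = 148500 * 1000 * 1000000 = 1.485e14
 *   den = 2200 * 1125 = 2475000
 *   res = num / den = 60000000, i.e. 60.000Hz expressed in micro-Hz,
 * matching the micro-Hz units used elsewhere in the freesync config.
 */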
10024
4b9674e5
LL
10025static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10026 struct drm_atomic_state *state,
10027 struct drm_crtc *crtc,
10028 struct drm_crtc_state *old_crtc_state,
10029 struct drm_crtc_state *new_crtc_state,
10030 bool enable,
10031 bool *lock_and_validation_needed)
e7b07cee 10032{
eb3dc897 10033 struct dm_atomic_state *dm_state = NULL;
54d76575 10034 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 10035 struct dc_stream_state *new_stream;
62f55537 10036 int ret = 0;
d4d4a645 10037
1f6010a9
DF
10038 /*
10039 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10040 * update changed items
10041 */
4b9674e5
LL
10042 struct amdgpu_crtc *acrtc = NULL;
10043 struct amdgpu_dm_connector *aconnector = NULL;
10044 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10045 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 10046
4b9674e5 10047 new_stream = NULL;
9635b754 10048
4b9674e5
LL
10049 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10050 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10051 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 10052 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 10053
4b9674e5
LL
10054 /* TODO This hack should go away */
10055 if (aconnector && enable) {
10056 /* Make sure fake sink is created in plug-in scenario */
10057 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10058 &aconnector->base);
10059 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10060 &aconnector->base);
19f89e23 10061
4b9674e5
LL
10062 if (IS_ERR(drm_new_conn_state)) {
10063 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10064 goto fail;
10065 }
19f89e23 10066
4b9674e5
LL
10067 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10068 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 10069
02d35a67
JFZ
10070 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10071 goto skip_modeset;
10072
cbd14ae7
SW
10073 new_stream = create_validate_stream_for_sink(aconnector,
10074 &new_crtc_state->mode,
10075 dm_new_conn_state,
10076 dm_old_crtc_state->stream);
19f89e23 10077
4b9674e5
LL
10078 /*
10079 * We can have no stream on ACTION_SET if a display
10080 * was disconnected during S3. In this case it is not an
10081 * error: the OS will be updated after detection and
10082 * will do the right thing on the next atomic commit.
10083 */
19f89e23 10084
4b9674e5
LL
10085 if (!new_stream) {
10086 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10087 __func__, acrtc->base.base.id);
10088 ret = -ENOMEM;
10089 goto fail;
10090 }
e7b07cee 10091
3d4e52d0
VL
10092 /*
10093 * TODO: Check VSDB bits to decide whether this should
10094 * be enabled or not.
10095 */
10096 new_stream->triggered_crtc_reset.enabled =
10097 dm->force_timing_sync;
10098
4b9674e5 10099 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 10100
88694af9
NK
10101 ret = fill_hdr_info_packet(drm_new_conn_state,
10102 &new_stream->hdr_static_metadata);
10103 if (ret)
10104 goto fail;
10105
7e930949
NK
10106 /*
10107 * If we already removed the old stream from the context
10108 * (and set the new stream to NULL) then we can't reuse
10109 * the old stream even if the stream and scaling are unchanged.
10110 * We'll hit the BUG_ON and black screen.
10111 *
10112 * TODO: Refactor this function to allow this check to work
10113 * in all conditions.
10114 */
a85ba005
NC
10115 if (amdgpu_freesync_vid_mode &&
10116 dm_new_crtc_state->stream &&
10117 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10118 goto skip_modeset;
10119
7e930949
NK
10120 if (dm_new_crtc_state->stream &&
10121 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
10122 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10123 new_crtc_state->mode_changed = false;
10124 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10125 new_crtc_state->mode_changed);
62f55537 10126 }
4b9674e5 10127 }
b830ebc9 10128
02d35a67 10129 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
10130 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10131 goto skip_modeset;
e7b07cee 10132
4711c033 10133 DRM_DEBUG_ATOMIC(
4b9674e5
LL
10134 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10135 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10136 "connectors_changed:%d\n",
10137 acrtc->crtc_id,
10138 new_crtc_state->enable,
10139 new_crtc_state->active,
10140 new_crtc_state->planes_changed,
10141 new_crtc_state->mode_changed,
10142 new_crtc_state->active_changed,
10143 new_crtc_state->connectors_changed);
62f55537 10144
4b9674e5
LL
10145 /* Remove stream for any changed/disabled CRTC */
10146 if (!enable) {
62f55537 10147
4b9674e5
LL
10148 if (!dm_old_crtc_state->stream)
10149 goto skip_modeset;
eb3dc897 10150
a85ba005
NC
10151 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10152 is_timing_unchanged_for_freesync(new_crtc_state,
10153 old_crtc_state)) {
10154 new_crtc_state->mode_changed = false;
10155 DRM_DEBUG_DRIVER(
10156 "Mode change not required for front porch change, "
10157 "setting mode_changed to %d",
10158 new_crtc_state->mode_changed);
10159
10160 set_freesync_fixed_config(dm_new_crtc_state);
10161
10162 goto skip_modeset;
10163 } else if (amdgpu_freesync_vid_mode && aconnector &&
10164 is_freesync_video_mode(&new_crtc_state->mode,
10165 aconnector)) {
e88ebd83
SC
10166 struct drm_display_mode *high_mode;
10167
10168 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10169 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10170 set_freesync_fixed_config(dm_new_crtc_state);
10171 }
a85ba005
NC
10172 }
10173
4b9674e5
LL
10174 ret = dm_atomic_get_state(state, &dm_state);
10175 if (ret)
10176 goto fail;
e7b07cee 10177
4b9674e5
LL
10178 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10179 crtc->base.id);
62f55537 10180
4b9674e5
LL
10181 /* i.e. reset mode */
10182 if (dc_remove_stream_from_ctx(
10183 dm->dc,
10184 dm_state->context,
10185 dm_old_crtc_state->stream) != DC_OK) {
10186 ret = -EINVAL;
10187 goto fail;
10188 }
62f55537 10189
4b9674e5
LL
10190 dc_stream_release(dm_old_crtc_state->stream);
10191 dm_new_crtc_state->stream = NULL;
bb47de73 10192
4b9674e5 10193 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 10194
4b9674e5 10195 *lock_and_validation_needed = true;
62f55537 10196
4b9674e5
LL
10197 } else { /* Add stream for any updated/enabled CRTC */
10198 /*
10199 * Quick fix to prevent a NULL pointer dereference on new_stream when
10200 * MST connectors added in chained mode are not found in the existing crtc_state.
10201 * TODO: need to dig out the root cause of that
10202 */
10203 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10204 goto skip_modeset;
62f55537 10205
4b9674e5
LL
10206 if (modereset_required(new_crtc_state))
10207 goto skip_modeset;
62f55537 10208
4b9674e5
LL
10209 if (modeset_required(new_crtc_state, new_stream,
10210 dm_old_crtc_state->stream)) {
62f55537 10211
4b9674e5 10212 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 10213
4b9674e5
LL
10214 ret = dm_atomic_get_state(state, &dm_state);
10215 if (ret)
10216 goto fail;
27b3f4fc 10217
4b9674e5 10218 dm_new_crtc_state->stream = new_stream;
62f55537 10219
4b9674e5 10220 dc_stream_retain(new_stream);
1dc90497 10221
4711c033
LT
10222 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10223 crtc->base.id);
1dc90497 10224
4b9674e5
LL
10225 if (dc_add_stream_to_ctx(
10226 dm->dc,
10227 dm_state->context,
10228 dm_new_crtc_state->stream) != DC_OK) {
10229 ret = -EINVAL;
10230 goto fail;
9b690ef3
BL
10231 }
10232
4b9674e5
LL
10233 *lock_and_validation_needed = true;
10234 }
10235 }
e277adc5 10236
4b9674e5
LL
10237skip_modeset:
10238 /* Release extra reference */
10239 if (new_stream)
10240 dc_stream_release(new_stream);
e277adc5 10241
4b9674e5
LL
10242 /*
10243 * We want to do dc stream updates that do not require a
10244 * full modeset below.
10245 */
2afda735 10246 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
10247 return 0;
10248 /*
10249 * Given above conditions, the dc state cannot be NULL because:
10250 * 1. We're in the process of enabling CRTCs (the stream has just
10251 * been added to the dc context, or is already on it)
10252 * 2. Has a valid connector attached, and
10253 * 3. Is currently active and enabled.
10254 * => The dc stream state currently exists.
10255 */
10256 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 10257
4b9674e5 10258 /* Scaling or underscan settings */
c521fc31
RL
10259 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10260 drm_atomic_crtc_needs_modeset(new_crtc_state))
4b9674e5
LL
10261 update_stream_scaling_settings(
10262 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 10263
b05e2c5e
DF
10264 /* ABM settings */
10265 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10266
4b9674e5
LL
10267 /*
10268 * Color management settings. We also update color properties
10269 * when a modeset is needed, to ensure it gets reprogrammed.
10270 */
10271 if (dm_new_crtc_state->base.color_mgmt_changed ||
10272 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 10273 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
10274 if (ret)
10275 goto fail;
62f55537 10276 }
e7b07cee 10277
4b9674e5
LL
10278 /* Update Freesync settings. */
10279 get_freesync_config_for_crtc(dm_new_crtc_state,
10280 dm_new_conn_state);
10281
62f55537 10282 return ret;
9635b754
DS
10283
10284fail:
10285 if (new_stream)
10286 dc_stream_release(new_stream);
10287 return ret;
62f55537 10288}
9b690ef3 10289
f6ff2a08
NK
10290static bool should_reset_plane(struct drm_atomic_state *state,
10291 struct drm_plane *plane,
10292 struct drm_plane_state *old_plane_state,
10293 struct drm_plane_state *new_plane_state)
10294{
10295 struct drm_plane *other;
10296 struct drm_plane_state *old_other_state, *new_other_state;
10297 struct drm_crtc_state *new_crtc_state;
10298 int i;
10299
70a1efac
NK
10300 /*
10301 * TODO: Remove this hack once the checks below are sufficient
10302 * to determine when we need to reset all the planes on
10303 * the stream.
10304 */
10305 if (state->allow_modeset)
10306 return true;
10307
f6ff2a08
NK
10308 /* Exit early if we know that we're adding or removing the plane. */
10309 if (old_plane_state->crtc != new_plane_state->crtc)
10310 return true;
10311
10312 /* old crtc == new_crtc == NULL, plane not in context. */
10313 if (!new_plane_state->crtc)
10314 return false;
10315
10316 new_crtc_state =
10317 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10318
10319 if (!new_crtc_state)
10320 return true;
10321
7316c4ad
NK
10322 /* CRTC Degamma changes currently require us to recreate planes. */
10323 if (new_crtc_state->color_mgmt_changed)
10324 return true;
10325
f6ff2a08
NK
10326 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10327 return true;
10328
10329 /*
10330 * If there are any new primary or overlay planes being added or
10331 * removed then the z-order can potentially change. To ensure
10332 * correct z-order and pipe acquisition the current DC architecture
10333 * requires us to remove and recreate all existing planes.
10334 *
10335 * TODO: Come up with a more elegant solution for this.
10336 */
10337 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 10338 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
10339 if (other->type == DRM_PLANE_TYPE_CURSOR)
10340 continue;
10341
10342 if (old_other_state->crtc != new_plane_state->crtc &&
10343 new_other_state->crtc != new_plane_state->crtc)
10344 continue;
10345
10346 if (old_other_state->crtc != new_other_state->crtc)
10347 return true;
10348
dc4cb30d
NK
10349 /* Src/dst size and scaling updates. */
10350 if (old_other_state->src_w != new_other_state->src_w ||
10351 old_other_state->src_h != new_other_state->src_h ||
10352 old_other_state->crtc_w != new_other_state->crtc_w ||
10353 old_other_state->crtc_h != new_other_state->crtc_h)
10354 return true;
10355
10356 /* Rotation / mirroring updates. */
10357 if (old_other_state->rotation != new_other_state->rotation)
10358 return true;
10359
10360 /* Blending updates. */
10361 if (old_other_state->pixel_blend_mode !=
10362 new_other_state->pixel_blend_mode)
10363 return true;
10364
10365 /* Alpha updates. */
10366 if (old_other_state->alpha != new_other_state->alpha)
10367 return true;
10368
10369 /* Colorspace changes. */
10370 if (old_other_state->color_range != new_other_state->color_range ||
10371 old_other_state->color_encoding != new_other_state->color_encoding)
10372 return true;
10373
9a81cc60
NK
10374 /* Framebuffer checks fall at the end. */
10375 if (!old_other_state->fb || !new_other_state->fb)
10376 continue;
10377
10378 /* Pixel format changes can require bandwidth updates. */
10379 if (old_other_state->fb->format != new_other_state->fb->format)
10380 return true;
10381
6eed95b0
BN
10382 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10383 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
10384
10385 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
10386 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10387 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
10388 return true;
10389 }
10390
10391 return false;
10392}
10393
b0455fda
SS
10394static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10395 struct drm_plane_state *new_plane_state,
10396 struct drm_framebuffer *fb)
10397{
e72868c4
SS
10398 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10399 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 10400 unsigned int pitch;
e72868c4 10401 bool linear;
b0455fda
SS
10402
10403 if (fb->width > new_acrtc->max_cursor_width ||
10404 fb->height > new_acrtc->max_cursor_height) {
10405 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10406 new_plane_state->fb->width,
10407 new_plane_state->fb->height);
10408 return -EINVAL;
10409 }
10410 if (new_plane_state->src_w != fb->width << 16 ||
10411 new_plane_state->src_h != fb->height << 16) {
10412 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10413 return -EINVAL;
10414 }
10415
10416 /* Pitch in pixels */
10417 pitch = fb->pitches[0] / fb->format->cpp[0];
10418
10419 if (fb->width != pitch) {
10420 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10421 fb->width, pitch);
10422 return -EINVAL;
10423 }
10424
10425 switch (pitch) {
10426 case 64:
10427 case 128:
10428 case 256:
10429 /* FB pitch is supported by cursor plane */
10430 break;
10431 default:
10432 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10433 return -EINVAL;
10434 }
10435
e72868c4
SS
10436 /* Core DRM takes care of checking FB modifiers, so we only need to
10437 * check tiling flags when the FB doesn't have a modifier. */
10438 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10439 if (adev->family < AMDGPU_FAMILY_AI) {
10440 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10441 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10442 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10443 } else {
10444 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10445 }
10446 if (!linear) {
10447 DRM_DEBUG_ATOMIC("Cursor FB not linear");
10448 return -EINVAL;
10449 }
10450 }
10451
b0455fda
SS
10452 return 0;
10453}
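/*
 * Worked example with an assumed 64x64 ARGB8888 cursor FB:
 * fb->pitches[0] = 64 * 4 = 256 bytes, fb->format->cpp[0] = 4, so
 * pitch = 256 / 4 = 64 pixels; that equals fb->width and is one of the
 * pitches (64/128/256) accepted by the switch above.
 */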
10454
9e869063
LL
10455static int dm_update_plane_state(struct dc *dc,
10456 struct drm_atomic_state *state,
10457 struct drm_plane *plane,
10458 struct drm_plane_state *old_plane_state,
10459 struct drm_plane_state *new_plane_state,
10460 bool enable,
10461 bool *lock_and_validation_needed)
62f55537 10462{
eb3dc897
NK
10463
10464 struct dm_atomic_state *dm_state = NULL;
62f55537 10465 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 10466 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 10467 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 10468 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 10469 struct amdgpu_crtc *new_acrtc;
f6ff2a08 10470 bool needs_reset;
62f55537 10471 int ret = 0;
e7b07cee 10472
9b690ef3 10473
9e869063
LL
10474 new_plane_crtc = new_plane_state->crtc;
10475 old_plane_crtc = old_plane_state->crtc;
10476 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10477 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 10478
626bf90f
SS
10479 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10480 if (!enable || !new_plane_crtc ||
10481 drm_atomic_plane_disabling(plane->state, new_plane_state))
10482 return 0;
10483
10484 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10485
5f581248
SS
10486 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10487 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10488 return -EINVAL;
10489 }
10490
24f99d2b 10491 if (new_plane_state->fb) {
b0455fda
SS
10492 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10493 new_plane_state->fb);
10494 if (ret)
10495 return ret;
24f99d2b
SS
10496 }
10497
9e869063 10498 return 0;
626bf90f 10499 }
9b690ef3 10500
f6ff2a08
NK
10501 needs_reset = should_reset_plane(state, plane, old_plane_state,
10502 new_plane_state);
10503
9e869063
LL
10504 /* Remove any changed/removed planes */
10505 if (!enable) {
f6ff2a08 10506 if (!needs_reset)
9e869063 10507 return 0;
a7b06724 10508
9e869063
LL
10509 if (!old_plane_crtc)
10510 return 0;
62f55537 10511
9e869063
LL
10512 old_crtc_state = drm_atomic_get_old_crtc_state(
10513 state, old_plane_crtc);
10514 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 10515
9e869063
LL
10516 if (!dm_old_crtc_state->stream)
10517 return 0;
62f55537 10518
9e869063
LL
10519 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10520 plane->base.id, old_plane_crtc->base.id);
9b690ef3 10521
9e869063
LL
10522 ret = dm_atomic_get_state(state, &dm_state);
10523 if (ret)
10524 return ret;
eb3dc897 10525
9e869063
LL
10526 if (!dc_remove_plane_from_context(
10527 dc,
10528 dm_old_crtc_state->stream,
10529 dm_old_plane_state->dc_state,
10530 dm_state->context)) {
62f55537 10531
c3537613 10532 return -EINVAL;
9e869063 10533 }
e7b07cee 10534
9b690ef3 10535
9e869063
LL
10536 dc_plane_state_release(dm_old_plane_state->dc_state);
10537 dm_new_plane_state->dc_state = NULL;
1dc90497 10538
9e869063 10539 *lock_and_validation_needed = true;
1dc90497 10540
9e869063
LL
10541 } else { /* Add new planes */
10542 struct dc_plane_state *dc_new_plane_state;
1dc90497 10543
9e869063
LL
10544 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10545 return 0;
e7b07cee 10546
9e869063
LL
10547 if (!new_plane_crtc)
10548 return 0;
e7b07cee 10549
9e869063
LL
10550 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10551 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 10552
9e869063
LL
10553 if (!dm_new_crtc_state->stream)
10554 return 0;
62f55537 10555
f6ff2a08 10556 if (!needs_reset)
9e869063 10557 return 0;
62f55537 10558
8c44515b
AP
10559 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10560 if (ret)
10561 return ret;
10562
9e869063 10563 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 10564
9e869063
LL
10565 dc_new_plane_state = dc_create_plane_state(dc);
10566 if (!dc_new_plane_state)
10567 return -ENOMEM;
62f55537 10568
4711c033
LT
10569 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10570 plane->base.id, new_plane_crtc->base.id);
8c45c5db 10571
695af5f9 10572 ret = fill_dc_plane_attributes(
1348969a 10573 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
10574 dc_new_plane_state,
10575 new_plane_state,
10576 new_crtc_state);
10577 if (ret) {
10578 dc_plane_state_release(dc_new_plane_state);
10579 return ret;
10580 }
62f55537 10581
9e869063
LL
10582 ret = dm_atomic_get_state(state, &dm_state);
10583 if (ret) {
10584 dc_plane_state_release(dc_new_plane_state);
10585 return ret;
10586 }
eb3dc897 10587
9e869063
LL
10588 /*
10589 * Any atomic check errors that occur after this will
10590 * not need a release. The plane state will be attached
10591 * to the stream, and therefore part of the atomic
10592 * state. It'll be released when the atomic state is
10593 * cleaned.
10594 */
10595 if (!dc_add_plane_to_context(
10596 dc,
10597 dm_new_crtc_state->stream,
10598 dc_new_plane_state,
10599 dm_state->context)) {
62f55537 10600
9e869063
LL
10601 dc_plane_state_release(dc_new_plane_state);
10602 return -EINVAL;
10603 }
8c45c5db 10604
9e869063 10605 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 10606
9e869063
LL
10607 /* Tell DC to do a full surface update every time there
10608 * is a plane change. Inefficient, but works for now.
10609 */
10610 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10611
10612 *lock_and_validation_needed = true;
62f55537 10613 }
e7b07cee
HW
10614
10615
62f55537
AG
10616 return ret;
10617}
a87fa993 10618
12f4849a
SS
10619static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10620 struct drm_crtc *crtc,
10621 struct drm_crtc_state *new_crtc_state)
10622{
d1bfbe8a
SS
10623 struct drm_plane *cursor = crtc->cursor, *underlying;
10624 struct drm_plane_state *new_cursor_state, *new_underlying_state;
10625 int i;
10626 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
12f4849a
SS
10627
10628 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10629 * cursor per pipe but it's going to inherit the scaling and
10630 * positioning from the underlying pipe. Check the cursor plane's
d1bfbe8a 10631 * blending properties match the underlying planes'. */
12f4849a 10632
d1bfbe8a
SS
10633 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10634 if (!new_cursor_state || !new_cursor_state->fb) {
12f4849a
SS
10635 return 0;
10636 }
10637
10638 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10639 (new_cursor_state->src_w >> 16);
10640 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10641 (new_cursor_state->src_h >> 16);
10642
d1bfbe8a
SS
10643 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10644 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10645 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10646 continue;
12f4849a 10647
d1bfbe8a
SS
10648 /* Ignore disabled planes */
10649 if (!new_underlying_state->fb)
10650 continue;
10651
10652 underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10653 (new_underlying_state->src_w >> 16);
10654 underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10655 (new_underlying_state->src_h >> 16);
10656
10657 if (cursor_scale_w != underlying_scale_w ||
10658 cursor_scale_h != underlying_scale_h) {
10659 drm_dbg_atomic(crtc->dev,
10660 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10661 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10662 return -EINVAL;
10663 }
10664
10665 /* If this plane covers the whole CRTC, no need to check planes underneath */
10666 if (new_underlying_state->crtc_x <= 0 &&
10667 new_underlying_state->crtc_y <= 0 &&
10668 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10669 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10670 break;
12f4849a
SS
10671 }
10672
10673 return 0;
10674}
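/*
 * Worked example with assumed plane geometry: a 64x64 cursor shown in a
 * 64x64 rect gives cursor_scale_w = 64 * 1000 / 64 = 1000 (1.000x in
 * per-mille). An underlying plane scanning a 1920-wide source into a
 * 3840-wide CRTC rect has a scale of 2000, so the factors differ and
 * the loop above rejects the commit with -EINVAL.
 */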
10675
e10517b3 10676#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
10677static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10678{
10679 struct drm_connector *connector;
10680 struct drm_connector_state *conn_state;
10681 struct amdgpu_dm_connector *aconnector = NULL;
10682 int i;

10683 for_each_new_connector_in_state(state, connector, conn_state, i) {
10684 if (conn_state->crtc != crtc)
10685 continue;
10686
10687 aconnector = to_amdgpu_dm_connector(connector);
10688 if (!aconnector->port || !aconnector->mst_port)
10689 aconnector = NULL;
10690 else
10691 break;
10692 }
10693
10694 if (!aconnector)
10695 return 0;
10696
10697 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10698}
e10517b3 10699#endif
44be939f 10700
b8592b48
LL
10701/**
10702 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10703 * @dev: The DRM device
10704 * @state: The atomic state to commit
10705 *
10706 * Validate that the given atomic state is programmable by DC into hardware.
10707 * This involves constructing a &struct dc_state reflecting the new hardware
10708 * state we wish to commit, then querying DC to see if it is programmable. It's
10709 * important not to modify the existing DC state. Otherwise, atomic_check
10710 * may unexpectedly commit hardware changes.
10711 *
10712 * When validating the DC state, it's important that the right locks are
10713 * acquired. For full updates case which removes/adds/updates streams on one
10714 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10715 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 10716 * flip using DRMs synchronization events.
b8592b48
LL
10717 *
10718 * Note that DM adds the affected connectors for all CRTCs in state, when that
10719 * might not seem necessary. This is because DC stream creation requires the
10720 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10721 * be possible but non-trivial - a possible TODO item.
10722 *
10723 * Return: -Error code if validation failed.
10724 */
7578ecda
AD
10725static int amdgpu_dm_atomic_check(struct drm_device *dev,
10726 struct drm_atomic_state *state)
62f55537 10727{
1348969a 10728 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 10729 struct dm_atomic_state *dm_state = NULL;
62f55537 10730 struct dc *dc = adev->dm.dc;
62f55537 10731 struct drm_connector *connector;
c2cea706 10732 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 10733 struct drm_crtc *crtc;
fc9e9920 10734 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
10735 struct drm_plane *plane;
10736 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 10737 enum dc_status status;
1e88ad0a 10738 int ret, i;
62f55537 10739 bool lock_and_validation_needed = false;
886876ec 10740 struct dm_crtc_state *dm_old_crtc_state;
6513104b
HW
10741#if defined(CONFIG_DRM_AMD_DC_DCN)
10742 struct dsc_mst_fairness_vars vars[MAX_PIPES];
41724ea2
BL
10743 struct drm_dp_mst_topology_state *mst_state;
10744 struct drm_dp_mst_topology_mgr *mgr;
6513104b 10745#endif
62f55537 10746
e8a98235 10747 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 10748
62f55537 10749 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
10750 if (ret)
10751 goto fail;
62f55537 10752
c5892a10
SW
10753 /* Check connector changes */
10754 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10755 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10756 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10757
10758 /* Skip connectors that are disabled or part of modeset already. */
10759 if (!old_con_state->crtc && !new_con_state->crtc)
10760 continue;
10761
10762 if (!new_con_state->crtc)
10763 continue;
10764
10765 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10766 if (IS_ERR(new_crtc_state)) {
10767 ret = PTR_ERR(new_crtc_state);
10768 goto fail;
10769 }
10770
10771 if (dm_old_con_state->abm_level !=
10772 dm_new_con_state->abm_level)
10773 new_crtc_state->connectors_changed = true;
10774 }
10775
e10517b3 10776#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 10777 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
10778 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10779 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10780 ret = add_affected_mst_dsc_crtcs(state, crtc);
10781 if (ret)
10782 goto fail;
10783 }
10784 }
10785 }
e10517b3 10786#endif
1e88ad0a 10787 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
10788 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10789
1e88ad0a 10790 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 10791 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
10792 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10793 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 10794 continue;
7bef1af3 10795
03fc4cf4
MY
10796 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10797 if (ret)
10798 goto fail;
10799
1e88ad0a
S
10800 if (!new_crtc_state->enable)
10801 continue;
fc9e9920 10802
1e88ad0a
S
10803 ret = drm_atomic_add_affected_connectors(state, crtc);
10804 if (ret)
10805 return ret;
fc9e9920 10806
1e88ad0a
S
10807 ret = drm_atomic_add_affected_planes(state, crtc);
10808 if (ret)
10809 goto fail;
115a385c 10810
cbac53f7 10811 if (dm_old_crtc_state->dsc_force_changed)
115a385c 10812 new_crtc_state->mode_changed = true;
e7b07cee
HW
10813 }
10814
2d9e6431
NK
10815 /*
10816 * Add all primary and overlay planes on the CRTC to the state
10817 * whenever a plane is enabled to maintain correct z-ordering
10818 * and to enable fast surface updates.
10819 */
10820 drm_for_each_crtc(crtc, dev) {
10821 bool modified = false;
10822
10823 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10824 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10825 continue;
10826
10827 if (new_plane_state->crtc == crtc ||
10828 old_plane_state->crtc == crtc) {
10829 modified = true;
10830 break;
10831 }
10832 }
10833
10834 if (!modified)
10835 continue;
10836
10837 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10838 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10839 continue;
10840
10841 new_plane_state =
10842 drm_atomic_get_plane_state(state, plane);
10843
10844 if (IS_ERR(new_plane_state)) {
10845 ret = PTR_ERR(new_plane_state);
10846 goto fail;
10847 }
10848 }
10849 }
10850
62f55537 10851 /* Remove exiting planes if they are modified */
9e869063
LL
10852 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10853 ret = dm_update_plane_state(dc, state, plane,
10854 old_plane_state,
10855 new_plane_state,
10856 false,
10857 &lock_and_validation_needed);
10858 if (ret)
10859 goto fail;
62f55537
AG
10860 }
10861
10862 /* Disable all crtcs which require disable */
4b9674e5
LL
10863 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10864 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10865 old_crtc_state,
10866 new_crtc_state,
10867 false,
10868 &lock_and_validation_needed);
10869 if (ret)
10870 goto fail;
62f55537
AG
10871 }
10872
10873 /* Enable all crtcs which require enable */
4b9674e5
LL
10874 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10875 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10876 old_crtc_state,
10877 new_crtc_state,
10878 true,
10879 &lock_and_validation_needed);
10880 if (ret)
10881 goto fail;
62f55537
AG
10882 }
10883
10884 /* Add new/modified planes */
9e869063
LL
10885 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10886 ret = dm_update_plane_state(dc, state, plane,
10887 old_plane_state,
10888 new_plane_state,
10889 true,
10890 &lock_and_validation_needed);
10891 if (ret)
10892 goto fail;
62f55537
AG
10893 }
10894
b349f76e
ES
10895 /* Run this here since we want to validate the streams we created */
10896 ret = drm_atomic_helper_check_planes(dev, state);
10897 if (ret)
10898 goto fail;
62f55537 10899
12f4849a
SS
10900 /* Check cursor planes scaling */
10901 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10902 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10903 if (ret)
10904 goto fail;
10905 }
10906
43d10d30
NK
10907 if (state->legacy_cursor_update) {
10908 /*
10909 * This is a fast cursor update coming from the plane update
10910 * helper, check if it can be done asynchronously for better
10911 * performance.
10912 */
10913 state->async_update =
10914 !drm_atomic_helper_async_check(dev, state);
10915
10916 /*
10917 * Skip the remaining global validation if this is an async
10918 * update. Cursor updates can be done without affecting
10919 * state or bandwidth calcs and this avoids the performance
10920 * penalty of locking the private state object and
10921 * allocating a new dc_state.
10922 */
10923 if (state->async_update)
10924 return 0;
10925 }
10926
ebdd27e1 10927 /* Check scaling and underscan changes */
1f6010a9 10928 /* TODO Removed scaling changes validation due to inability to commit
e7b07cee
HW
10929 * new stream into context w/o causing full reset. Need to
10930 * decide how to handle.
10931 */
c2cea706 10932 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
10933 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10934 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10935 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
10936
10937 /* Skip any modesets/resets */
0bc9706d
LSL
10938 if (!acrtc || drm_atomic_crtc_needs_modeset(
10939 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
10940 continue;
10941
b830ebc9 10942 /* Skip any thing not scale or underscan changes */
54d76575 10943 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
10944 continue;
10945
10946 lock_and_validation_needed = true;
10947 }
10948
41724ea2
BL
10949#if defined(CONFIG_DRM_AMD_DC_DCN)
10950 /* set the slot info for each mst_state based on the link encoding format */
10951 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10952 struct amdgpu_dm_connector *aconnector;
10953 struct drm_connector *connector;
10954 struct drm_connector_list_iter iter;
10955 u8 link_coding_cap;
10956
10957 if (!mgr->mst_state )
10958 continue;
10959
10960 drm_connector_list_iter_begin(dev, &iter);
10961 drm_for_each_connector_iter(connector, &iter) {
10962 int id = connector->index;
10963
10964 if (id == mst_state->mgr->conn_base_id) {
10965 aconnector = to_amdgpu_dm_connector(connector);
10966 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10967 drm_dp_mst_update_slots(mst_state, link_coding_cap);
10968
10969 break;
10970 }
10971 }
10972 drm_connector_list_iter_end(&iter);
10973
10974 }
10975#endif
f6d7c7fa
NK
10976 /*
10977 * Streams and planes are reset when there are changes that affect
10978 * bandwidth. Anything that affects bandwidth needs to go through
10979 * DC global validation to ensure that the configuration can be applied
10980 * to hardware.
10981 *
10982 * We have to currently stall out here in atomic_check for outstanding
10983 * commits to finish in this case because our IRQ handlers reference
10984 * DRM state directly - we can end up disabling interrupts too early
10985 * if we don't.
10986 *
10987 * TODO: Remove this stall and drop DM state private objects.
a87fa993 10988 */
f6d7c7fa 10989 if (lock_and_validation_needed) {
eb3dc897
NK
10990 ret = dm_atomic_get_state(state, &dm_state);
10991 if (ret)
10992 goto fail;
e7b07cee
HW
10993
10994 ret = do_acquire_global_lock(dev, state);
10995 if (ret)
10996 goto fail;
1dc90497 10997
d9fe1a4c 10998#if defined(CONFIG_DRM_AMD_DC_DCN)
6513104b 10999 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
8c20a1ed
DF
11000 goto fail;
11001
6513104b 11002 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
29b9ba74
ML
11003 if (ret)
11004 goto fail;
d9fe1a4c 11005#endif
29b9ba74 11006
ded58c7b
ZL
11007 /*
11008 * Perform validation of MST topology in the state:
11009 * We need to perform MST atomic check before calling
11010 * dc_validate_global_state(), or there is a chance
11011 * to get stuck in an infinite loop and hang eventually.
11012 */
11013 ret = drm_dp_mst_atomic_check(state);
11014 if (ret)
11015 goto fail;
74a16675
RS
11016 status = dc_validate_global_state(dc, dm_state->context, false);
11017 if (status != DC_OK) {
a906331c
SS
11018 drm_dbg_atomic(dev,
11019 "DC global validation failure: %s (%d)",
74a16675 11020 dc_status_to_str(status), status);
e7b07cee
HW
11021 ret = -EINVAL;
11022 goto fail;
11023 }
bd200d19 11024 } else {
674e78ac 11025 /*
bd200d19
NK
11026 * The commit is a fast update. Fast updates shouldn't change
11027 * the DC context, affect global validation, and can have their
11028 * commit work done in parallel with other commits not touching
11029 * the same resource. If we have a new DC context as part of
11030 * the DM atomic state from validation we need to free it and
11031 * retain the existing one instead.
fde9f39a
MR
11032 *
11033 * Furthermore, since the DM atomic state only contains the DC
11034 * context and can safely be annulled, we can free the state
11035 * and clear the associated private object now to free
11036 * some memory and avoid a possible use-after-free later.
674e78ac 11037 */
bd200d19 11038
fde9f39a
MR
11039 for (i = 0; i < state->num_private_objs; i++) {
11040 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 11041
fde9f39a
MR
11042 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11043 int j = state->num_private_objs-1;
bd200d19 11044
fde9f39a
MR
11045 dm_atomic_destroy_state(obj,
11046 state->private_objs[i].state);
11047
11048 /* If i is not at the end of the array then the
11049 * last element needs to be moved to where i was
11050 * before the array can safely be truncated.
11051 */
11052 if (i != j)
11053 state->private_objs[i] =
11054 state->private_objs[j];
bd200d19 11055
fde9f39a
MR
11056 state->private_objs[j].ptr = NULL;
11057 state->private_objs[j].state = NULL;
11058 state->private_objs[j].old_state = NULL;
11059 state->private_objs[j].new_state = NULL;
11060
11061 state->num_private_objs = j;
11062 break;
11063 }
bd200d19 11064 }
e7b07cee
HW
11065 }
11066
caff0e66
NK
11067 /* Store the overall update type for use later in atomic check. */
11068 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11069 struct dm_crtc_state *dm_new_crtc_state =
11070 to_dm_crtc_state(new_crtc_state);
11071
f6d7c7fa
NK
11072 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11073 UPDATE_TYPE_FULL :
11074 UPDATE_TYPE_FAST;
e7b07cee
HW
11075 }
11076
11077 /* Must be success */
11078 WARN_ON(ret);
e8a98235
RS
11079
11080 trace_amdgpu_dm_atomic_check_finish(state, ret);
11081
e7b07cee
HW
11082 return ret;
11083
11084fail:
11085 if (ret == -EDEADLK)
01e28f9c 11086 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 11087 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 11088 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 11089 else
01e28f9c 11090 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee 11091
e8a98235
RS
11092 trace_amdgpu_dm_atomic_check_finish(state, ret);
11093
e7b07cee
HW
11094 return ret;
11095}
11096
3ee6b26b
AD
11097static bool is_dp_capable_without_timing_msa(struct dc *dc,
11098 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
11099{
11100 uint8_t dpcd_data;
11101 bool capable = false;
11102
c84dec2f 11103 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
11104 dm_helpers_dp_read_dpcd(
11105 NULL,
c84dec2f 11106 amdgpu_dm_connector->dc_link,
e7b07cee
HW
11107 DP_DOWN_STREAM_PORT_COUNT,
11108 &dpcd_data,
11109 sizeof(dpcd_data))) {
11110 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11111 }
11112
11113 return capable;
11114}
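/*
 * Background note (from the DisplayPort DPCD layout as the editor reads
 * it, not from this change): DP_MSA_TIMING_PAR_IGNORED is bit 6 of the
 * DP_DOWN_STREAM_PORT_COUNT register at DPCD 0x0007. A sink setting it
 * may ignore the MSA timing parameters, which is what allows the source
 * to stretch vertical blanking for adaptive sync.
 */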
f9b4f20c 11115
46db138d
SW
11116static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11117 unsigned int offset,
11118 unsigned int total_length,
11119 uint8_t *data,
11120 unsigned int length,
11121 struct amdgpu_hdmi_vsdb_info *vsdb)
11122{
11123 bool res;
11124 union dmub_rb_cmd cmd;
11125 struct dmub_cmd_send_edid_cea *input;
11126 struct dmub_cmd_edid_cea_output *output;
11127
11128 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11129 return false;
11130
11131 memset(&cmd, 0, sizeof(cmd));
11132
11133 input = &cmd.edid_cea.data.input;
11134
11135 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11136 cmd.edid_cea.header.sub_type = 0;
11137 cmd.edid_cea.header.payload_bytes =
11138 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11139 input->offset = offset;
11140 input->length = length;
11141 input->total_length = total_length;
11142 memcpy(input->payload, data, length);
11143
11144 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11145 if (!res) {
11146 DRM_ERROR("EDID CEA parser failed\n");
11147 return false;
11148 }
11149
11150 output = &cmd.edid_cea.data.output;
11151
11152 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11153 if (!output->ack.success) {
11154 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11155 output->ack.offset);
11156 }
11157 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11158 if (!output->amd_vsdb.vsdb_found)
11159 return false;
11160
11161 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11162 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11163 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11164 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11165 } else {
b76a8062 11166 DRM_WARN("Unknown EDID CEA parser results\n");
46db138d
SW
11167 return false;
11168 }
11169
11170 return true;
11171}
11172
11173static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
f9b4f20c
SW
11174 uint8_t *edid_ext, int len,
11175 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11176{
11177 int i;
f9b4f20c
SW
11178
11179 /* send extension block to DMCU for parsing */
11180 for (i = 0; i < len; i += 8) {
11181 bool res;
11182 int offset;
11183
11184 /* send 8 bytes a time */
46db138d 11185 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
f9b4f20c
SW
11186 return false;
11187
11188 if (i+8 == len) {
11189 /* EDID block sent completed, expect result */
11190 int version, min_rate, max_rate;
11191
46db138d 11192 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
f9b4f20c
SW
11193 if (res) {
11194 /* amd vsdb found */
11195 vsdb_info->freesync_supported = 1;
11196 vsdb_info->amd_vsdb_version = version;
11197 vsdb_info->min_refresh_rate_hz = min_rate;
11198 vsdb_info->max_refresh_rate_hz = max_rate;
11199 return true;
11200 }
11201 /* not amd vsdb */
11202 return false;
11203 }
11204
11205 /* check for ack*/
46db138d 11206 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
f9b4f20c
SW
11207 if (!res)
11208 return false;
11209 }
11210
11211 return false;
11212}
static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
			return false;
	}

	return vsdb_info->freesync_supported;
}
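
/* Dispatch CEA parsing to DMUB when available, otherwise fall back to DMCU. */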
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);

	if (adev->dm.dmub_srv)
		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
	else
		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
}
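
/*
 * Locate the CEA extension block in @edid and hand it to the firmware parser.
 * Returns the index of the extension block containing a valid AMD VSDB, or
 * -ENODEV when there is no EDID, no CEA extension or no VSDB.
 */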
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}
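
/*
 * Refresh the connector's FreeSync state from a freshly parsed EDID. DP/eDP
 * sinks that can ignore the MSA timing parameter are probed via the EDID
 * monitor-range descriptor; HDMI sinks are probed for an AMD VSDB through the
 * firmware CEA parser. A refresh range wider than 10 Hz is treated as
 * variable-refresh capable, and the result is mirrored into the DRM
 * "vrr_capable" connector property.
 */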
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
		goto update;
	}

	sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		if (edid_check_required && (edid->version > 1 ||
				(edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information is provided.
				 * Default GTF, GTF secondary curve and CVT are not
				 * supported.
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10) {
				freesync_capable = true;
			}
		}
	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
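
/*
 * Propagate the current force_timing_sync setting to every active stream and
 * trigger a per-frame CRTC master/slave resynchronization, all under the dc
 * lock.
 */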
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}
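
/*
 * Register access wrappers for DC. Both paths trace each access; when
 * DM_CHECK_ADDR_0 is defined they also reject address 0. The read path
 * additionally asserts when a DMUB register-offload gather is in progress
 * and burst writes are not enabled.
 */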
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}
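
/*
 * Translate the outcome of an asynchronous DMUB access into the synchronous
 * convention used by the callers: the AUX reply length (or 0 for SET_CONFIG)
 * on success, -1 otherwise, with a detailed status code stored in
 * @operation_result.
 */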
int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
					 uint8_t status_type, uint32_t *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int return_status = -1;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;

	if (is_cmd_aux) {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = p_notify->aux_reply.length;
			*operation_result = p_notify->result;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
			*operation_result = AUX_RET_ERROR_TIMEOUT;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		} else {
			*operation_result = AUX_RET_ERROR_UNKNOWN;
		}
	} else {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = 0;
			*operation_result = p_notify->sc_status;
		} else {
			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
		}
	}

	return return_status;
}
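
/*
 * Issue an AUX or SET_CONFIG request through DMUB and wait up to 10 s on the
 * dmub_aux_transfer_done completion for the reply, then fold the notification
 * into a synchronous status via the helper above.
 *
 * A minimal sketch of an AUX read through this path (illustrative only; the
 * payload values and link_index are assumptions, not taken from this file):
 *
 *	uint8_t buf[1];
 *	uint32_t op_result;
 *	struct aux_payload payload = {
 *		.address = DP_DPCD_REV,
 *		.length = sizeof(buf),
 *		.data = buf,
 *		.write = false,
 *	};
 *	int len = amdgpu_dm_process_dmub_aux_transfer_sync(true, dm->dc->ctx,
 *			link_index, &payload, &op_result);
 */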
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
	unsigned int link_index, void *cmd_payload, void *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	if (is_cmd_aux) {
		dc_process_dmub_aux_transfer_async(ctx->dc,
			link_index, (struct aux_payload *)cmd_payload);
	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
					(struct set_config_cmd_payload *)cmd_payload,
					adev->dm.dmub_notify)) {
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
					(uint32_t *)operation_result);
	}

	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timeout!\n");
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
				(uint32_t *)operation_result);
	}

	if (is_cmd_aux) {
		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
			struct aux_payload *payload = (struct aux_payload *)cmd_payload;

			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
				       adev->dm.dmub_notify->aux_reply.length);
			}
		}
	}

	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
			(uint32_t *)operation_result);
}