drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
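
/*
 * A minimal sketch (illustration only, not used by the driver; the
 * helper name is invented): the two out-parameters above each pack a
 * pair of 16-bit values, with the vertical component in the low half
 * and the horizontal (or vblank-end) component in the high half, e.g.
 * v_position 0x010e with h_position 0x0500 packs to 0x0500010e.
 */
static inline u32 pack_scanout_pos_example(u32 low16, u32 high16)
{
	return (low16 & 0xffff) | (high16 << 16);
}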

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int)!e);
}
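
/*
 * A condensed restatement of the branch above (illustration only; the
 * helper name is invented and the scanoutpos-query-failure case is
 * folded out): with fixed refresh, or once scanout has left the front
 * porch, the flip event may be sent immediately; inside the front porch
 * under VRR it must be deferred to dm_vupdate_high_irq().
 */
static inline bool flip_event_sendable_now_example(bool vrr_active,
						   u32 vpos, u32 v_blank_start)
{
	return !vrr_active || vpos < v_blank_start;
}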

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
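
/*
 * Worked example for the refresh-rate trace above (illustration only):
 * with vblank timestamps 8,333,333 ns apart,
 * ktime_divns(NSEC_PER_SEC, 8333333) evaluates to 120, i.e. the trace
 * point records a 120 Hz panel.
 */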

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread, and also signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator of whether the callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}
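
/*
 * Usage sketch, mirroring the registrations made later in
 * amdgpu_dm_init(): AUX replies are handled inline from the outbox IRQ,
 * while HPD notifications are offloaded to a worker thread.
 *
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *				      dmub_aux_setconfig_callback, false);
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *				      dmub_hpd_callback, true);
 */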

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
										      dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);

}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt event.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}


	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}
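
/*
 * Sizing example for the allocation above (illustration only): max_size
 * counts pixels of the largest total timing, and the buffer assumes
 * 4 bytes per pixel, so a 2200x1125 total-timing 1080p mode needs
 * 2200 * 1125 * 4 = 9,900,000 bytes, allocated in GTT with PAGE_SIZE
 * alignment.
 */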

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
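
/*
 * Layout sketch of the DMUB firmware blob consumed above (illustration
 * only): the inst_const_bytes region is wrapped by fixed-size PSP
 * header and footer, which are stripped before the copy into window 0:
 *
 *	ucode_array_offset_bytes
 *	v
 *	| PSP header (0x100) | inst const | PSP footer (0x100) | bss/data |
 *
 * hence fw_inst_const_size = inst_const_bytes - PSP_HEADER_BYTES -
 * PSP_FOOTER_BYTES in dm_dmub_hw_init().
 */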

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}

static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}
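
/*
 * Granularity note for the shifts above (illustration only): the system
 * aperture bounds are held in 256 KiB units (>> 18 when reading, << 18
 * when filling pa_config), the AGP window in 16 MiB units (>> 24 /
 * << 24), and the GART page table addresses in 4 KiB pages (>> 12 /
 * << 12).
 */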

static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/*
	 * Control PSR based on vblank requirements from OS
	 *
	 * If panel supports PSR SU, there's no need to disable PSR when OS is
	 * submitting fast atomic commits (we infer this by whether the OS
	 * requests vblank events). Fast atomic commits will simply trigger a
	 * full-frame-update (FFU); a specific case of selective-update (SU)
	 * where the SU region is the full hactive*vactive region. See
	 * fill_dc_dirty_rects().
	 */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
			    vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
				create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}
1403
7578ecda 1404static int amdgpu_dm_init(struct amdgpu_device *adev)
4562236b
HW
1405{
1406 struct dc_init_data init_data;
52704fca
BL
1407#ifdef CONFIG_DRM_AMD_DC_HDCP
1408 struct dc_callback_init init_params;
1409#endif
743b9786 1410 int r;
52704fca 1411
4a580877 1412 adev->dm.ddev = adev_to_drm(adev);
4562236b
HW
1413 adev->dm.adev = adev;
1414
4562236b
HW
1415 /* Zero all the fields */
1416 memset(&init_data, 0, sizeof(init_data));
52704fca
BL
1417#ifdef CONFIG_DRM_AMD_DC_HDCP
1418 memset(&init_params, 0, sizeof(init_params));
1419#endif
4562236b 1420
674e78ac 1421 mutex_init(&adev->dm.dc_lock);
6ce8f316 1422 mutex_init(&adev->dm.audio_lock);
ea3b4242 1423 spin_lock_init(&adev->dm.vblank_lock);
674e78ac 1424
4562236b
HW
1425 if(amdgpu_dm_irq_init(adev)) {
1426 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1427 goto error;
1428 }
1429
1430 init_data.asic_id.chip_family = adev->family;
1431
2dc31ca1 1432 init_data.asic_id.pci_revision_id = adev->pdev->revision;
4562236b 1433 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
dae66a04 1434 init_data.asic_id.chip_id = adev->pdev->device;
4562236b 1435
770d13b1 1436 init_data.asic_id.vram_width = adev->gmc.vram_width;
4562236b
HW
1437 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1438 init_data.asic_id.atombios_base_address =
1439 adev->mode_info.atom_context->bios;
1440
1441 init_data.driver = adev;
1442
1443 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1444
1445 if (!adev->dm.cgs_device) {
1446 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1447 goto error;
1448 }
1449
1450 init_data.cgs_device = adev->dm.cgs_device;
1451
4562236b
HW
1452 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1453
fd546bc5
AD
1454 switch (adev->ip_versions[DCE_HWIP][0]) {
1455 case IP_VERSION(2, 1, 0):
1456 switch (adev->dm.dmcub_fw_version) {
1457 case 0: /* development */
1458 case 0x1: /* linux-firmware.git hash 6d9f399 */
1459 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1460 init_data.flags.disable_dmcu = false;
1461 break;
1462 default:
1463 init_data.flags.disable_dmcu = true;
1464 }
1465 break;
1466 case IP_VERSION(2, 0, 3):
1467 init_data.flags.disable_dmcu = true;
1468 break;
1469 default:
1470 break;
1471 }
1472
60fb100b
AD
1473 switch (adev->asic_type) {
1474 case CHIP_CARRIZO:
1475 case CHIP_STONEY:
1ebcaebd
NK
1476 init_data.flags.gpu_vm_support = true;
1477 break;
60fb100b 1478 default:
1d789535 1479 switch (adev->ip_versions[DCE_HWIP][0]) {
559f591d
AD
1480 case IP_VERSION(1, 0, 0):
1481 case IP_VERSION(1, 0, 1):
a7f520bf
AD
1482 /* enable S/G on PCO and RV2 */
1483 if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1484 (adev->apu_flags & AMD_APU_IS_PICASSO))
1485 init_data.flags.gpu_vm_support = true;
1486 break;
fd546bc5 1487 case IP_VERSION(2, 1, 0):
c08182f2
AD
1488 case IP_VERSION(3, 0, 1):
1489 case IP_VERSION(3, 1, 2):
1490 case IP_VERSION(3, 1, 3):
b5b8ed44 1491 case IP_VERSION(3, 1, 5):
0fe382fb 1492 case IP_VERSION(3, 1, 6):
c08182f2
AD
1493 init_data.flags.gpu_vm_support = true;
1494 break;
c08182f2
AD
1495 default:
1496 break;
1497 }
60fb100b
AD
1498 break;
1499 }
6e227308 1500
a7f520bf
AD
1501 if (init_data.flags.gpu_vm_support)
1502 adev->mode_info.gpu_vm_support = true;
1503
04b94af4
AD
1504 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1505 init_data.flags.fbc_support = true;
1506
d99f38ae
AD
1507 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1508 init_data.flags.multi_mon_pp_mclk_switch = true;
1509
eaf56410
LL
1510 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1511 init_data.flags.disable_fractional_pwm = true;
a5148245
ZL
1512
1513 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1514 init_data.flags.edp_no_power_sequencing = true;
eaf56410 1515
12320274
AP
1516 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1517 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1518 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1519 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
12320274 1520
7aba117a 1521 init_data.flags.seamless_boot_edp_requested = false;
78ad75f8 1522
1edf5ae1 1523 if (check_seamless_boot_capability(adev)) {
7aba117a 1524 init_data.flags.seamless_boot_edp_requested = true;
1edf5ae1
ZL
1525 init_data.flags.allow_seamless_boot_optimization = true;
1526 DRM_INFO("Seamless boot condition check passed\n");
1527 }
1528
a8201902
LM
1529 init_data.flags.enable_mipi_converter_optimization = true;
1530
0dd79532 1531 INIT_LIST_HEAD(&adev->dm.da_list);
4562236b
HW
1532 /* Display Core create. */
1533 adev->dm.dc = dc_create(&init_data);
1534
423788c7 1535 if (adev->dm.dc) {
76121231 1536 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
423788c7 1537 } else {
76121231 1538 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
423788c7
ES
1539 goto error;
1540 }
4562236b 1541
8a791dab
HW
1542 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1543 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1544 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1545 }
1546
f99d8762
HW
1547 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1548 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3ce51649
AD
1549 if (dm_should_disable_stutter(adev->pdev))
1550 adev->dm.dc->debug.disable_stutter = true;
f99d8762 1551
8a791dab
HW
1552 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1553 adev->dm.dc->debug.disable_stutter = true;
1554
2665f63a 1555 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
8a791dab 1556 adev->dm.dc->debug.disable_dsc = true;
2665f63a
ML
1557 adev->dm.dc->debug.disable_dsc_edp = true;
1558 }
8a791dab
HW
1559
1560 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1561 adev->dm.dc->debug.disable_clock_gate = true;
1562
cfb979f7
AP
1563 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1564 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1565
743b9786
NK
1566 r = dm_dmub_hw_init(adev);
1567 if (r) {
1568 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1569 goto error;
1570 }
1571
bb6785c1
NK
1572 dc_hardware_init(adev->dm.dc);
1573
8e794421
WL
1574 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1575 if (!adev->dm.hpd_rx_offload_wq) {
1576 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1577 goto error;
1578 }
1579
3ca001af 1580 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
e6cd859d
AD
1581 struct dc_phy_addr_space_config pa_config;
1582
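 /*
 * A rough summary (inferred from the field usage, not a spec):
 * dc_phy_addr_space_config describes the physical address space the
 * display hardware sees on APUs - the framebuffer and AGP apertures
 * and the GART page-table base - so DC/DMUB can translate the system
 * memory addresses used for scanout. The values are read back from
 * MMHUB below.
 */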
0b08c54b 1583 mmhub_read_system_context(adev, &pa_config);
c0fb85ae 1584
0b08c54b
YZ
1585 // Call the DC init_memory func
1586 dc_setup_system_context(adev->dm.dc, &pa_config);
1587 }
c0fb85ae 1588
4562236b
HW
1589 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1590 if (!adev->dm.freesync_module) {
1591 DRM_ERROR(
1592 "amdgpu: failed to initialize freesync_module.\n");
1593 } else
f1ad2f5e 1594 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
4562236b
HW
1595 adev->dm.freesync_module);
1596
e277adc5
LSL
1597 amdgpu_dm_init_color_mod();
1598
ea3b4242 1599 if (adev->dm.dc->caps.max_links > 0) {
09a5df6c
NK
1600 adev->dm.vblank_control_workqueue =
1601 create_singlethread_workqueue("dm_vblank_control_workqueue");
1602 if (!adev->dm.vblank_control_workqueue)
ea3b4242 1603 DRM_ERROR("amdgpu: failed to create vblank_control_workqueue.\n");
ea3b4242 1604 }
ea3b4242 1605
52704fca 1606#ifdef CONFIG_DRM_AMD_DC_HDCP
c08182f2 1607 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
e50dc171 1608 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
52704fca 1609
96a3b32e
BL
1610 if (!adev->dm.hdcp_workqueue)
1611 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1612 else
1613 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
52704fca 1614
96a3b32e
BL
1615 dc_init_callbacks(adev->dm.dc, &init_params);
1616 }
9a65df19
WL
1617#endif
1618#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1619 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
52704fca 1620#endif
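 /*
 * A summary of the block below (the DPIA/USB4 motivation is an
 * assumption): when DMUB notifications are enabled, AUX replies and
 * HPD events arrive through the DMUB outbox rather than the legacy
 * interrupt path, so we need a completion for AUX transfers, a
 * notification buffer, a delayed-HPD workqueue, and the registered
 * callbacks.
 */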
81927e28
JS
1621 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1622 init_completion(&adev->dm.dmub_aux_transfer_done);
1623 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1624 if (!adev->dm.dmub_notify) {
1625 DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1626 goto error;
1627 }
e27c41d5
JS
1628
1629 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1630 if (!adev->dm.delayed_hpd_wq) {
1631 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1632 goto error;
1633 }
1634
81927e28 1635 amdgpu_dm_outbox_init(adev);
e27c41d5
JS
1636 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1637 dmub_aux_setconfig_callback, false)) {
1638 DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1639 goto error;
1640 }
1641 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1642 DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1643 goto error;
1644 }
c40a09e5
NK
1645 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1646 DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1647 goto error;
1648 }
81927e28
JS
1649 }
1650
4562236b
HW
1651 if (amdgpu_dm_initialize_drm_device(adev)) {
1652 DRM_ERROR(
1653 "amdgpu: failed to initialize sw for display support.\n");
1654 goto error;
1655 }
1656
f74367e4
AD
1657 /* create fake encoders for MST */
1658 dm_dp_create_fake_mst_encoders(adev);
1659
4562236b
HW
1660 /* TODO: Add_display_info? */
1661
1662 /* TODO use dynamic cursor width */
4a580877
LT
1663 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1664 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
4562236b 1665
4a580877 1666 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
4562236b
HW
1667 DRM_ERROR(
1668 "amdgpu: failed to initialize sw for display support.\n");
1669 goto error;
1670 }
1671
c0fb85ae 1672
f1ad2f5e 1673 DRM_DEBUG_DRIVER("KMS initialized.\n");
4562236b
HW
1674
1675 return 0;
1676error:
1677 amdgpu_dm_fini(adev);
1678
59d0f396 1679 return -EINVAL;
4562236b
HW
1680}
1681
e9669fb7
AG
1682static int amdgpu_dm_early_fini(void *handle)
1683{
1684 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1685
1686 amdgpu_dm_audio_fini(adev);
1687
1688 return 0;
1689}
1690
7578ecda 1691static void amdgpu_dm_fini(struct amdgpu_device *adev)
4562236b 1692{
f74367e4
AD
1693 int i;
1694
09a5df6c
NK
1695 if (adev->dm.vblank_control_workqueue) {
1696 destroy_workqueue(adev->dm.vblank_control_workqueue);
1697 adev->dm.vblank_control_workqueue = NULL;
1698 }
09a5df6c 1699
f74367e4
AD
1700 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1701 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1702 }
1703
4562236b 1704 amdgpu_dm_destroy_drm_device(&adev->dm);
c8bdf2b6 1705
9a65df19
WL
1706#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1707 if (adev->dm.crc_rd_wrk) {
1708 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1709 kfree(adev->dm.crc_rd_wrk);
1710 adev->dm.crc_rd_wrk = NULL;
1711 }
1712#endif
52704fca
BL
1713#ifdef CONFIG_DRM_AMD_DC_HDCP
1714 if (adev->dm.hdcp_workqueue) {
e96b1b29 1715 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
52704fca
BL
1716 adev->dm.hdcp_workqueue = NULL;
1717 }
1718
1719 if (adev->dm.dc)
1720 dc_deinit_callbacks(adev->dm.dc);
1721#endif
51ba6912 1722
3beac533 1723 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
9a71c7d3 1724
81927e28
JS
1725 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1726 kfree(adev->dm.dmub_notify);
1727 adev->dm.dmub_notify = NULL;
e27c41d5
JS
1728 destroy_workqueue(adev->dm.delayed_hpd_wq);
1729 adev->dm.delayed_hpd_wq = NULL;
81927e28
JS
1730 }
1731
743b9786
NK
1732 if (adev->dm.dmub_bo)
1733 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1734 &adev->dm.dmub_bo_gpu_addr,
1735 &adev->dm.dmub_bo_cpu_addr);
52704fca 1736
006c26a0
AG
1737 if (adev->dm.hpd_rx_offload_wq) {
1738 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1739 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1740 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1741 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1742 }
1743 }
1744
1745 kfree(adev->dm.hpd_rx_offload_wq);
1746 adev->dm.hpd_rx_offload_wq = NULL;
1747 }
1748
c8bdf2b6
ED
1749 /* DC Destroy TODO: Replace destroy DAL */
1750 if (adev->dm.dc)
1751 dc_destroy(&adev->dm.dc);
4562236b
HW
1752 /*
1753 * TODO: pageflip, vblank interrupt
1754 *
1755 * amdgpu_dm_irq_fini(adev);
1756 */
1757
1758 if (adev->dm.cgs_device) {
1759 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1760 adev->dm.cgs_device = NULL;
1761 }
1762 if (adev->dm.freesync_module) {
1763 mod_freesync_destroy(adev->dm.freesync_module);
1764 adev->dm.freesync_module = NULL;
1765 }
674e78ac 1766
6ce8f316 1767 mutex_destroy(&adev->dm.audio_lock);
674e78ac
NK
1768 mutex_destroy(&adev->dm.dc_lock);
1769
4562236b
HW
1770 return;
1771}
1772
a94d5569 1773static int load_dmcu_fw(struct amdgpu_device *adev)
4562236b 1774{
a7669aff 1775 const char *fw_name_dmcu = NULL;
a94d5569
DF
1776 int r;
1777 const struct dmcu_firmware_header_v1_0 *hdr;
1778
1779 switch (adev->asic_type) {
55e56389
MR
1780#if defined(CONFIG_DRM_AMD_DC_SI)
1781 case CHIP_TAHITI:
1782 case CHIP_PITCAIRN:
1783 case CHIP_VERDE:
1784 case CHIP_OLAND:
1785#endif
a94d5569
DF
1786 case CHIP_BONAIRE:
1787 case CHIP_HAWAII:
1788 case CHIP_KAVERI:
1789 case CHIP_KABINI:
1790 case CHIP_MULLINS:
1791 case CHIP_TONGA:
1792 case CHIP_FIJI:
1793 case CHIP_CARRIZO:
1794 case CHIP_STONEY:
1795 case CHIP_POLARIS11:
1796 case CHIP_POLARIS10:
1797 case CHIP_POLARIS12:
1798 case CHIP_VEGAM:
1799 case CHIP_VEGA10:
1800 case CHIP_VEGA12:
1801 case CHIP_VEGA20:
1802 return 0;
5ea23931
RL
1803 case CHIP_NAVI12:
1804 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1805 break;
a94d5569 1806 case CHIP_RAVEN:
a7669aff
HW
1807 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1808 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1809 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1810 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1811 else
a7669aff 1812 return 0;
a94d5569
DF
1813 break;
1814 default:
1d789535 1815 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
1816 case IP_VERSION(2, 0, 2):
1817 case IP_VERSION(2, 0, 3):
1818 case IP_VERSION(2, 0, 0):
1819 case IP_VERSION(2, 1, 0):
1820 case IP_VERSION(3, 0, 0):
1821 case IP_VERSION(3, 0, 2):
1822 case IP_VERSION(3, 0, 3):
1823 case IP_VERSION(3, 0, 1):
1824 case IP_VERSION(3, 1, 2):
1825 case IP_VERSION(3, 1, 3):
b5b8ed44 1826 case IP_VERSION(3, 1, 5):
de7cc1b4 1827 case IP_VERSION(3, 1, 6):
577359ca
AP
1828 case IP_VERSION(3, 2, 0):
1829 case IP_VERSION(3, 2, 1):
c08182f2
AD
1830 return 0;
1831 default:
1832 break;
1833 }
a94d5569 1834 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
59d0f396 1835 return -EINVAL;
a94d5569
DF
1836 }
1837
1838 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1839 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1840 return 0;
1841 }
1842
1843 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1844 if (r == -ENOENT) {
1845 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1846 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1847 adev->dm.fw_dmcu = NULL;
1848 return 0;
1849 }
1850 if (r) {
1851 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1852 fw_name_dmcu);
1853 return r;
1854 }
1855
1856 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1857 if (r) {
1858 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1859 fw_name_dmcu);
1860 release_firmware(adev->dm.fw_dmcu);
1861 adev->dm.fw_dmcu = NULL;
1862 return r;
1863 }
1864
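 /*
 * A sketch of the DMCU image layout as consumed below (inferred from
 * the header fields, not an authoritative spec): ucode_size_bytes
 * covers both the ERAM payload and the interrupt vectors (INTV), so
 * the ERAM portion is the total minus intv_size_bytes. Each piece is
 * page-aligned when accounted into the PSP firmware size.
 */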
1865 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1866 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1867 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1868 adev->firmware.fw_size +=
1869 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1870
1871 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1872 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1873 adev->firmware.fw_size +=
1874 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1875
ee6e89c0
DF
1876 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1877
a94d5569
DF
1878 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1879
4562236b
HW
1880 return 0;
1881}
1882
743b9786
NK
1883static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1884{
1885 struct amdgpu_device *adev = ctx;
1886
1887 return dm_read_reg(adev->dm.dc->ctx, address);
1888}
1889
1890static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1891 uint32_t value)
1892{
1893 struct amdgpu_device *adev = ctx;
1894
1895 return dm_write_reg(adev->dm.dc->ctx, address, value);
1896}
1897
1898static int dm_dmub_sw_init(struct amdgpu_device *adev)
1899{
1900 struct dmub_srv_create_params create_params;
8c7aea40
NK
1901 struct dmub_srv_region_params region_params;
1902 struct dmub_srv_region_info region_info;
1903 struct dmub_srv_fb_params fb_params;
1904 struct dmub_srv_fb_info *fb_info;
1905 struct dmub_srv *dmub_srv;
743b9786
NK
1906 const struct dmcub_firmware_header_v1_0 *hdr;
1907 const char *fw_name_dmub;
1908 enum dmub_asic dmub_asic;
1909 enum dmub_status status;
1910 int r;
1911
1d789535 1912 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2 1913 case IP_VERSION(2, 1, 0):
743b9786
NK
1914 dmub_asic = DMUB_ASIC_DCN21;
1915 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
71c0fd92
RL
1916 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1917 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
743b9786 1918 break;
c08182f2 1919 case IP_VERSION(3, 0, 0):
1d789535 1920 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
c08182f2
AD
1921 dmub_asic = DMUB_ASIC_DCN30;
1922 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1923 } else {
1924 dmub_asic = DMUB_ASIC_DCN30;
1925 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1926 }
79037324 1927 break;
c08182f2 1928 case IP_VERSION(3, 0, 1):
469989ca
RL
1929 dmub_asic = DMUB_ASIC_DCN301;
1930 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1931 break;
c08182f2 1932 case IP_VERSION(3, 0, 2):
2a411205
BL
1933 dmub_asic = DMUB_ASIC_DCN302;
1934 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1935 break;
c08182f2 1936 case IP_VERSION(3, 0, 3):
656fe9b6
AP
1937 dmub_asic = DMUB_ASIC_DCN303;
1938 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1939 break;
c08182f2
AD
1940 case IP_VERSION(3, 1, 2):
1941 case IP_VERSION(3, 1, 3):
3137f792 1942 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1ebcaebd
NK
1943 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1944 break;
b5b8ed44
QZ
1945 case IP_VERSION(3, 1, 5):
1946 dmub_asic = DMUB_ASIC_DCN315;
1947 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1948 break;
de7cc1b4 1949 case IP_VERSION(3, 1, 6):
868f4357 1950 dmub_asic = DMUB_ASIC_DCN316;
de7cc1b4
PL
1951 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1952 break;
577359ca
AP
1953 case IP_VERSION(3, 2, 0):
1954 dmub_asic = DMUB_ASIC_DCN32;
1955 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
1956 break;
1957 case IP_VERSION(3, 2, 1):
1958 dmub_asic = DMUB_ASIC_DCN321;
1959 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
1960 break;
743b9786
NK
1961 default:
1962 /* ASIC doesn't support DMUB. */
1963 return 0;
1964 }
1965
743b9786
NK
1966 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1967 if (r) {
1968 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1969 return 0;
1970 }
1971
1972 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1973 if (r) {
1974 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1975 return 0;
1976 }
1977
743b9786 1978 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
72a74a18 1979 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
743b9786 1980
9a6ed547
NK
1981 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1982 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1983 AMDGPU_UCODE_ID_DMCUB;
1984 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1985 adev->dm.dmub_fw;
1986 adev->firmware.fw_size +=
1987 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
743b9786 1988
9a6ed547
NK
1989 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1990 adev->dm.dmcub_fw_version);
1991 }
1992
743b9786 1993
8c7aea40
NK
1994 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1995 dmub_srv = adev->dm.dmub_srv;
1996
1997 if (!dmub_srv) {
1998 DRM_ERROR("Failed to allocate DMUB service!\n");
1999 return -ENOMEM;
2000 }
2001
2002 memset(&create_params, 0, sizeof(create_params));
2003 create_params.user_ctx = adev;
2004 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2005 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2006 create_params.asic = dmub_asic;
2007
2008 /* Create the DMUB service. */
2009 status = dmub_srv_create(dmub_srv, &create_params);
2010 if (status != DMUB_STATUS_OK) {
2011 DRM_ERROR("Error creating DMUB service: %d\n", status);
2012 return -EINVAL;
2013 }
2014
2015 /* Calculate the size of all the regions for the DMUB service. */
2016 memset(&region_params, 0, sizeof(region_params));
2017
2018 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2019 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2020 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2021 region_params.vbios_size = adev->bios_size;
0922b899 2022 region_params.fw_bss_data = region_params.bss_data_size ?
1f0674fd
NK
2023 adev->dm.dmub_fw->data +
2024 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
0922b899 2025 le32_to_cpu(hdr->inst_const_bytes) : NULL;
a576b345
NK
2026 region_params.fw_inst_const =
2027 adev->dm.dmub_fw->data +
2028 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2029 PSP_HEADER_BYTES;
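 /*
 * Rough layout of the DMUB firmware blob as consumed here (inferred
 * from the offsets above, not an authoritative spec):
 *
 * ucode_array_offset_bytes
 * +-> [ PSP header | instruction/constant RAM | PSP footer ] inst_const_bytes
 * +-> [ BSS/data image ] bss_data_bytes
 *
 * inst_const_size strips the PSP wrapper, and fw_bss_data is NULL
 * when the header reports no bss/data section.
 */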
8c7aea40
NK
2030
2031 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2032 &region_info);
2033
2034 if (status != DMUB_STATUS_OK) {
2035 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2036 return -EINVAL;
2037 }
2038
2039 /*
2040 * Allocate a framebuffer based on the total size of all the regions.
2041 * TODO: Move this into GART.
2042 */
2043 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2044 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2045 &adev->dm.dmub_bo_gpu_addr,
2046 &adev->dm.dmub_bo_cpu_addr);
2047 if (r)
2048 return r;
2049
2050 /* Rebase the regions on the framebuffer address. */
2051 memset(&fb_params, 0, sizeof(fb_params));
2052 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2053 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2054 fb_params.region_info = &region_info;
2055
2056 adev->dm.dmub_fb_info =
2057 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2058 fb_info = adev->dm.dmub_fb_info;
2059
2060 if (!fb_info) {
2061 DRM_ERROR(
2062 "Failed to allocate framebuffer info for DMUB service!\n");
2063 return -ENOMEM;
2064 }
2065
2066 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2067 if (status != DMUB_STATUS_OK) {
2068 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2069 return -EINVAL;
2070 }
2071
743b9786
NK
2072 return 0;
2073}
2074
a94d5569
DF
2075static int dm_sw_init(void *handle)
2076{
2077 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
743b9786
NK
2078 int r;
2079
2080 r = dm_dmub_sw_init(adev);
2081 if (r)
2082 return r;
a94d5569
DF
2083
2084 return load_dmcu_fw(adev);
2085}
2086
4562236b
HW
2087static int dm_sw_fini(void *handle)
2088{
a94d5569
DF
2089 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2090
8c7aea40
NK
2091 kfree(adev->dm.dmub_fb_info);
2092 adev->dm.dmub_fb_info = NULL;
2093
743b9786
NK
2094 if (adev->dm.dmub_srv) {
2095 dmub_srv_destroy(adev->dm.dmub_srv);
2096 adev->dm.dmub_srv = NULL;
2097 }
2098
75e1658e
ND
2099 release_firmware(adev->dm.dmub_fw);
2100 adev->dm.dmub_fw = NULL;
743b9786 2101
75e1658e
ND
2102 release_firmware(adev->dm.fw_dmcu);
2103 adev->dm.fw_dmcu = NULL;
a94d5569 2104
4562236b
HW
2105 return 0;
2106}
2107
7abcf6b5 2108static int detect_mst_link_for_all_connectors(struct drm_device *dev)
4562236b 2109{
c84dec2f 2110 struct amdgpu_dm_connector *aconnector;
4562236b 2111 struct drm_connector *connector;
f8d2d39e 2112 struct drm_connector_list_iter iter;
7abcf6b5 2113 int ret = 0;
4562236b 2114
f8d2d39e
LP
2115 drm_connector_list_iter_begin(dev, &iter);
2116 drm_for_each_connector_iter(connector, &iter) {
b349f76e 2117 aconnector = to_amdgpu_dm_connector(connector);
30ec2b97
JFZ
2118 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2119 aconnector->mst_mgr.aux) {
f1ad2f5e 2120 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
f8d2d39e
LP
2121 aconnector,
2122 aconnector->base.base.id);
7abcf6b5
AG
2123
2124 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2125 if (ret < 0) {
2126 DRM_ERROR("DM_MST: Failed to start MST\n");
f8d2d39e
LP
2127 aconnector->dc_link->type =
2128 dc_connection_single;
2129 break;
7abcf6b5 2130 }
f8d2d39e 2131 }
4562236b 2132 }
f8d2d39e 2133 drm_connector_list_iter_end(&iter);
4562236b 2134
7abcf6b5
AG
2135 return ret;
2136}
2137
2138static int dm_late_init(void *handle)
2139{
42e67c3b 2140 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7abcf6b5 2141
bbf854dc
DF
2142 struct dmcu_iram_parameters params;
2143 unsigned int linear_lut[16];
2144 int i;
17bdb4a8 2145 struct dmcu *dmcu = NULL;
bbf854dc 2146
17bdb4a8
JFZ
2147 dmcu = adev->dm.dc->res_pool->dmcu;
2148
bbf854dc
DF
2149 for (i = 0; i < 16; i++)
2150 linear_lut[i] = 0xFFFF * i / 15;
2151
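 /*
 * The loop above builds an identity (linear) backlight ramp over 16
 * entries, e.g. linear_lut[0] = 0, linear_lut[8] = 0xFFFF * 8 / 15 =
 * 0x8888 and linear_lut[15] = 0xFFFF, i.e. no ABM reshaping by default.
 */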
2152 params.set = 0;
75068994 2153 params.backlight_ramping_override = false;
bbf854dc
DF
2154 params.backlight_ramping_start = 0xCCCC;
2155 params.backlight_ramping_reduction = 0xCCCCCCCC;
2156 params.backlight_lut_array_size = 16;
2157 params.backlight_lut_array = linear_lut;
2158
2ad0cdf9
AK
2159 /* Min backlight level after ABM reduction; don't allow below 1%:
2160 * 0xFFFF x 0.01 = 0x28F
2161 */
2162 params.min_abm_backlight = 0x28F;
5cb32419 2163 /* In the case where abm is implemented on dmcub,
6e568e43
JW
2164 * the dmcu object will be NULL.
2165 * ABM 2.4 and up are implemented on dmcub.
2166 */
2167 if (dmcu) {
2168 if (!dmcu_load_iram(dmcu, params))
2169 return -EINVAL;
2170 } else if (adev->dm.dc->ctx->dmub_srv) {
2171 struct dc_link *edp_links[MAX_NUM_EDP];
2172 int edp_num;
bbf854dc 2173
6e568e43
JW
2174 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2175 for (i = 0; i < edp_num; i++) {
2176 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2177 return -EINVAL;
2178 }
2179 }
bbf854dc 2180
4a580877 2181 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
4562236b
HW
2182}
2183
2184static void s3_handle_mst(struct drm_device *dev, bool suspend)
2185{
c84dec2f 2186 struct amdgpu_dm_connector *aconnector;
4562236b 2187 struct drm_connector *connector;
f8d2d39e 2188 struct drm_connector_list_iter iter;
fe7553be
LP
2189 struct drm_dp_mst_topology_mgr *mgr;
2190 int ret;
2191 bool need_hotplug = false;
4562236b 2192
f8d2d39e
LP
2193 drm_connector_list_iter_begin(dev, &iter);
2194 drm_for_each_connector_iter(connector, &iter) {
fe7553be
LP
2195 aconnector = to_amdgpu_dm_connector(connector);
2196 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2197 aconnector->mst_port)
2198 continue;
2199
2200 mgr = &aconnector->mst_mgr;
2201
2202 if (suspend) {
2203 drm_dp_mst_topology_mgr_suspend(mgr);
2204 } else {
6f85f738 2205 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
fe7553be 2206 if (ret < 0) {
84a8b390
WL
2207 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2208 aconnector->dc_link);
fe7553be
LP
2209 need_hotplug = true;
2210 }
2211 }
4562236b 2212 }
f8d2d39e 2213 drm_connector_list_iter_end(&iter);
fe7553be
LP
2214
2215 if (need_hotplug)
2216 drm_kms_helper_hotplug_event(dev);
4562236b
HW
2217}
2218
9340dfd3
HW
2219static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2220{
9340dfd3
HW
2221 int ret = 0;
2222
9340dfd3
HW
2223 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2224 * on the Windows driver dc implementation.
2225 * For Navi1x, the clock settings of the dcn watermarks are fixed; the settings
2226 * should be passed to smu during boot up and resume from S3.
2227 * boot up: dc calculates the dcn watermark clock settings within dc_create,
2228 * dcn20_resource_construct
2229 * then call pplib functions below to pass the settings to smu:
2230 * smu_set_watermarks_for_clock_ranges
2231 * smu_set_watermarks_table
2232 * navi10_set_watermarks_table
2233 * smu_write_watermarks_table
2234 *
2235 * For Renoir, clock settings of dcn watermark are also fixed values.
2236 * dc has implemented different flow for window driver:
2237 * dc_hardware_init / dc_set_power_state
2238 * dcn10_init_hw
2239 * notify_wm_ranges
2240 * set_wm_ranges
2241 * -- Linux
2242 * smu_set_watermarks_for_clock_ranges
2243 * renoir_set_watermarks_table
2244 * smu_write_watermarks_table
2245 *
2246 * For Linux,
2247 * dc_hardware_init -> amdgpu_dm_init
2248 * dc_set_power_state --> dm_resume
2249 *
2250 * therefore, this function applies to navi10/12/14 but not Renoir.
2251 *
2252 */
1d789535 2253 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
2254 case IP_VERSION(2, 0, 2):
2255 case IP_VERSION(2, 0, 0):
9340dfd3
HW
2256 break;
2257 default:
2258 return 0;
2259 }
2260
13f5dbd6 2261 ret = amdgpu_dpm_write_watermarks_table(adev);
e7a95eea
EQ
2262 if (ret) {
2263 DRM_ERROR("Failed to update WMTABLE!\n");
2264 return ret;
9340dfd3
HW
2265 }
2266
9340dfd3
HW
2267 return 0;
2268}
2269
b8592b48
LL
2270/**
2271 * dm_hw_init() - Initialize DC device
28d687ea 2272 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
2273 *
2274 * Initialize the &struct amdgpu_display_manager device. This involves calling
2275 * the initializers of each DM component, then populating the struct with them.
2276 *
2277 * Although the function implies hardware initialization, both hardware and
2278 * software are initialized here. Splitting them out to their relevant init
2279 * hooks is a future TODO item.
2280 *
2281 * Some notable things that are initialized here:
2282 *
2283 * - Display Core, both software and hardware
2284 * - DC modules that we need (freesync and color management)
2285 * - DRM software states
2286 * - Interrupt sources and handlers
2287 * - Vblank support
2288 * - Debug FS entries, if enabled
2289 */
4562236b
HW
2290static int dm_hw_init(void *handle)
2291{
2292 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2293 /* Create DAL display manager */
2294 amdgpu_dm_init(adev);
4562236b
HW
2295 amdgpu_dm_hpd_init(adev);
2296
4562236b
HW
2297 return 0;
2298}
2299
b8592b48
LL
2300/**
2301 * dm_hw_fini() - Teardown DC device
28d687ea 2302 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
2303 *
2304 * Teardown components within &struct amdgpu_display_manager that require
2305 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2306 * were loaded. Also flush IRQ workqueues and disable them.
2307 */
4562236b
HW
2308static int dm_hw_fini(void *handle)
2309{
2310 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2311
2312 amdgpu_dm_hpd_fini(adev);
2313
2314 amdgpu_dm_irq_fini(adev);
21de3396 2315 amdgpu_dm_fini(adev);
4562236b
HW
2316 return 0;
2317}
2318
cdaae837
BL
2319
2320static int dm_enable_vblank(struct drm_crtc *crtc);
2321static void dm_disable_vblank(struct drm_crtc *crtc);
2322
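 /*
 * A summary of the helper below: around GPU reset, walk every stream
 * that still has planes and toggle its pageflip interrupt (and vblank)
 * so no stale interrupts fire while the hardware state is torn down
 * and rebuilt.
 */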
2323static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2324 struct dc_state *state, bool enable)
2325{
2326 enum dc_irq_source irq_source;
2327 struct amdgpu_crtc *acrtc;
2328 int rc = -EBUSY;
2329 int i = 0;
2330
2331 for (i = 0; i < state->stream_count; i++) {
2332 acrtc = get_crtc_by_otg_inst(
2333 adev, state->stream_status[i].primary_otg_inst);
2334
2335 if (acrtc && state->stream_status[i].plane_count != 0) {
2336 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2337 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4711c033
LT
2338 DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2339 acrtc->crtc_id, enable ? "en" : "dis", rc);
cdaae837
BL
2340 if (rc)
2341 DRM_WARN("Failed to %s pflip interrupts\n",
2342 enable ? "enable" : "disable");
2343
2344 if (enable) {
2345 rc = dm_enable_vblank(&acrtc->base);
2346 if (rc)
2347 DRM_WARN("Failed to enable vblank interrupts\n");
2348 } else {
2349 dm_disable_vblank(&acrtc->base);
2350 }
2351
2352 }
2353 }
2354
2355}
2356
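 /*
 * A summary of the helper below: duplicate the current DC state, strip
 * all planes and then all streams from the copy, and commit the empty
 * state. The suspend/GPU-reset paths use this to quiesce the display
 * hardware before the cached state is replayed.
 */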
dfd84d90 2357static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
2358{
2359 struct dc_state *context = NULL;
2360 enum dc_status res = DC_ERROR_UNEXPECTED;
2361 int i;
2362 struct dc_stream_state *del_streams[MAX_PIPES];
2363 int del_streams_count = 0;
2364
2365 memset(del_streams, 0, sizeof(del_streams));
2366
2367 context = dc_create_state(dc);
2368 if (context == NULL)
2369 goto context_alloc_fail;
2370
2371 dc_resource_state_copy_construct_current(dc, context);
2372
2373 /* First remove from context all streams */
2374 for (i = 0; i < context->stream_count; i++) {
2375 struct dc_stream_state *stream = context->streams[i];
2376
2377 del_streams[del_streams_count++] = stream;
2378 }
2379
2380 /* Remove all planes for removed streams and then remove the streams */
2381 for (i = 0; i < del_streams_count; i++) {
2382 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2383 res = DC_FAIL_DETACH_SURFACES;
2384 goto fail;
2385 }
2386
2387 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2388 if (res != DC_OK)
2389 goto fail;
2390 }
2391
cdaae837
BL
2392 res = dc_commit_state(dc, context);
2393
2394fail:
2395 dc_release_state(context);
2396
2397context_alloc_fail:
2398 return res;
2399}
2400
8e794421
WL
2401static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2402{
2403 int i;
2404
2405 if (dm->hpd_rx_offload_wq) {
2406 for (i = 0; i < dm->dc->caps.max_links; i++)
2407 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2408 }
2409}
2410
4562236b
HW
2411static int dm_suspend(void *handle)
2412{
2413 struct amdgpu_device *adev = handle;
2414 struct amdgpu_display_manager *dm = &adev->dm;
2415 int ret = 0;
4562236b 2416
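 /*
 * GPU-reset path, roughly: cache the current DC state, disable the
 * per-stream interrupts, commit zero streams to idle the hardware,
 * and suspend the IRQ/HPD-rx machinery; dm_resume() replays the
 * cached state once the reset completes.
 */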
53b3f8f4 2417 if (amdgpu_in_reset(adev)) {
cdaae837 2418 mutex_lock(&dm->dc_lock);
98ab5f35 2419
98ab5f35 2420 dc_allow_idle_optimizations(adev->dm.dc, false);
98ab5f35 2421
cdaae837
BL
2422 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2423
2424 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2425
2426 amdgpu_dm_commit_zero_streams(dm->dc);
2427
2428 amdgpu_dm_irq_suspend(adev);
2429
8e794421
WL
2430 hpd_rx_irq_work_suspend(dm);
2431
cdaae837
BL
2432 return ret;
2433 }
4562236b 2434
d2f0b53b 2435 WARN_ON(adev->dm.cached_state);
4a580877 2436 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 2437
4a580877 2438 s3_handle_mst(adev_to_drm(adev), true);
4562236b 2439
4562236b
HW
2440 amdgpu_dm_irq_suspend(adev);
2441
8e794421
WL
2442 hpd_rx_irq_work_suspend(dm);
2443
32f5062d 2444 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 2445
1c2075d4 2446 return 0;
4562236b
HW
2447}
2448
17ce8a69 2449struct amdgpu_dm_connector *
1daf8c63
AD
2450amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2451 struct drm_crtc *crtc)
4562236b
HW
2452{
2453 uint32_t i;
c2cea706 2454 struct drm_connector_state *new_con_state;
4562236b
HW
2455 struct drm_connector *connector;
2456 struct drm_crtc *crtc_from_state;
2457
c2cea706
LSL
2458 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2459 crtc_from_state = new_con_state->crtc;
4562236b
HW
2460
2461 if (crtc_from_state == crtc)
c84dec2f 2462 return to_amdgpu_dm_connector(connector);
4562236b
HW
2463 }
2464
2465 return NULL;
2466}
2467
fbbdadf2
BL
2468static void emulated_link_detect(struct dc_link *link)
2469{
2470 struct dc_sink_init_data sink_init_data = { 0 };
2471 struct display_sink_capability sink_caps = { 0 };
2472 enum dc_edid_status edid_status;
2473 struct dc_context *dc_ctx = link->ctx;
2474 struct dc_sink *sink = NULL;
2475 struct dc_sink *prev_sink = NULL;
2476
2477 link->type = dc_connection_none;
2478 prev_sink = link->local_sink;
2479
30164a16
VL
2480 if (prev_sink)
2481 dc_sink_release(prev_sink);
fbbdadf2
BL
2482
2483 switch (link->connector_signal) {
2484 case SIGNAL_TYPE_HDMI_TYPE_A: {
2485 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2486 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2487 break;
2488 }
2489
2490 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2491 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2492 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2493 break;
2494 }
2495
2496 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2497 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2498 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2499 break;
2500 }
2501
2502 case SIGNAL_TYPE_LVDS: {
2503 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2504 sink_caps.signal = SIGNAL_TYPE_LVDS;
2505 break;
2506 }
2507
2508 case SIGNAL_TYPE_EDP: {
2509 sink_caps.transaction_type =
2510 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2511 sink_caps.signal = SIGNAL_TYPE_EDP;
2512 break;
2513 }
2514
2515 case SIGNAL_TYPE_DISPLAY_PORT: {
2516 sink_caps.transaction_type =
2517 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2518 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2519 break;
2520 }
2521
2522 default:
2523 DC_ERROR("Invalid connector type! signal:%d\n",
2524 link->connector_signal);
2525 return;
2526 }
2527
2528 sink_init_data.link = link;
2529 sink_init_data.sink_signal = sink_caps.signal;
2530
2531 sink = dc_sink_create(&sink_init_data);
2532 if (!sink) {
2533 DC_ERROR("Failed to create sink!\n");
2534 return;
2535 }
2536
dcd5fb82 2537 /* dc_sink_create returns a new reference */
fbbdadf2
BL
2538 link->local_sink = sink;
2539
2540 edid_status = dm_helpers_read_local_edid(
2541 link->ctx,
2542 link,
2543 sink);
2544
2545 if (edid_status != EDID_OK)
2546 DC_ERROR("Failed to read EDID\n");
2547
2548}
2549
cdaae837
BL
2550static void dm_gpureset_commit_state(struct dc_state *dc_state,
2551 struct amdgpu_display_manager *dm)
2552{
2553 struct {
2554 struct dc_surface_update surface_updates[MAX_SURFACES];
2555 struct dc_plane_info plane_infos[MAX_SURFACES];
2556 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2557 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2558 struct dc_stream_update stream_update;
2559 } *bundle;
2560 int k, m;
2561
2562 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2563
2564 if (!bundle) {
2565 dm_error("Failed to allocate update bundle\n");
2566 goto cleanup;
2567 }
2568
2569 for (k = 0; k < dc_state->stream_count; k++) {
2570 bundle->stream_update.stream = dc_state->streams[k];
2571
2572 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2573 bundle->surface_updates[m].surface =
2574 dc_state->stream_status->plane_states[m];
2575 bundle->surface_updates[m].surface->force_full_update =
2576 true;
2577 }
2578 dc_commit_updates_for_stream(
2579 dm->dc, bundle->surface_updates,
2580 dc_state->stream_status->plane_count,
efc8278e 2581 dc_state->streams[k], &bundle->stream_update, dc_state);
cdaae837
BL
2582 }
2583
2584cleanup:
2585 kfree(bundle);
2586
2587 return;
2588}
2589
4562236b
HW
2590static int dm_resume(void *handle)
2591{
2592 struct amdgpu_device *adev = handle;
4a580877 2593 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2594 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2595 struct amdgpu_dm_connector *aconnector;
4562236b 2596 struct drm_connector *connector;
f8d2d39e 2597 struct drm_connector_list_iter iter;
4562236b 2598 struct drm_crtc *crtc;
c2cea706 2599 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
2600 struct dm_crtc_state *dm_new_crtc_state;
2601 struct drm_plane *plane;
2602 struct drm_plane_state *new_plane_state;
2603 struct dm_plane_state *dm_new_plane_state;
113b7a01 2604 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2605 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
2606 struct dc_state *dc_state;
2607 int i, r, j;
4562236b 2608
53b3f8f4 2609 if (amdgpu_in_reset(adev)) {
cdaae837
BL
2610 dc_state = dm->cached_dc_state;
2611
6d63fcc2
NK
2612 /*
2613 * The dc->current_state is backed up into dm->cached_dc_state
2614 * before we commit 0 streams.
2615 *
2616 * DC will clear link encoder assignments on the real state
2617 * but the changes won't propagate over to the copy we made
2618 * before the 0 streams commit.
2619 *
2620 * DC expects that link encoder assignments are *not* valid
32685b32
NK
2621 * when committing a state, so as a workaround we can copy
2622 * off of the current state.
2623 *
2624 * We lose the previous assignments, but we had already
2625 * commit 0 streams anyway.
6d63fcc2 2626 */
32685b32 2627 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
6d63fcc2 2628
af6902ec
NK
2629 if (dc_enable_dmub_notifications(adev->dm.dc))
2630 amdgpu_dm_outbox_init(adev);
524a0ba6 2631
cdaae837
BL
2632 r = dm_dmub_hw_init(adev);
2633 if (r)
2634 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2635
2636 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2637 dc_resume(dm->dc);
2638
2639 amdgpu_dm_irq_resume_early(adev);
2640
2641 for (i = 0; i < dc_state->stream_count; i++) {
2642 dc_state->streams[i]->mode_changed = true;
6984fa41
NK
2643 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2644 dc_state->stream_status[i].plane_states[j]->update_flags.raw
cdaae837
BL
2645 = 0xffffffff;
2646 }
2647 }
2648
2649 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2650
cdaae837
BL
2651 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2652
2653 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2654
2655 dc_release_state(dm->cached_dc_state);
2656 dm->cached_dc_state = NULL;
2657
2658 amdgpu_dm_irq_resume_late(adev);
2659
2660 mutex_unlock(&dm->dc_lock);
2661
2662 return 0;
2663 }
113b7a01
LL
2664 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2665 dc_release_state(dm_state->context);
2666 dm_state->context = dc_create_state(dm->dc);
2667 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2668 dc_resource_state_construct(dm->dc, dm_state->context);
2669
af6902ec
NK
2670 /* Re-enable outbox interrupts for DPIA. */
2671 if (dc_enable_dmub_notifications(adev->dm.dc))
2672 amdgpu_dm_outbox_init(adev);
2673
8c7aea40 2674 /* Before powering on DC we need to re-initialize DMUB. */
79d6b935 2675 dm_dmub_hw_resume(adev);
8c7aea40 2676
a80aa93d
ML
2677 /* power on hardware */
2678 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2679
4562236b
HW
2680 /* program HPD filter */
2681 dc_resume(dm->dc);
2682
4562236b
HW
2683 /*
2684 * early enable HPD Rx IRQ, should be done before set mode as short
2685 * pulse interrupts are used for MST
2686 */
2687 amdgpu_dm_irq_resume_early(adev);
2688
d20ebea8 2689 /* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
2690 s3_handle_mst(ddev, false);
2691
4562236b 2692 /* Do detection */
f8d2d39e
LP
2693 drm_connector_list_iter_begin(ddev, &iter);
2694 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2695 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2696
2697 /*
2698 * This is the case when traversing through already created
2699 * MST connectors; they should be skipped.
2700 */
f4346fb3
RL
2701 if (aconnector->dc_link &&
2702 aconnector->dc_link->type == dc_connection_mst_branch)
4562236b
HW
2703 continue;
2704
03ea364c 2705 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2706 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2707 DRM_ERROR("KMS: Failed to detect connector\n");
2708
2709 if (aconnector->base.force && new_connection_type == dc_connection_none)
2710 emulated_link_detect(aconnector->dc_link);
2711 else
2712 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2713
2714 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2715 aconnector->fake_enable = false;
2716
dcd5fb82
MF
2717 if (aconnector->dc_sink)
2718 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2719 aconnector->dc_sink = NULL;
2720 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2721 mutex_unlock(&aconnector->hpd_lock);
4562236b 2722 }
f8d2d39e 2723 drm_connector_list_iter_end(&iter);
4562236b 2724
1f6010a9 2725 /* Force mode set in atomic commit */
a80aa93d 2726 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2727 new_crtc_state->active_changed = true;
4f346e65 2728
fcb4019e
LSL
2729 /*
2730 * atomic_check is expected to create the dc states. We need to release
2731 * them here, since they were duplicated as part of the suspend
2732 * procedure.
2733 */
a80aa93d 2734 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2735 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2736 if (dm_new_crtc_state->stream) {
2737 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2738 dc_stream_release(dm_new_crtc_state->stream);
2739 dm_new_crtc_state->stream = NULL;
2740 }
2741 }
2742
a80aa93d 2743 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2744 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2745 if (dm_new_plane_state->dc_state) {
2746 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2747 dc_plane_state_release(dm_new_plane_state->dc_state);
2748 dm_new_plane_state->dc_state = NULL;
2749 }
2750 }
2751
2d1af6a1 2752 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2753
a80aa93d 2754 dm->cached_state = NULL;
0a214e2f 2755
9faa4237 2756 amdgpu_dm_irq_resume_late(adev);
4562236b 2757
9340dfd3
HW
2758 amdgpu_dm_smu_write_watermarks_table(adev);
2759
2d1af6a1 2760 return 0;
4562236b
HW
2761}
2762
b8592b48
LL
2763/**
2764 * DOC: DM Lifecycle
2765 *
2766 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2767 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2768 * the base driver's device list to be initialized and torn down accordingly.
2769 *
2770 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2771 */
2772
4562236b
HW
2773static const struct amd_ip_funcs amdgpu_dm_funcs = {
2774 .name = "dm",
2775 .early_init = dm_early_init,
7abcf6b5 2776 .late_init = dm_late_init,
4562236b
HW
2777 .sw_init = dm_sw_init,
2778 .sw_fini = dm_sw_fini,
e9669fb7 2779 .early_fini = amdgpu_dm_early_fini,
4562236b
HW
2780 .hw_init = dm_hw_init,
2781 .hw_fini = dm_hw_fini,
2782 .suspend = dm_suspend,
2783 .resume = dm_resume,
2784 .is_idle = dm_is_idle,
2785 .wait_for_idle = dm_wait_for_idle,
2786 .check_soft_reset = dm_check_soft_reset,
2787 .soft_reset = dm_soft_reset,
2788 .set_clockgating_state = dm_set_clockgating_state,
2789 .set_powergating_state = dm_set_powergating_state,
2790};
2791
2792const struct amdgpu_ip_block_version dm_ip_block =
2793{
2794 .type = AMD_IP_BLOCK_TYPE_DCE,
2795 .major = 1,
2796 .minor = 0,
2797 .rev = 0,
2798 .funcs = &amdgpu_dm_funcs,
2799};
2800
ca3268c4 2801
b8592b48
LL
2802/**
2803 * DOC: atomic
2804 *
2805 * *WIP*
2806 */
0a323b84 2807
b3663f70 2808static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2809 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2810 .get_format_info = amd_get_format_info,
366c1baa 2811 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2812 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2813 .atomic_commit = drm_atomic_helper_commit,
54f5499a
AG
2814};
2815
2816static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2817 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2818};
2819
94562810
RS
2820static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2821{
d8791dc7 2822 u32 max_avg, min_cll, max, min, q, r;
94562810
RS
2823 struct amdgpu_dm_backlight_caps *caps;
2824 struct amdgpu_display_manager *dm;
2825 struct drm_connector *conn_base;
2826 struct amdgpu_device *adev;
ec11fe37 2827 struct dc_link *link = NULL;
94562810
RS
2828 static const u8 pre_computed_values[] = {
2829 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2830 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
7fd13bae 2831 int i;
94562810
RS
2832
2833 if (!aconnector || !aconnector->dc_link)
2834 return;
2835
ec11fe37 2836 link = aconnector->dc_link;
2837 if (link->connector_signal != SIGNAL_TYPE_EDP)
2838 return;
2839
94562810 2840 conn_base = &aconnector->base;
1348969a 2841 adev = drm_to_adev(conn_base->dev);
94562810 2842 dm = &adev->dm;
7fd13bae
AD
2843 for (i = 0; i < dm->num_of_edps; i++) {
2844 if (link == dm->backlight_link[i])
2845 break;
2846 }
2847 if (i >= dm->num_of_edps)
2848 return;
2849 caps = &dm->backlight_caps[i];
94562810
RS
2850 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2851 caps->aux_support = false;
d8791dc7 2852 max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
94562810
RS
2853 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2854
d0ae0b64 2855 if (caps->ext_caps->bits.oled == 1 /*||
94562810 2856 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
d0ae0b64 2857 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
94562810
RS
2858 caps->aux_support = true;
2859
7a46f05e
TI
2860 if (amdgpu_backlight == 0)
2861 caps->aux_support = false;
2862 else if (amdgpu_backlight == 1)
2863 caps->aux_support = true;
2864
94562810
RS
2865 /* From the specification (CTA-861-G), for calculating the maximum
2866 * luminance we need to use:
2867 * Luminance = 50*2**(CV/32)
2868 * Where CV is a one-byte value.
2869 * For calculating this expression we may need floating-point precision;
2870 * to avoid this complexity, we take advantage of the fact that CV is divided
2871 * by a constant. From Euclid's division algorithm, we know that CV
2872 * can be written as: CV = 32*q + r. Next, we replace CV in the
2873 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2874 * need to pre-compute the values of 50*2**(r/32). For pre-computing them
2875 * we used the following Ruby line:
2876 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2877 * The results of the above expression can be verified against
2878 * pre_computed_values.
2879 */
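 /*
 * Worked example (illustrative values): for max_avg = 70, q = 70 >> 5
 * = 2 and r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6]
 * = 4 * 57 = 228, matching 50 * 2**(70/32) ~= 227.8 from the formula.
 */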
d8791dc7
RL
2880 q = max_avg >> 5;
2881 r = max_avg % 32;
94562810
RS
2882 max = (1 << q) * pre_computed_values[r];
2883
2884 // min luminance: maxLum * (CV/255)^2 / 100
2885 q = DIV_ROUND_CLOSEST(min_cll, 255);
2886 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2887
2888 caps->aux_max_input_signal = max;
2889 caps->aux_min_input_signal = min;
2890}
2891
97e51c16
HW
2892void amdgpu_dm_update_connector_after_detect(
2893 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2894{
2895 struct drm_connector *connector = &aconnector->base;
2896 struct drm_device *dev = connector->dev;
b73a22d3 2897 struct dc_sink *sink;
4562236b
HW
2898
2899 /* MST handled by drm_mst framework */
2900 if (aconnector->mst_mgr.mst_state == true)
2901 return;
2902
4562236b 2903 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2904 if (sink)
2905 dc_sink_retain(sink);
4562236b 2906
1f6010a9
DF
2907 /*
2908 * The Edid mgmt connector gets its first update only in the mode_valid hook, and then
4562236b 2909 * the connector sink is set to either a fake or a physical sink depending on link status.
1f6010a9 2910 * Skip if already done during boot.
4562236b
HW
2911 */
2912 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2913 && aconnector->dc_em_sink) {
2914
1f6010a9
DF
2915 /*
2916 * For headless S3 resume, use the em_sink to fake the stream,
2917 * because on resume connector->sink is set to NULL.
4562236b
HW
2918 */
2919 mutex_lock(&dev->mode_config.mutex);
2920
2921 if (sink) {
922aa1e1 2922 if (aconnector->dc_sink) {
98e6436d 2923 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2924 /*
2925 * retain and release below are used to
2926 * bump up refcount for sink because the link doesn't point
2927 * to it anymore after disconnect, so on next crtc to connector
922aa1e1
AG
2928 * reshuffle by UMD we will get into unwanted dc_sink release
2929 */
dcd5fb82 2930 dc_sink_release(aconnector->dc_sink);
922aa1e1 2931 }
4562236b 2932 aconnector->dc_sink = sink;
dcd5fb82 2933 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2934 amdgpu_dm_update_freesync_caps(connector,
2935 aconnector->edid);
4562236b 2936 } else {
98e6436d 2937 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2938 if (!aconnector->dc_sink) {
4562236b 2939 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2940 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2941 }
4562236b
HW
2942 }
2943
2944 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2945
2946 if (sink)
2947 dc_sink_release(sink);
4562236b
HW
2948 return;
2949 }
2950
2951 /*
2952 * TODO: temporary guard to look for proper fix
2953 * if this sink is MST sink, we should not do anything
2954 */
dcd5fb82
MF
2955 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2956 dc_sink_release(sink);
4562236b 2957 return;
dcd5fb82 2958 }
4562236b
HW
2959
2960 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2961 /*
2962 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2963 * Do nothing!!
2964 */
f1ad2f5e 2965 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2966 aconnector->connector_id);
dcd5fb82
MF
2967 if (sink)
2968 dc_sink_release(sink);
4562236b
HW
2969 return;
2970 }
2971
f1ad2f5e 2972 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2973 aconnector->connector_id, aconnector->dc_sink, sink);
2974
2975 mutex_lock(&dev->mode_config.mutex);
2976
1f6010a9
DF
2977 /*
2978 * 1. Update status of the drm connector
2979 * 2. Send an event and let userspace tell us what to do
2980 */
4562236b 2981 if (sink) {
1f6010a9
DF
2982 /*
2983 * TODO: check if we still need the S3 mode update workaround.
2984 * If yes, put it here.
2985 */
c64b0d6b 2986 if (aconnector->dc_sink) {
98e6436d 2987 amdgpu_dm_update_freesync_caps(connector, NULL);
c64b0d6b
VL
2988 dc_sink_release(aconnector->dc_sink);
2989 }
4562236b
HW
2990
2991 aconnector->dc_sink = sink;
dcd5fb82 2992 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2993 if (sink->dc_edid.length == 0) {
4562236b 2994 aconnector->edid = NULL;
e6142dd5
AP
2995 if (aconnector->dc_link->aux_mode) {
2996 drm_dp_cec_unset_edid(
2997 &aconnector->dm_dp_aux.aux);
2998 }
900b3cb1 2999 } else {
4562236b 3000 aconnector->edid =
e6142dd5 3001 (struct edid *)sink->dc_edid.raw_edid;
4562236b 3002
e6142dd5
AP
3003 if (aconnector->dc_link->aux_mode)
3004 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3005 aconnector->edid);
4562236b 3006 }
e6142dd5 3007
20543be9 3008 drm_connector_update_edid_property(connector, aconnector->edid);
98e6436d 3009 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 3010 update_connector_ext_caps(aconnector);
4562236b 3011 } else {
e86e8947 3012 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 3013 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 3014 drm_connector_update_edid_property(connector, NULL);
4562236b 3015 aconnector->num_modes = 0;
dcd5fb82 3016 dc_sink_release(aconnector->dc_sink);
4562236b 3017 aconnector->dc_sink = NULL;
5326c452 3018 aconnector->edid = NULL;
0c8620d6
BL
3019#ifdef CONFIG_DRM_AMD_DC_HDCP
3020 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3021 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3022 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3023#endif
4562236b
HW
3024 }
3025
3026 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 3027
0f877894
OV
3028 update_subconnector_property(aconnector);
3029
dcd5fb82
MF
3030 if (sink)
3031 dc_sink_release(sink);
4562236b
HW
3032}
3033
e27c41d5 3034static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
4562236b 3035{
4562236b
HW
3036 struct drm_connector *connector = &aconnector->base;
3037 struct drm_device *dev = connector->dev;
fbbdadf2 3038 enum dc_connection_type new_connection_type = dc_connection_none;
1348969a 3039 struct amdgpu_device *adev = drm_to_adev(dev);
10a36226 3040#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 3041 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
10a36226 3042#endif
4562236b 3043
b972b4f9
HW
3044 if (adev->dm.disable_hpd_irq)
3045 return;
3046
1f6010a9
DF
3047 /*
3048 * In case of failure, or for MST, there is no need to update the connector
3049 * status or notify the OS, since (in the MST case) MST does this in its own context.
4562236b
HW
3050 */
3051 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 3052
0c8620d6 3053#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 3054 if (adev->dm.hdcp_workqueue) {
96a3b32e 3055 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
97f6c917
BL
3056 dm_con_state->update_hdcp = true;
3057 }
0c8620d6 3058#endif
2e0ac3d6
HW
3059 if (aconnector->fake_enable)
3060 aconnector->fake_enable = false;
3061
fbbdadf2
BL
3062 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3063 DRM_ERROR("KMS: Failed to detect connector\n");
3064
3065 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3066 emulated_link_detect(aconnector->dc_link);
3067
fbbdadf2
BL
3068 drm_modeset_lock_all(dev);
3069 dm_restore_drm_connector_state(dev, connector);
3070 drm_modeset_unlock_all(dev);
3071
3072 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
fc320a6f 3073 drm_kms_helper_connector_hotplug_event(connector);
fbbdadf2
BL
3074
3075 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9 3076 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b
HW
3077
3078 drm_modeset_lock_all(dev);
3079 dm_restore_drm_connector_state(dev, connector);
3080 drm_modeset_unlock_all(dev);
3081
3082 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
fc320a6f 3083 drm_kms_helper_connector_hotplug_event(connector);
4562236b
HW
3084 }
3085 mutex_unlock(&aconnector->hpd_lock);
3086
3087}
3088
e27c41d5
JS
3089static void handle_hpd_irq(void *param)
3090{
3091 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3092
3093 handle_hpd_irq_helper(aconnector);
3094
3095}
3096
8e794421 3097static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
4562236b
HW
3098{
3099 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3100 uint8_t dret;
3101 bool new_irq_handled = false;
3102 int dpcd_addr;
3103 int dpcd_bytes_to_read;
3104
3105 const int max_process_count = 30;
3106 int process_count = 0;
3107
3108 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3109
3110 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3111 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3112 /* DPCD 0x200 - 0x201 for downstream IRQ */
3113 dpcd_addr = DP_SINK_COUNT;
3114 } else {
3115 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3116 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3117 dpcd_addr = DP_SINK_COUNT_ESI;
3118 }
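 /*
 * Note (per the DisplayPort spec, summarized): DPCD rev >= 1.2 sinks
 * report IRQ/sideband status through the Event Status Indicator (ESI)
 * registers at 0x2002-0x2005, while older sinks use the legacy block
 * at 0x200-0x201, which is why two address ranges are selected above.
 */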
3119
3120 dret = drm_dp_dpcd_read(
3121 &aconnector->dm_dp_aux.aux,
3122 dpcd_addr,
3123 esi,
3124 dpcd_bytes_to_read);
3125
3126 while (dret == dpcd_bytes_to_read &&
3127 process_count < max_process_count) {
3128 uint8_t retry;
3129 dret = 0;
3130
3131 process_count++;
3132
f1ad2f5e 3133 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
3134 /* handle HPD short pulse irq */
3135 if (aconnector->mst_mgr.mst_state)
3136 drm_dp_mst_hpd_irq(
3137 &aconnector->mst_mgr,
3138 esi,
3139 &new_irq_handled);
4562236b
HW
3140
3141 if (new_irq_handled) {
3142 /* ACK at DPCD to notify down stream */
3143 const int ack_dpcd_bytes_to_write =
3144 dpcd_bytes_to_read - 1;
3145
3146 for (retry = 0; retry < 3; retry++) {
3147 uint8_t wret;
3148
3149 wret = drm_dp_dpcd_write(
3150 &aconnector->dm_dp_aux.aux,
3151 dpcd_addr + 1,
3152 &esi[1],
3153 ack_dpcd_bytes_to_write);
3154 if (wret == ack_dpcd_bytes_to_write)
3155 break;
3156 }
3157
1f6010a9 3158 /* check if there is a new irq to be handled */
4562236b
HW
3159 dret = drm_dp_dpcd_read(
3160 &aconnector->dm_dp_aux.aux,
3161 dpcd_addr,
3162 esi,
3163 dpcd_bytes_to_read);
3164
3165 new_irq_handled = false;
d4a6e8a9 3166 } else {
4562236b 3167 break;
d4a6e8a9 3168 }
4562236b
HW
3169 }
3170
3171 if (process_count == max_process_count)
f1ad2f5e 3172 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
3173}
3174
8e794421
WL
3175static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3176 union hpd_irq_data hpd_irq_data)
3177{
3178 struct hpd_rx_irq_offload_work *offload_work =
3179 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3180
3181 if (!offload_work) {
3182 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3183 return;
3184 }
3185
3186 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3187 offload_work->data = hpd_irq_data;
3188 offload_work->offload_wq = offload_wq;
3189
3190 queue_work(offload_wq->wq, &offload_work->work);
3191 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3192}
3193
4562236b
HW
3194static void handle_hpd_rx_irq(void *param)
3195{
c84dec2f 3196 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
3197 struct drm_connector *connector = &aconnector->base;
3198 struct drm_device *dev = connector->dev;
53cbf65c 3199 struct dc_link *dc_link = aconnector->dc_link;
4562236b 3200 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 3201 bool result = false;
fbbdadf2 3202 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 3203 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 3204 union hpd_irq_data hpd_irq_data;
8e794421
WL
3205 bool link_loss = false;
3206 bool has_left_work = false;
3207 int idx = aconnector->base.index;
3208 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
2a0f9270
BL
3209
3210 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 3211
b972b4f9
HW
3212 if (adev->dm.disable_hpd_irq)
3213 return;
3214
1f6010a9
DF
3215 /*
3216 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
4562236b
HW
3217 * conflict; once the i2c helper is implemented, this mutex should be
3218 * retired.
3219 */
b86e7eef 3220 mutex_lock(&aconnector->hpd_lock);
4562236b 3221
8e794421
WL
3222 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3223 &link_loss, true, &has_left_work);
3083a984 3224
8e794421
WL
3225 if (!has_left_work)
3226 goto out;
3227
3228 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3229 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3230 goto out;
3231 }
3232
3233 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3234 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3235 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3236 dm_handle_mst_sideband_msg(aconnector);
3083a984
QZ
3237 goto out;
3238 }
3083a984 3239
8e794421
WL
3240 if (link_loss) {
3241 bool skip = false;
d2aa1356 3242
8e794421
WL
3243 spin_lock(&offload_wq->offload_lock);
3244 skip = offload_wq->is_handling_link_loss;
3245
3246 if (!skip)
3247 offload_wq->is_handling_link_loss = true;
3248
3249 spin_unlock(&offload_wq->offload_lock);
3250
3251 if (!skip)
3252 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3253
3254 goto out;
3255 }
3256 }
c8ea79a8 3257
3083a984 3258out:
c8ea79a8 3259 if (result && !is_mst_root_connector) {
4562236b 3260 /* Downstream Port status changed. */
fbbdadf2
BL
3261 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3262 DRM_ERROR("KMS: Failed to detect connector\n");
3263
3264 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3265 emulated_link_detect(dc_link);
3266
3267 if (aconnector->fake_enable)
3268 aconnector->fake_enable = false;
3269
3270 amdgpu_dm_update_connector_after_detect(aconnector);
3271
3272
3273 drm_modeset_lock_all(dev);
3274 dm_restore_drm_connector_state(dev, connector);
3275 drm_modeset_unlock_all(dev);
3276
fc320a6f 3277 drm_kms_helper_connector_hotplug_event(connector);
fbbdadf2 3278 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
3279
3280 if (aconnector->fake_enable)
3281 aconnector->fake_enable = false;
3282
4562236b
HW
3283 amdgpu_dm_update_connector_after_detect(aconnector);
3284
3285
3286 drm_modeset_lock_all(dev);
3287 dm_restore_drm_connector_state(dev, connector);
3288 drm_modeset_unlock_all(dev);
3289
fc320a6f 3290 drm_kms_helper_connector_hotplug_event(connector);
4562236b
HW
3291 }
3292 }
2a0f9270 3293#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
3294 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3295 if (adev->dm.hdcp_workqueue)
3296 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3297 }
2a0f9270 3298#endif
4562236b 3299
b86e7eef 3300 if (dc_link->type != dc_connection_mst_branch)
e86e8947 3301 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
b86e7eef
NC
3302
3303 mutex_unlock(&aconnector->hpd_lock);
4562236b
HW
3304}
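
/*
 * Illustrative sketch, not driver code: the claim-under-lock pattern used
 * in the link_loss branch above, so that only one worker at a time handles
 * a given link-loss episode.
 */
static bool example_try_claim(spinlock_t *lock, bool *busy)
{
	bool claimed = false;

	spin_lock(lock);
	if (!*busy) {
		*busy = true;	/* we own the episode now */
		claimed = true;
	}
	spin_unlock(lock);

	return claimed;		/* caller schedules work only if true */
}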

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);

			if (adev->dm.hpd_rx_offload_wq)
				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
					aconnector;
		}
	}
}

#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
	     i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
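
/*
 * Illustrative sketch, not driver code: the two-step registration sequence
 * that dce60/dce110/dcn10_register_irq_handlers() repeat per interrupt
 * source. The helper name and the legacy client id are placeholders.
 */
static int example_register_one_irq_source(struct amdgpu_device *adev,
					   struct dc *dc, unsigned int src_id,
					   struct amdgpu_irq_src *irq_src,
					   struct common_irq_params *params,
					   void (*handler)(void *))
{
	struct dc_interrupt_params int_params = {0};
	int r;

	/* Step 1: hook the source into the base driver's IRQ routing. */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, src_id, irq_src);
	if (r)
		return r;

	/* Step 2: hand the DC-side handler to the DM IRQ service. */
	int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
	int_params.irq_source = dc_interrupt_to_irq_source(dc, src_id, 0);
	params->adev = adev;
	params->irq_src = int_params.irq_source;
	amdgpu_dm_irq_register_interrupt(adev, &int_params, handler, params);

	return 0;
}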

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->family >= AMDGPU_FAMILY_AI)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
	     i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	static const unsigned int vrtl_int_srcid[] = {
		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
	};
#endif

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
	     i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use otg vertical line interrupt */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
				vrtl_int_srcid[i], &adev->vline0_irq);

		if (r) {
			DRM_ERROR("Failed to add vline0 irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
			break;
		}

		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
					- DC_IRQ_SOURCE_DC1_VLINE0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
	}
#endif

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
	     i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}

/* Register Outbox IRQ sources and initialize IRQ callbacks */
static int register_outbox_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r, i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
			&adev->dmub_outbox_irq);
	if (r) {
		DRM_ERROR("Failed to add outbox irq id!\n");
		return r;
	}

	if (dc->ctx->dmub_srv) {
		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.dmub_outbox_params[0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dmub_outbox1_low_irq, c_irq_params);
	}

	return 0;
}

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
int dm_atomic_get_state(struct drm_atomic_state *state,
			struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}
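
/*
 * Illustrative sketch, not driver code: a typical caller of
 * dm_atomic_get_state() during atomic check. Because *dm_state is filled in
 * lazily, repeated calls from the same check pass acquire the private-object
 * lock at most once.
 */
static int example_atomic_check_step(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = NULL;
	int ret;

	ret = dm_atomic_get_state(state, &dm_state);
	if (ret)
		return ret;

	/* ... validate the request against dm_state->context ... */

	return 0;
}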

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	/* disable prefer shadow for now due to hibernation issues */
	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
					    int bl_idx)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps[bl_idx].caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(&caps);
	if (caps.caps_valid) {
		dm->backlight_caps[bl_idx].caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps[bl_idx].min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps[bl_idx].max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps[bl_idx].aux_support)
		return;

	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned *min, unsigned *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}
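
/*
 * Worked example, not driver code: the PWM path above with the default
 * firmware limits 12..255 gives min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535, so a user brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */
static inline unsigned int example_rescale_brightness(unsigned int min,
						      unsigned int max,
						      unsigned int user)
{
	/* Same arithmetic as convert_brightness_from_user(), expanded. */
	return min + ((max - min) * user + 127) / 255;
}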

static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
					  int bl_idx,
					  u32 user_brightness)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	dm->brightness[bl_idx] = user_brightness;
	/* update scratch register */
	if (bl_idx == 0)
		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
	link = (struct dc_link *)dm->backlight_link[bl_idx];

	/* Change brightness based on AUX property */
	if (caps.aux_support) {
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
	} else {
		rc = dc_link_set_backlight_level(link, brightness, 0);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
	}

	if (rc)
		dm->actual_brightness[bl_idx] = user_brightness;
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);

	return 0;
}

static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
					 int bl_idx)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	if (caps.aux_support) {
		u32 avg, peak;
		bool rc;

		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
		if (!rc)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, avg);
	} else {
		int ret = dc_link_get_backlight_level(link);

		if (ret == DC_ERROR_UNEXPECTED)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, ret);
	}
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	return amdgpu_dm_backlight_get_level(dm, i);
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);

	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
								       adev_to_drm(dm->adev)->dev,
								       dm,
								       &amdgpu_dm_backlight_ops,
								       &props);

	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}
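
/*
 * Illustrative sketch, not driver code: what the possible_crtcs mask in
 * initialize_plane() encodes. A primary plane is tied 1:1 to its CRTC;
 * overlay/underlay planes (plane_id >= max_streams) may attach to any CRTC.
 */
static inline unsigned long example_possible_crtcs(int plane_id, int max_streams)
{
	return plane_id >= max_streams ? 0xff : 1ul << plane_id;
}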

static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having backlight control
		 * is better than a black screen.
		 */
		if (!dm->backlight_dev[dm->num_of_edps])
			amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev[dm->num_of_edps]) {
			dm->backlight_link[dm->num_of_edps] = link;
			dm->num_of_edps++;
		}
	}
}

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;
	bool psr_feature_enabled = false;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* Use Outbox interrupt */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 5):
	case IP_VERSION(3, 1, 6):
	case IP_VERSION(3, 2, 0):
	case IP_VERSION(3, 2, 1):
	case IP_VERSION(2, 1, 0):
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
			      adev->ip_versions[DCE_HWIP][0]);
	}

	/* Determine whether to enable PSR support by default. */
	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			psr_feature_enabled = true;
			break;
		default:
			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
			break;
		}
	}

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (dm->num_of_edps)
				update_connector_ext_caps(aconnector);
			if (psr_feature_enabled)
				amdgpu_dm_set_psr_caps(link);

			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
			 * PSR is also supported.
			 */
			if (link->psr_settings.psr_feature_enabled)
				adev_to_drm(adev)->vblank_disable_immediate = false;
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			if (dcn10_register_irq_handlers(dm->adev)) {
				DRM_ERROR("DM: Failed to initialize IRQ\n");
				goto fail;
			}
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
				  adev->ip_versions[DCE_HWIP][0]);
			goto fail;
		}
		break;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(3, 0, 0):
			adev->mode_info.num_crtc = 6;
			adev->mode_info.num_hpd = 6;
			adev->mode_info.num_dig = 6;
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(3, 0, 2):
			adev->mode_info.num_crtc = 5;
			adev->mode_info.num_hpd = 5;
			adev->mode_info.num_dig = 5;
			break;
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(3, 0, 3):
			adev->mode_info.num_crtc = 2;
			adev->mode_info.num_hpd = 2;
			adev->mode_info.num_dig = 2;
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			adev->mode_info.num_crtc = 4;
			adev->mode_info.num_hpd = 4;
			adev->mode_info.num_dig = 4;
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
				  adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
		break;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}
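
/*
 * Worked example, not driver code: scale factors above are in 1000-units,
 * i.e. 1000 == 1.0. A 1920-wide source shown at 960 pixels is
 * 960 * 1000 / 1920 = 500 (a 2x downscale), which is accepted only if it
 * is >= min_downscale for the framebuffer format.
 */
static inline int example_scale_factor_1000(int dst, int src)
{
	return dst * 1000 / src;
}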

static int fill_dc_scaling_info(struct amdgpu_device *adev,
				const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang on DCN1x.
	 * To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
	     (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
	     (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
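
/*
 * Illustrative sketch, not driver code: DRM plane source coordinates are
 * 16.16 fixed point, so the >> 16 in fill_dc_scaling_info() keeps only the
 * integer part, e.g. src_x == 0x00500000 (80.0) becomes 80.
 */
static inline int example_fixed1616_to_int(uint32_t v)
{
	return v >> 16;	/* drop the 16 fractional bits */
}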

static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

static int
validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

static bool
modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned
modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}

static void
fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
				    union dc_tiling_info *tiling_info,
				    uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2;

	pipes_log2 = min(5u, mod_pipe_xor_bits);

	fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}
4883
faa37f54
BN
4884enum dm_micro_swizzle {
4885 MICRO_SWIZZLE_Z = 0,
4886 MICRO_SWIZZLE_S = 1,
4887 MICRO_SWIZZLE_D = 2,
4888 MICRO_SWIZZLE_R = 3
4889};
4890
4891static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4892 uint32_t format,
4893 uint64_t modifier)
4894{
4895 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4896 const struct drm_format_info *info = drm_format_info(format);
366e817e
BN
4897 int i;
4898
faa37f54
BN
4899 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4900
4901 if (!info)
4902 return false;
4903
4904 /*
fe180178
QZ
4905 * We always have to allow these modifiers:
4906 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4907 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
faa37f54 4908 */
fe180178
QZ
4909 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4910 modifier == DRM_FORMAT_MOD_INVALID) {
faa37f54 4911 return true;
fe180178 4912 }
faa37f54 4913
366e817e
BN
4914 /* Check that the modifier is on the list of the plane's supported modifiers. */
4915 for (i = 0; i < plane->modifier_count; i++) {
4916 if (modifier == plane->modifiers[i])
fe180178
QZ
4917 break;
4918 }
366e817e
BN
4919 if (i == plane->modifier_count)
4920 return false;
faa37f54
BN
4921
4922 /*
4923 * For D swizzle the canonical modifier depends on the bpp, so check
4924 * it here.
4925 */
4926 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4927 adev->family >= AMDGPU_FAMILY_NV) {
4928 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4929 return false;
4930 }
4931
4932 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4933 info->cpp[0] < 8)
4934 return false;
4935
4936 if (modifier_has_dcc(modifier)) {
4937 /* Per radeonsi comments 16/64 bpp are more complicated. */
4938 if (info->cpp[0] != 4)
4939 return false;
951796f2
SS
4940 /* We support multi-planar formats, but not when combined with
4941 * additional DCC metadata planes. */
4942 if (info->num_planes > 1)
4943 return false;
faa37f54
BN
4944 }
4945
4946 return true;
4947}
4948
4949static void
4950add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4951{
4952 if (!*mods)
4953 return;
4954
4955 if (*cap - *size < 1) {
4956 uint64_t new_cap = *cap * 2;
4957 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4958
4959 if (!new_mods) {
4960 kfree(*mods);
4961 *mods = NULL;
4962 return;
4963 }
4964
4965 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4966 kfree(*mods);
4967 *mods = new_mods;
4968 *cap = new_cap;
4969 }
4970
4971 (*mods)[*size] = mod;
4972 *size += 1;
4973}
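/*
 * Usage sketch for add_modifier(), mirroring the callers below: the list
 * starts as a kmalloc'ed array of 'capacity' entries and doubles on
 * demand; on allocation failure *mods becomes NULL, which the final
 * NULL check in get_plane_modifiers() turns into -ENOMEM.
 *
 *   uint64_t *mods, size = 0, capacity = 128;
 *   mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
 *   add_modifier(&mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
 *   add_modifier(&mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
 *   if (!mods)
 *           return -ENOMEM;
 */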
4974
4975static void
4976add_gfx9_modifiers(const struct amdgpu_device *adev,
4977 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4978{
4979 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4980 int pipe_xor_bits = min(8, pipes +
4981 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4982 int bank_xor_bits = min(8 - pipe_xor_bits,
4983 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4984 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4985 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4986
4987
4988 if (adev->family == AMDGPU_FAMILY_RV) {
4989 /* Raven2 and later */
4990 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4991
4992 /*
4993 * No _D DCC swizzles yet because we only allow 32bpp, which
4994 * doesn't support _D on DCN
4995 */
4996
4997 if (has_constant_encode) {
4998 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4999 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5000 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5001 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5002 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5003 AMD_FMT_MOD_SET(DCC, 1) |
5004 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5005 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5006 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5007 }
5008
5009 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5010 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5011 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5012 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5013 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5014 AMD_FMT_MOD_SET(DCC, 1) |
5015 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5016 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5017 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5018
5019 if (has_constant_encode) {
5020 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5021 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5022 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5023 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5024 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5025 AMD_FMT_MOD_SET(DCC, 1) |
5026 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5027 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5028 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5029
5030 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5031 AMD_FMT_MOD_SET(RB, rb) |
5032 AMD_FMT_MOD_SET(PIPE, pipes));
5033 }
5034
5035 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5036 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5037 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5038 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5039 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5040 AMD_FMT_MOD_SET(DCC, 1) |
5041 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5042 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5043 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5044 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5045 AMD_FMT_MOD_SET(RB, rb) |
5046 AMD_FMT_MOD_SET(PIPE, pipes));
5047 }
5048
5049 /*
5050 * Only supported for 64bpp on Raven, will be filtered on format in
5051 * dm_plane_format_mod_supported.
5052 */
5053 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5054 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5055 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5056 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5057 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5058
5059 if (adev->family == AMDGPU_FAMILY_RV) {
5060 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5061 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5062 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5063 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5064 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5065 }
5066
5067 /*
5068 * Only supported for 64bpp on Raven, will be filtered on format in
5069 * dm_plane_format_mod_supported.
5070 */
5071 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5072 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5073 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5074
5075 if (adev->family == AMDGPU_FAMILY_RV) {
5076 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5077 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5078 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5079 }
5080}
5081
5082static void
5083add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5084 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5085{
5086 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5087
5088 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5089 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5090 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5091 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5092 AMD_FMT_MOD_SET(DCC, 1) |
5093 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5094 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5095 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5096
5097 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5098 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5099 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5100 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5101 AMD_FMT_MOD_SET(DCC, 1) |
5102 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5103 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5104 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5105 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5106
5107 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5108 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5109 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5110 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5111
5112 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5113 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5114 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5115 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5116
5117
5118 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5119 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5120 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5121 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5122
5123 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5124 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5125 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5126}
5127
5128static void
5129add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5130 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5131{
5132 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5133 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5134
5135 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5136 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5137 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5138 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5139 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5140 AMD_FMT_MOD_SET(DCC, 1) |
5141 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5142 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5143 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5144 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5145
7f6ab50a
JA
5146 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5147 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5148 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5149 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5150 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5151 AMD_FMT_MOD_SET(DCC, 1) |
5152 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5153 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5154 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5155
faa37f54
BN
5156 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5157 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5158 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5159 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5160 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5161 AMD_FMT_MOD_SET(DCC, 1) |
5162 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5163 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5164 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5165 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5166 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5167
7f6ab50a
JA
5168 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5169 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5170 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5171 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5172 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5173 AMD_FMT_MOD_SET(DCC, 1) |
5174 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5175 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5176 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5177 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5178
faa37f54
BN
5179 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5180 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5181 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5182 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5183 AMD_FMT_MOD_SET(PACKERS, pkrs));
5184
5185 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5186 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5187 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5188 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5189 AMD_FMT_MOD_SET(PACKERS, pkrs));
5190
5191 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5192 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5193 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5194 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5195
5196 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5197 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5198 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5199}
5200
543036a2
AP
5201static void
5202add_gfx11_modifiers(struct amdgpu_device *adev,
5203 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5204{
5205 int num_pipes = 0;
5206 int pipe_xor_bits = 0;
5207 int num_pkrs = 0;
5208 int pkrs = 0;
5209 u32 gb_addr_config;
ff15cea3 5210 u8 i = 0;
543036a2
AP
5211 unsigned swizzle_r_x;
5212 uint64_t modifier_r_x;
5213 uint64_t modifier_dcc_best;
5214 uint64_t modifier_dcc_4k;
5215
5216 /* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
5217 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */
5218 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
5219 ASSERT(gb_addr_config != 0);
5220
5221 num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
5222 pkrs = ilog2(num_pkrs);
5223 num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
5224 pipe_xor_bits = ilog2(num_pipes);
5225
ff15cea3
AP
5226 for (i = 0; i < 2; i++) {
5227 /* Insert the best one first. */
5228 /* R_X swizzle modes are the best for rendering and DCC requires them. */
5229 if (num_pipes > 16)
5230 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
5231 else
5232 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
5233
5234 modifier_r_x = AMD_FMT_MOD |
5235 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5236 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5237 AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
5238 AMD_FMT_MOD_SET(PACKERS, pkrs);
5239
5240 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
5241 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5242 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
5243 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5244 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
5245
5246 /* DCC settings for 4K and greater resolutions. (required by display hw) */
5247 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5248 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5249 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5250 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
5251
5252 add_modifier(mods, size, capacity, modifier_dcc_best);
5253 add_modifier(mods, size, capacity, modifier_dcc_4k);
5254
5255 add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5256 add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5257
5258 add_modifier(mods, size, capacity, modifier_r_x);
5259 }
543036a2
AP
5260
5261 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5262 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5263 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
5264}
5265
faa37f54 5266static int
543036a2 5267get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
faa37f54
BN
5268{
5269 uint64_t size = 0, capacity = 128;
5270 *mods = NULL;
5271
5272 /* We have not hooked up any pre-GFX9 modifiers. */
5273 if (adev->family < AMDGPU_FAMILY_AI)
5274 return 0;
5275
5276 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5277
5278 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5279 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5280 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5281 return *mods ? 0 : -ENOMEM;
5282 }
5283
5284 switch (adev->family) {
5285 case AMDGPU_FAMILY_AI:
5286 case AMDGPU_FAMILY_RV:
5287 add_gfx9_modifiers(adev, mods, &size, &capacity);
5288 break;
5289 case AMDGPU_FAMILY_NV:
5290 case AMDGPU_FAMILY_VGH:
1ebcaebd 5291 case AMDGPU_FAMILY_YC:
b5b8ed44 5292 case AMDGPU_FAMILY_GC_10_3_6:
de7cc1b4 5293 case AMDGPU_FAMILY_GC_10_3_7:
1d789535 5294 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
faa37f54
BN
5295 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5296 else
5297 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5298 break;
543036a2
AP
5299 case AMDGPU_FAMILY_GC_11_0_0:
5300 add_gfx11_modifiers(adev, mods, &size, &capacity);
5301 break;
faa37f54
BN
5302 }
5303
5304 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5305
5306 /* INVALID marks the end of the list. */
5307 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5308
5309 if (!*mods)
5310 return -ENOMEM;
5311
5312 return 0;
5313}
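/*
 * Consumption sketch, illustrative only: the returned array is
 * sentinel-terminated with DRM_FORMAT_MOD_INVALID, matching what
 * drm_universal_plane_init() expects for its format_modifiers argument,
 * so a caller can walk it without a separate count:
 *
 *   for (i = 0; mods[i] != DRM_FORMAT_MOD_INVALID; i++)
 *           handle_modifier(mods[i]);   // handle_modifier is hypothetical
 */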
5314
37384b3f
BN
5315static int
5316fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5317 const struct amdgpu_framebuffer *afb,
5318 const enum surface_pixel_format format,
5319 const enum dc_rotation_angle rotation,
5320 const struct plane_size *plane_size,
5321 union dc_tiling_info *tiling_info,
5322 struct dc_plane_dcc_param *dcc,
5323 struct dc_plane_address *address,
5324 const bool force_disable_dcc)
5325{
5326 const uint64_t modifier = afb->base.modifier;
2be7f77f 5327 int ret = 0;
37384b3f
BN
5328
5329 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5330 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5331
5332 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5333 uint64_t dcc_address = afb->address + afb->base.offsets[1];
3d360154 5334 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
a86396c3 5335 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
37384b3f
BN
5336
5337 dcc->enable = 1;
5338 dcc->meta_pitch = afb->base.pitches[1];
3d360154 5339 dcc->independent_64b_blks = independent_64b_blks;
543036a2 5340 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
a86396c3 5341 if (independent_64b_blks && independent_128b_blks)
f2e7d856 5342 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
a86396c3
JA
5343 else if (independent_128b_blks)
5344 dcc->dcc_ind_blk = hubp_ind_block_128b;
5345 else if (independent_64b_blks && !independent_128b_blks)
f2e7d856 5346 dcc->dcc_ind_blk = hubp_ind_block_64b;
a86396c3
JA
5347 else
5348 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5349 } else {
5350 if (independent_64b_blks)
5351 dcc->dcc_ind_blk = hubp_ind_block_64b;
5352 else
5353 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5354 }
37384b3f
BN
5355
5356 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5357 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5358 }
5359
5360 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5361 if (ret)
2be7f77f 5362 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
7df7e505 5363
2be7f77f 5364 return ret;
09e5665a
NK
5365}
5366
5367static int
320932bf 5368fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 5369 const struct amdgpu_framebuffer *afb,
695af5f9
NK
5370 const enum surface_pixel_format format,
5371 const enum dc_rotation_angle rotation,
5372 const uint64_t tiling_flags,
09e5665a 5373 union dc_tiling_info *tiling_info,
12e2b2d4 5374 struct plane_size *plane_size,
09e5665a 5375 struct dc_plane_dcc_param *dcc,
87b7ebc2 5376 struct dc_plane_address *address,
5888f07a 5377 bool tmz_surface,
87b7ebc2 5378 bool force_disable_dcc)
09e5665a 5379{
320932bf 5380 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
5381 int ret;
5382
5383 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 5384 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 5385 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
5386 memset(address, 0, sizeof(*address));
5387
5888f07a
HW
5388 address->tmz_surface = tmz_surface;
5389
695af5f9 5390 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
5391 uint64_t addr = afb->address + fb->offsets[0];
5392
12e2b2d4
DL
5393 plane_size->surface_size.x = 0;
5394 plane_size->surface_size.y = 0;
5395 plane_size->surface_size.width = fb->width;
5396 plane_size->surface_size.height = fb->height;
5397 plane_size->surface_pitch =
320932bf
NK
5398 fb->pitches[0] / fb->format->cpp[0];
5399
e0634e8d 5400 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
5401 address->grph.addr.low_part = lower_32_bits(addr);
5402 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 5403 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 5404 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 5405 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 5406
12e2b2d4
DL
5407 plane_size->surface_size.x = 0;
5408 plane_size->surface_size.y = 0;
5409 plane_size->surface_size.width = fb->width;
5410 plane_size->surface_size.height = fb->height;
5411 plane_size->surface_pitch =
320932bf
NK
5412 fb->pitches[0] / fb->format->cpp[0];
5413
12e2b2d4
DL
5414 plane_size->chroma_size.x = 0;
5415 plane_size->chroma_size.y = 0;
320932bf 5416 /* TODO: set these based on surface format */
12e2b2d4
DL
5417 plane_size->chroma_size.width = fb->width / 2;
5418 plane_size->chroma_size.height = fb->height / 2;
320932bf 5419
12e2b2d4 5420 plane_size->chroma_pitch =
320932bf
NK
5421 fb->pitches[1] / fb->format->cpp[1];
5422
e0634e8d
NK
5423 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5424 address->video_progressive.luma_addr.low_part =
be7b9b32 5425 lower_32_bits(luma_addr);
e0634e8d 5426 address->video_progressive.luma_addr.high_part =
be7b9b32 5427 upper_32_bits(luma_addr);
e0634e8d
NK
5428 address->video_progressive.chroma_addr.low_part =
5429 lower_32_bits(chroma_addr);
5430 address->video_progressive.chroma_addr.high_part =
5431 upper_32_bits(chroma_addr);
5432 }
09e5665a 5433
a3241991 5434 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
5435 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5436 rotation, plane_size,
5437 tiling_info, dcc,
5438 address,
5439 force_disable_dcc);
09e5665a
NK
5440 if (ret)
5441 return ret;
a3241991
BN
5442 } else {
5443 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
5444 }
5445
5446 return 0;
7df7e505
NK
5447}
5448
d74004b6 5449static void
695af5f9 5450fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
76818cdd
SJK
5451 bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5452 bool *global_alpha, int *global_alpha_value)
d74004b6
NK
5453{
5454 *per_pixel_alpha = false;
76818cdd 5455 *pre_multiplied_alpha = true;
d74004b6
NK
5456 *global_alpha = false;
5457 *global_alpha_value = 0xff;
5458
5459 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5460 return;
5461
76818cdd
SJK
5462 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5463 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
d74004b6
NK
5464 static const uint32_t alpha_formats[] = {
5465 DRM_FORMAT_ARGB8888,
5466 DRM_FORMAT_RGBA8888,
5467 DRM_FORMAT_ABGR8888,
5468 };
5469 uint32_t format = plane_state->fb->format->format;
5470 unsigned int i;
5471
5472 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5473 if (format == alpha_formats[i]) {
5474 *per_pixel_alpha = true;
5475 break;
5476 }
5477 }
76818cdd
SJK
5478
5479 if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5480 *pre_multiplied_alpha = false;
d74004b6
NK
5481 }
5482
5483 if (plane_state->alpha < 0xffff) {
5484 *global_alpha = true;
5485 *global_alpha_value = plane_state->alpha >> 8;
5486 }
5487}
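/*
 * Worked example, illustrative: DRM stores plane alpha as 16 bits while
 * DC takes 8, so the >> 8 above maps a half-transparent DRM alpha of
 * 0x8000 to a DC global_alpha_value of 0x80; the fully opaque default
 * 0xffff fails the < 0xffff test and never enables global alpha.
 */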
5488
004fefa3
NK
5489static int
5490fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 5491 const enum surface_pixel_format format,
004fefa3
NK
5492 enum dc_color_space *color_space)
5493{
5494 bool full_range;
5495
5496 *color_space = COLOR_SPACE_SRGB;
5497
5498 /* DRM color properties only affect non-RGB formats. */
695af5f9 5499 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
5500 return 0;
5501
5502 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5503
5504 switch (plane_state->color_encoding) {
5505 case DRM_COLOR_YCBCR_BT601:
5506 if (full_range)
5507 *color_space = COLOR_SPACE_YCBCR601;
5508 else
5509 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5510 break;
5511
5512 case DRM_COLOR_YCBCR_BT709:
5513 if (full_range)
5514 *color_space = COLOR_SPACE_YCBCR709;
5515 else
5516 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5517 break;
5518
5519 case DRM_COLOR_YCBCR_BT2020:
5520 if (full_range)
5521 *color_space = COLOR_SPACE_2020_YCBCR;
5522 else
5523 return -EINVAL;
5524 break;
5525
5526 default:
5527 return -EINVAL;
5528 }
5529
5530 return 0;
5531}
5532
695af5f9
NK
5533static int
5534fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5535 const struct drm_plane_state *plane_state,
5536 const uint64_t tiling_flags,
5537 struct dc_plane_info *plane_info,
87b7ebc2 5538 struct dc_plane_address *address,
5888f07a 5539 bool tmz_surface,
87b7ebc2 5540 bool force_disable_dcc)
695af5f9
NK
5541{
5542 const struct drm_framebuffer *fb = plane_state->fb;
5543 const struct amdgpu_framebuffer *afb =
5544 to_amdgpu_framebuffer(plane_state->fb);
695af5f9
NK
5545 int ret;
5546
5547 memset(plane_info, 0, sizeof(*plane_info));
5548
5549 switch (fb->format->format) {
5550 case DRM_FORMAT_C8:
5551 plane_info->format =
5552 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5553 break;
5554 case DRM_FORMAT_RGB565:
5555 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5556 break;
5557 case DRM_FORMAT_XRGB8888:
5558 case DRM_FORMAT_ARGB8888:
5559 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5560 break;
5561 case DRM_FORMAT_XRGB2101010:
5562 case DRM_FORMAT_ARGB2101010:
5563 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5564 break;
5565 case DRM_FORMAT_XBGR2101010:
5566 case DRM_FORMAT_ABGR2101010:
5567 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5568 break;
5569 case DRM_FORMAT_XBGR8888:
5570 case DRM_FORMAT_ABGR8888:
5571 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5572 break;
5573 case DRM_FORMAT_NV21:
5574 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5575 break;
5576 case DRM_FORMAT_NV12:
5577 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5578 break;
cbec6477
SW
5579 case DRM_FORMAT_P010:
5580 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5581 break;
492548dc
SW
5582 case DRM_FORMAT_XRGB16161616F:
5583 case DRM_FORMAT_ARGB16161616F:
5584 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5585 break;
2a5195dc
MK
5586 case DRM_FORMAT_XBGR16161616F:
5587 case DRM_FORMAT_ABGR16161616F:
5588 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5589 break;
58020403
MK
5590 case DRM_FORMAT_XRGB16161616:
5591 case DRM_FORMAT_ARGB16161616:
5592 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5593 break;
5594 case DRM_FORMAT_XBGR16161616:
5595 case DRM_FORMAT_ABGR16161616:
5596 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5597 break;
695af5f9
NK
5598 default:
5599 DRM_ERROR(
92f1d09c
SA
5600 "Unsupported screen format %p4cc\n",
5601 &fb->format->format);
695af5f9
NK
5602 return -EINVAL;
5603 }
5604
5605 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5606 case DRM_MODE_ROTATE_0:
5607 plane_info->rotation = ROTATION_ANGLE_0;
5608 break;
5609 case DRM_MODE_ROTATE_90:
5610 plane_info->rotation = ROTATION_ANGLE_90;
5611 break;
5612 case DRM_MODE_ROTATE_180:
5613 plane_info->rotation = ROTATION_ANGLE_180;
5614 break;
5615 case DRM_MODE_ROTATE_270:
5616 plane_info->rotation = ROTATION_ANGLE_270;
5617 break;
5618 default:
5619 plane_info->rotation = ROTATION_ANGLE_0;
5620 break;
5621 }
5622
5623 plane_info->visible = true;
5624 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5625
6d83a32d
MS
5626 plane_info->layer_index = 0;
5627
695af5f9
NK
5628 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5629 &plane_info->color_space);
5630 if (ret)
5631 return ret;
5632
5633 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5634 plane_info->rotation, tiling_flags,
5635 &plane_info->tiling_info,
5636 &plane_info->plane_size,
5888f07a 5637 &plane_info->dcc, address, tmz_surface,
87b7ebc2 5638 force_disable_dcc);
695af5f9
NK
5639 if (ret)
5640 return ret;
5641
5642 fill_blending_from_plane_state(
76818cdd 5643 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
695af5f9
NK
5644 &plane_info->global_alpha, &plane_info->global_alpha_value);
5645
5646 return 0;
5647}
5648
5649static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5650 struct dc_plane_state *dc_plane_state,
5651 struct drm_plane_state *plane_state,
5652 struct drm_crtc_state *crtc_state)
e7b07cee 5653{
cf020d49 5654 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 5655 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
5656 struct dc_scaling_info scaling_info;
5657 struct dc_plane_info plane_info;
695af5f9 5658 int ret;
87b7ebc2 5659 bool force_disable_dcc = false;
e7b07cee 5660
4375d625 5661 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
695af5f9
NK
5662 if (ret)
5663 return ret;
e7b07cee 5664
695af5f9
NK
5665 dc_plane_state->src_rect = scaling_info.src_rect;
5666 dc_plane_state->dst_rect = scaling_info.dst_rect;
5667 dc_plane_state->clip_rect = scaling_info.clip_rect;
5668 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 5669
87b7ebc2 5670 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 5671 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 5672 afb->tiling_flags,
695af5f9 5673 &plane_info,
87b7ebc2 5674 &dc_plane_state->address,
6eed95b0 5675 afb->tmz_surface,
87b7ebc2 5676 force_disable_dcc);
004fefa3
NK
5677 if (ret)
5678 return ret;
5679
695af5f9
NK
5680 dc_plane_state->format = plane_info.format;
5681 dc_plane_state->color_space = plane_info.color_space;
5683 dc_plane_state->plane_size = plane_info.plane_size;
5684 dc_plane_state->rotation = plane_info.rotation;
5685 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5686 dc_plane_state->stereo_format = plane_info.stereo_format;
5687 dc_plane_state->tiling_info = plane_info.tiling_info;
5688 dc_plane_state->visible = plane_info.visible;
5689 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
76818cdd 5690 dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
695af5f9
NK
5691 dc_plane_state->global_alpha = plane_info.global_alpha;
5692 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5693 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 5694 dc_plane_state->layer_index = plane_info.layer_index; /* always returns 0 */
7afa0033 5695 dc_plane_state->flip_int_enabled = true;
695af5f9 5696
e277adc5
LSL
5697 /*
5698 * Always set input transfer function, since plane state is refreshed
5699 * every time.
5700 */
cf020d49
NK
5701 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5702 if (ret)
5703 return ret;
e7b07cee 5704
cf020d49 5705 return 0;
e7b07cee
HW
5706}
5707
7cc191ee
LL
5708/**
5709 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5710 *
5711 * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5712 * remote fb
5713 * @old_plane_state: Old state of @plane
5714 * @new_plane_state: New state of @plane
5715 * @crtc_state: New state of CRTC connected to the @plane
5716 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5717 *
5718 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5719 * (referred to as "damage clips" in DRM nomenclature) that require updating on
5720 * the eDP remote buffer. The responsibility of specifying the dirty regions is
5721 * amdgpu_dm's.
5722 *
5723 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5724 * plane with regions that require flushing to the eDP remote buffer. In
5725 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5726 * implicitly provide damage clips without any client support via the plane
5727 * bounds.
5728 *
5729 * Today, amdgpu_dm only supports the MPO and cursor use cases.
5730 *
5731 * TODO: Also enable for FB_DAMAGE_CLIPS
5732 */
5733static void fill_dc_dirty_rects(struct drm_plane *plane,
5734 struct drm_plane_state *old_plane_state,
5735 struct drm_plane_state *new_plane_state,
5736 struct drm_crtc_state *crtc_state,
5737 struct dc_flip_addrs *flip_addrs)
5738{
5739 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5740 struct rect *dirty_rects = flip_addrs->dirty_rects;
5741 uint32_t num_clips;
5742 bool bb_changed;
5743 bool fb_changed;
5744 uint32_t i = 0;
5745
5746 flip_addrs->dirty_rect_count = 0;
5747
5748 /*
5749 * Cursor plane has its own dirty rect update interface. See
5750 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5751 */
5752 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5753 return;
5754
5755 /*
5756 * Today, we only consider the MPO use case for PSR SU. If MPO is not
5757 * requested and there is a plane update, do a full-frame update (FFU).
5758 */
5759 if (!dm_crtc_state->mpo_requested) {
5760 dirty_rects[0].x = 0;
5761 dirty_rects[0].y = 0;
5762 dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
5763 dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
5764 flip_addrs->dirty_rect_count = 1;
5765 DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5766 new_plane_state->plane->base.id,
5767 dm_crtc_state->base.mode.crtc_hdisplay,
5768 dm_crtc_state->base.mode.crtc_vdisplay);
5769 return;
5770 }
5771
5772 /*
5773 * MPO is requested. Add entire plane bounding box to dirty rects if
5774 * flipped to or damaged.
5775 *
5776 * If plane is moved or resized, also add old bounding box to dirty
5777 * rects.
5778 */
5779 num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5780 fb_changed = old_plane_state->fb->base.id !=
5781 new_plane_state->fb->base.id;
5782 bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5783 old_plane_state->crtc_y != new_plane_state->crtc_y ||
5784 old_plane_state->crtc_w != new_plane_state->crtc_w ||
5785 old_plane_state->crtc_h != new_plane_state->crtc_h);
5786
5787 DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5788 new_plane_state->plane->base.id,
5789 bb_changed, fb_changed, num_clips);
5790
5791 if (num_clips || fb_changed || bb_changed) {
5792 dirty_rects[i].x = new_plane_state->crtc_x;
5793 dirty_rects[i].y = new_plane_state->crtc_y;
5794 dirty_rects[i].width = new_plane_state->crtc_w;
5795 dirty_rects[i].height = new_plane_state->crtc_h;
5796 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5797 new_plane_state->plane->base.id,
5798 dirty_rects[i].x, dirty_rects[i].y,
5799 dirty_rects[i].width, dirty_rects[i].height);
5800 i += 1;
5801 }
5802
5803 /* Add old plane bounding-box if plane is moved or resized */
5804 if (bb_changed) {
5805 dirty_rects[i].x = old_plane_state->crtc_x;
5806 dirty_rects[i].y = old_plane_state->crtc_y;
5807 dirty_rects[i].width = old_plane_state->crtc_w;
5808 dirty_rects[i].height = old_plane_state->crtc_h;
5809 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5810 old_plane_state->plane->base.id,
5811 dirty_rects[i].x, dirty_rects[i].y,
5812 dirty_rects[i].width, dirty_rects[i].height);
5813 i += 1;
5814 }
5815
5816 flip_addrs->dirty_rect_count = i;
5817}
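/*
 * Worked example, illustrative: for an MPO plane that is both flipped
 * and moved, fb_changed and bb_changed are both true, so both blocks
 * above fire (new bounds at index 0, old bounds at index 1) and
 * dirty_rect_count becomes 2; a flip without a move reports only the
 * new bounds, i.e. a count of 1.
 */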
5818
3ee6b26b
AD
5819static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5820 const struct dm_connector_state *dm_state,
5821 struct dc_stream_state *stream)
e7b07cee
HW
5822{
5823 enum amdgpu_rmx_type rmx_type;
5824
5825 struct rect src = { 0 }; /* viewport in composition space */
5826 struct rect dst = { 0 }; /* stream addressable area */
5827
5828 /* no mode. nothing to be done */
5829 if (!mode)
5830 return;
5831
5832 /* Full screen scaling by default */
5833 src.width = mode->hdisplay;
5834 src.height = mode->vdisplay;
5835 dst.width = stream->timing.h_addressable;
5836 dst.height = stream->timing.v_addressable;
5837
f4791779
HW
5838 if (dm_state) {
5839 rmx_type = dm_state->scaling;
5840 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5841 if (src.width * dst.height <
5842 src.height * dst.width) {
5843 /* height needs less upscaling/more downscaling */
5844 dst.width = src.width *
5845 dst.height / src.height;
5846 } else {
5847 /* width needs less upscaling/more downscaling */
5848 dst.height = src.height *
5849 dst.width / src.width;
5850 }
5851 } else if (rmx_type == RMX_CENTER) {
5852 dst = src;
e7b07cee 5853 }
e7b07cee 5854
f4791779
HW
5855 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5856 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 5857
f4791779
HW
5858 if (dm_state->underscan_enable) {
5859 dst.x += dm_state->underscan_hborder / 2;
5860 dst.y += dm_state->underscan_vborder / 2;
5861 dst.width -= dm_state->underscan_hborder;
5862 dst.height -= dm_state->underscan_vborder;
5863 }
e7b07cee
HW
5864 }
5865
5866 stream->src = src;
5867 stream->dst = dst;
5868
4711c033
LT
5869 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5870 dst.x, dst.y, dst.width, dst.height);
e7b07cee
HW
5871
5872}
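/*
 * Worked example, illustrative, of the RMX_ASPECT math above: a
 * 1280x720 source on a 1920x1200 stream has 1280*1200 >= 720*1920
 * (1536000 vs 1382400), so dst.height = 720 * 1920 / 1280 = 1080 and
 * dst.y = (1200 - 1080) / 2 = 60, i.e. the image is letterboxed.
 */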
5873
3ee6b26b 5874static enum dc_color_depth
42ba01fc 5875convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 5876 bool is_y420, int requested_bpc)
e7b07cee 5877{
1bc22f20 5878 uint8_t bpc;
01c22997 5879
1bc22f20
SW
5880 if (is_y420) {
5881 bpc = 8;
5882
5883 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5884 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5885 bpc = 16;
5886 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5887 bpc = 12;
5888 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5889 bpc = 10;
5890 } else {
5891 bpc = (uint8_t)connector->display_info.bpc;
5892 /* Assume 8 bpc by default if no bpc is specified. */
5893 bpc = bpc ? bpc : 8;
5894 }
e7b07cee 5895
cbd14ae7 5896 if (requested_bpc > 0) {
01c22997
NK
5897 /*
5898 * Cap display bpc based on the user requested value.
5899 *
5900 * The value for state->max_bpc may not be correctly updated
5901 * depending on when the connector gets added to the state
5902 * or if this was called outside of atomic check, so it
5903 * can't be used directly.
5904 */
cbd14ae7 5905 bpc = min_t(u8, bpc, requested_bpc);
01c22997 5906
1825fd34
NK
5907 /* Round down to the nearest even number. */
5908 bpc = bpc - (bpc & 1);
5909 }
07e3a1cf 5910
e7b07cee
HW
5911 switch (bpc) {
5912 case 0:
1f6010a9
DF
5913 /*
5914 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
5915 * EDID revisions before 1.4
5916 * TODO: Fix edid parsing
5917 */
5918 return COLOR_DEPTH_888;
5919 case 6:
5920 return COLOR_DEPTH_666;
5921 case 8:
5922 return COLOR_DEPTH_888;
5923 case 10:
5924 return COLOR_DEPTH_101010;
5925 case 12:
5926 return COLOR_DEPTH_121212;
5927 case 14:
5928 return COLOR_DEPTH_141414;
5929 case 16:
5930 return COLOR_DEPTH_161616;
5931 default:
5932 return COLOR_DEPTH_UNDEFINED;
5933 }
5934}
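/*
 * Worked example, illustrative: a 10 bpc panel with a userspace
 * requested_bpc of 9 gives min_t(u8, 10, 9) = 9, and the even rounding
 * bpc - (bpc & 1) lands on 8, so the function returns COLOR_DEPTH_888.
 */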
5935
3ee6b26b
AD
5936static enum dc_aspect_ratio
5937get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 5938{
e11d4147
LSL
5939 /* 1-1 mapping, since both enums follow the HDMI spec. */
5940 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
5941}
5942
3ee6b26b
AD
5943static enum dc_color_space
5944get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
5945{
5946 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5947
5948 switch (dc_crtc_timing->pixel_encoding) {
5949 case PIXEL_ENCODING_YCBCR422:
5950 case PIXEL_ENCODING_YCBCR444:
5951 case PIXEL_ENCODING_YCBCR420:
5952 {
5953 /*
5954 * 27030 kHz is the separation point between HDTV and SDTV
5955 * according to the HDMI spec; we use YCbCr709 and YCbCr601
5956 * respectively.
5957 */
380604e2 5958 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
5959 if (dc_crtc_timing->flags.Y_ONLY)
5960 color_space =
5961 COLOR_SPACE_YCBCR709_LIMITED;
5962 else
5963 color_space = COLOR_SPACE_YCBCR709;
5964 } else {
5965 if (dc_crtc_timing->flags.Y_ONLY)
5966 color_space =
5967 COLOR_SPACE_YCBCR601_LIMITED;
5968 else
5969 color_space = COLOR_SPACE_YCBCR601;
5970 }
5971
5972 }
5973 break;
5974 case PIXEL_ENCODING_RGB:
5975 color_space = COLOR_SPACE_SRGB;
5976 break;
5977
5978 default:
5979 WARN_ON(1);
5980 break;
5981 }
5982
5983 return color_space;
5984}
5985
ea117312
TA
5986static bool adjust_colour_depth_from_display_info(
5987 struct dc_crtc_timing *timing_out,
5988 const struct drm_display_info *info)
400443e8 5989{
ea117312 5990 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 5991 int normalized_clk;
400443e8 5992 do {
380604e2 5993 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
5994 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5995 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5996 normalized_clk /= 2;
5997 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
ea117312
TA
5998 switch (depth) {
5999 case COLOR_DEPTH_888:
6000 break;
400443e8
ML
6001 case COLOR_DEPTH_101010:
6002 normalized_clk = (normalized_clk * 30) / 24;
6003 break;
6004 case COLOR_DEPTH_121212:
6005 normalized_clk = (normalized_clk * 36) / 24;
6006 break;
6007 case COLOR_DEPTH_161616:
6008 normalized_clk = (normalized_clk * 48) / 24;
6009 break;
6010 default:
ea117312
TA
6011 /* The above depths are the only ones valid for HDMI. */
6012 return false;
400443e8 6013 }
ea117312
TA
6014 if (normalized_clk <= info->max_tmds_clock) {
6015 timing_out->display_color_depth = depth;
6016 return true;
6017 }
6018 } while (--depth > COLOR_DEPTH_666);
6019 return false;
400443e8 6020}
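/*
 * Worked example, illustrative: 4k60 RGB has pix_clk_100hz = 5940000,
 * i.e. normalized_clk = 594000 kHz. At COLOR_DEPTH_101010 that scales
 * by 30/24 to 742500 kHz, above the 600000 kHz max_tmds_clock of an
 * HDMI 2.0 sink, so the loop steps down and succeeds at
 * COLOR_DEPTH_888 with 594000 kHz.
 */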
e7b07cee 6021
42ba01fc
NK
6022static void fill_stream_properties_from_drm_display_mode(
6023 struct dc_stream_state *stream,
6024 const struct drm_display_mode *mode_in,
6025 const struct drm_connector *connector,
6026 const struct drm_connector_state *connector_state,
cbd14ae7
SW
6027 const struct dc_stream_state *old_stream,
6028 int requested_bpc)
e7b07cee
HW
6029{
6030 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 6031 const struct drm_display_info *info = &connector->display_info;
d4252eee 6032 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
6033 struct hdmi_vendor_infoframe hv_frame;
6034 struct hdmi_avi_infoframe avi_frame;
e7b07cee 6035
acf83f86
WL
6036 memset(&hv_frame, 0, sizeof(hv_frame));
6037 memset(&avi_frame, 0, sizeof(avi_frame));
6038
e7b07cee
HW
6039 timing_out->h_border_left = 0;
6040 timing_out->h_border_right = 0;
6041 timing_out->v_border_top = 0;
6042 timing_out->v_border_bottom = 0;
6043 /* TODO: un-hardcode */
fe61a2f1 6044 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 6045 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 6046 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
6047 else if (drm_mode_is_420_also(info, mode_in)
6048 && aconnector->force_yuv420_output)
6049 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
c03d0b52 6050 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
ceb3dbb4 6051 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
6052 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
6053 else
6054 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
6055
6056 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
6057 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
6058 connector,
6059 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
6060 requested_bpc);
e7b07cee
HW
6061 timing_out->scan_type = SCANNING_TYPE_NODATA;
6062 timing_out->hdmi_vic = 0;
b333730d
BL
6063
6064 if (old_stream) {
6065 timing_out->vic = old_stream->timing.vic;
6066 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
6067 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
6068 } else {
6069 timing_out->vic = drm_match_cea_mode(mode_in);
6070 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
6071 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
6072 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
6073 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
6074 }
e7b07cee 6075
1cb1d477
WL
6076 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6077 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
6078 timing_out->vic = avi_frame.video_code;
6079 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
6080 timing_out->hdmi_vic = hv_frame.vic;
6081 }
6082
fe8858bb
NC
6083 if (is_freesync_video_mode(mode_in, aconnector)) {
6084 timing_out->h_addressable = mode_in->hdisplay;
6085 timing_out->h_total = mode_in->htotal;
6086 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
6087 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
6088 timing_out->v_total = mode_in->vtotal;
6089 timing_out->v_addressable = mode_in->vdisplay;
6090 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
6091 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
6092 timing_out->pix_clk_100hz = mode_in->clock * 10;
6093 } else {
6094 timing_out->h_addressable = mode_in->crtc_hdisplay;
6095 timing_out->h_total = mode_in->crtc_htotal;
6096 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
6097 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
6098 timing_out->v_total = mode_in->crtc_vtotal;
6099 timing_out->v_addressable = mode_in->crtc_vdisplay;
6100 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
6101 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
6102 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
6103 }
a85ba005 6104
e7b07cee 6105 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
6106
6107 stream->output_color_space = get_output_color_space(timing_out);
6108
e43a432c
AK
6109 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
6110 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
6111 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6112 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6113 drm_mode_is_420_also(info, mode_in) &&
6114 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6115 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6116 adjust_colour_depth_from_display_info(timing_out, info);
6117 }
6118 }
e7b07cee
HW
6119}
6120
3ee6b26b
AD
6121static void fill_audio_info(struct audio_info *audio_info,
6122 const struct drm_connector *drm_connector,
6123 const struct dc_sink *dc_sink)
e7b07cee
HW
6124{
6125 int i = 0;
6126 int cea_revision = 0;
6127 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6128
6129 audio_info->manufacture_id = edid_caps->manufacturer_id;
6130 audio_info->product_id = edid_caps->product_id;
6131
6132 cea_revision = drm_connector->display_info.cea_rev;
6133
090afc1e 6134 strscpy(audio_info->display_name,
d2b2562c 6135 edid_caps->display_name,
090afc1e 6136 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 6137
b830ebc9 6138 if (cea_revision >= 3) {
e7b07cee
HW
6139 audio_info->mode_count = edid_caps->audio_mode_count;
6140
6141 for (i = 0; i < audio_info->mode_count; ++i) {
6142 audio_info->modes[i].format_code =
6143 (enum audio_format_code)
6144 (edid_caps->audio_modes[i].format_code);
6145 audio_info->modes[i].channel_count =
6146 edid_caps->audio_modes[i].channel_count;
6147 audio_info->modes[i].sample_rates.all =
6148 edid_caps->audio_modes[i].sample_rate;
6149 audio_info->modes[i].sample_size =
6150 edid_caps->audio_modes[i].sample_size;
6151 }
6152 }
6153
6154 audio_info->flags.all = edid_caps->speaker_flags;
6155
6156 /* TODO: We only check for the progressive mode, check for interlace mode too */
b830ebc9 6157 if (drm_connector->latency_present[0]) {
e7b07cee
HW
6158 audio_info->video_latency = drm_connector->video_latency[0];
6159 audio_info->audio_latency = drm_connector->audio_latency[0];
6160 }
6161
6162 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6163
6164}
6165
3ee6b26b
AD
6166static void
6167copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6168 struct drm_display_mode *dst_mode)
e7b07cee
HW
6169{
6170 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6171 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6172 dst_mode->crtc_clock = src_mode->crtc_clock;
6173 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6174 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 6175 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
6176 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6177 dst_mode->crtc_htotal = src_mode->crtc_htotal;
6178 dst_mode->crtc_hskew = src_mode->crtc_hskew;
6179 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6180 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6181 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6182 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6183 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6184}
6185
3ee6b26b
AD
6186static void
6187decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6188 const struct drm_display_mode *native_mode,
6189 bool scale_enabled)
e7b07cee
HW
6190{
6191 if (scale_enabled) {
6192 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6193 } else if (native_mode->clock == drm_mode->clock &&
6194 native_mode->htotal == drm_mode->htotal &&
6195 native_mode->vtotal == drm_mode->vtotal) {
6196 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6197 } else {
6198 /* no scaling nor amdgpu inserted, no need to patch */
6199 }
6200}
6201
aed15309
ML
6202static struct dc_sink *
6203create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 6204{
2e0ac3d6 6205 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 6206 struct dc_sink *sink = NULL;
2e0ac3d6
HW
6207 sink_init_data.link = aconnector->dc_link;
6208 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6209
6210 sink = dc_sink_create(&sink_init_data);
423788c7 6211 if (!sink) {
2e0ac3d6 6212 DRM_ERROR("Failed to create sink!\n");
aed15309 6213 return NULL;
423788c7 6214 }
2e0ac3d6 6215 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 6216
aed15309 6217 return sink;
2e0ac3d6
HW
6218}
6219
fa2123db
ML
6220static void set_multisync_trigger_params(
6221 struct dc_stream_state *stream)
6222{
ec372186
ML
6223 struct dc_stream_state *master = NULL;
6224
fa2123db 6225 if (stream->triggered_crtc_reset.enabled) {
ec372186
ML
6226 master = stream->triggered_crtc_reset.event_source;
6227 stream->triggered_crtc_reset.event =
6228 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6229 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6230 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
fa2123db
ML
6231 }
6232}
6233
6234static void set_master_stream(struct dc_stream_state *stream_set[],
6235 int stream_count)
6236{
6237 int j, highest_rfr = 0, master_stream = 0;
6238
6239 for (j = 0; j < stream_count; j++) {
6240 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6241 int refresh_rate = 0;
6242
380604e2 6243 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
6244 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6245 if (refresh_rate > highest_rfr) {
6246 highest_rfr = refresh_rate;
6247 master_stream = j;
6248 }
6249 }
6250 }
6251 for (j = 0; j < stream_count; j++) {
03736f4c 6252 if (stream_set[j])
fa2123db
ML
6253 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6254 }
6255}
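/*
 * Worked example, illustrative, of the refresh-rate pick above: a
 * 1080p60 stream with pix_clk_100hz = 1485000 and a 2200x1125 total
 * yields (1485000 * 100) / (2200 * 1125) = 60, so it beats a 30 Hz
 * stream for master_stream and becomes every stream's
 * triggered_crtc_reset.event_source.
 */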
6256
6257static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6258{
6259 int i = 0;
ec372186 6260 struct dc_stream_state *stream;
fa2123db
ML
6261
6262 if (context->stream_count < 2)
6263 return;
6264 for (i = 0; i < context->stream_count ; i++) {
6265 if (!context->streams[i])
6266 continue;
1f6010a9
DF
6267 /*
6268 * TODO: add a function to read AMD VSDB bits and set
fa2123db 6269 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 6270 * For now it's set to false
fa2123db 6271 */
fa2123db 6272 }
ec372186 6273
fa2123db 6274 set_master_stream(context->streams, context->stream_count);
ec372186
ML
6275
6276 for (i = 0; i < context->stream_count ; i++) {
6277 stream = context->streams[i];
6278
6279 if (!stream)
6280 continue;
6281
6282 set_multisync_trigger_params(stream);
6283 }
fa2123db
ML
6284}
6285
ea2be5c0 6286#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6287static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6288 struct dc_sink *sink, struct dc_stream_state *stream,
6289 struct dsc_dec_dpcd_caps *dsc_caps)
6290{
6291 stream->timing.flags.DSC = 0;
63ad5371 6292 dsc_caps->is_dsc_supported = false;
998b7ad2 6293
2665f63a
ML
6294 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6295 sink->sink_signal == SIGNAL_TYPE_EDP)) {
50b1f44e
FZ
6296 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6297 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6298 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6299 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6300 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6301 dsc_caps);
998b7ad2
FZ
6302 }
6303}
6304
2665f63a
ML
6305static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6306 struct dc_sink *sink, struct dc_stream_state *stream,
6307 struct dsc_dec_dpcd_caps *dsc_caps,
6308 uint32_t max_dsc_target_bpp_limit_override)
6309{
6310 const struct dc_link_settings *verified_link_cap = NULL;
6311 uint32_t link_bw_in_kbps;
6312 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6313 struct dc *dc = sink->ctx->dc;
6314 struct dc_dsc_bw_range bw_range = {0};
6315 struct dc_dsc_config dsc_cfg = {0};
6316
6317 verified_link_cap = dc_link_get_link_cap(stream->link);
6318 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6319 edp_min_bpp_x16 = 8 * 16;
6320 edp_max_bpp_x16 = 8 * 16;
6321
6322 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6323 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6324
6325 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6326 edp_min_bpp_x16 = edp_max_bpp_x16;
6327
6328 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6329 dc->debug.dsc_min_slice_height_override,
6330 edp_min_bpp_x16, edp_max_bpp_x16,
6331 dsc_caps,
6332 &stream->timing,
6333 &bw_range)) {
6334
6335 if (bw_range.max_kbps < link_bw_in_kbps) {
6336 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6337 dsc_caps,
6338 dc->debug.dsc_min_slice_height_override,
6339 max_dsc_target_bpp_limit_override,
6340 0,
6341 &stream->timing,
6342 &dsc_cfg)) {
6343 stream->timing.dsc_cfg = dsc_cfg;
6344 stream->timing.flags.DSC = 1;
6345 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6346 }
6347 return;
6348 }
6349 }
6350
6351 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6352 dsc_caps,
6353 dc->debug.dsc_min_slice_height_override,
6354 max_dsc_target_bpp_limit_override,
6355 link_bw_in_kbps,
6356 &stream->timing,
6357 &dsc_cfg)) {
6358 stream->timing.dsc_cfg = dsc_cfg;
6359 stream->timing.flags.DSC = 1;
6360 }
6361}
6362
998b7ad2
FZ
6363static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6364 struct dc_sink *sink, struct dc_stream_state *stream,
6365 struct dsc_dec_dpcd_caps *dsc_caps)
6366{
6367 struct drm_connector *drm_connector = &aconnector->base;
6368 uint32_t link_bandwidth_kbps;
f1c1a982 6369 uint32_t max_dsc_target_bpp_limit_override = 0;
2665f63a 6370 struct dc *dc = sink->ctx->dc;
50b1f44e
FZ
6371 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6372 uint32_t dsc_max_supported_bw_in_kbps;
998b7ad2
FZ
6373
6374 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6375 dc_link_get_link_cap(aconnector->dc_link));
f1c1a982
RL
6376
6377 if (stream->link && stream->link->local_sink)
6378 max_dsc_target_bpp_limit_override =
6379 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
de7cc1b4 6380
998b7ad2
FZ
6381 /* Set DSC policy according to dsc_clock_en */
6382 dc_dsc_policy_set_enable_dsc_when_not_needed(
6383 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6384
2665f63a
ML
6385 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6386 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6387
6388 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6389
6390 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
50b1f44e
FZ
6391 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6392 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
998b7ad2
FZ
6393 dsc_caps,
6394 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
f1c1a982 6395 max_dsc_target_bpp_limit_override,
998b7ad2
FZ
6396 link_bandwidth_kbps,
6397 &stream->timing,
6398 &stream->timing.dsc_cfg)) {
50b1f44e
FZ
6399 stream->timing.flags.DSC = 1;
6400 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6401 __func__, drm_connector->name);
6402 }
6403 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
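			/* DP-to-HDMI PCON: engage DSC only when the uncompressed timing would exceed the DP link bandwidth. */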
6404 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6405 max_supported_bw_in_kbps = link_bandwidth_kbps;
6406 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6407
6408 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6409 max_supported_bw_in_kbps > 0 &&
6410 dsc_max_supported_bw_in_kbps > 0)
6411 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6412 dsc_caps,
6413 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6414 max_dsc_target_bpp_limit_override,
6415 dsc_max_supported_bw_in_kbps,
6416 &stream->timing,
6417 &stream->timing.dsc_cfg)) {
6418 stream->timing.flags.DSC = 1;
6419 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6420 __func__, drm_connector->name);
6421 }
998b7ad2
FZ
6422 }
6423 }
6424
6425 /* Overwrite the stream flag if DSC is enabled through debugfs */
6426 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6427 stream->timing.flags.DSC = 1;
6428
6429 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6430 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6431
6432 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6433 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6434
6435 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6436 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
998b7ad2 6437}
433e5dec 6438#endif /* CONFIG_DRM_AMD_DC_DCN */
998b7ad2 6439
5fd953a3
RS
6440/**
6441 * DOC: FreeSync Video
6442 *
6443 * When a userspace application wants to play a video, the content follows a
6444 * standard format definition that usually specifies the FPS for that format.
6446 * The list below illustrates some video formats and the expected FPS,
6446 * respectively:
6447 *
6448 * - TV/NTSC (23.976 FPS)
6449 * - Cinema (24 FPS)
6450 * - TV/PAL (25 FPS)
6451 * - TV/NTSC (29.97 FPS)
6452 * - TV/NTSC (30 FPS)
6453 * - Cinema HFR (48 FPS)
6454 * - TV/PAL (50 FPS)
6455 * - Commonly used (60 FPS)
12cdff6b 6456 * - Multiples of 24 (48,72,96,120 FPS)
5fd953a3
RS
6457 *
6458 * The list of standard video formats is not huge and can be added to the
6459 * connector's mode list beforehand. With that, userspace can leverage
6460 * FreeSync to extend the front porch in order to attain the target refresh
6461 * rate. Such a switch will happen seamlessly, without screen blanking or
6462 * reprogramming of the output in any other way. If userspace requests a
6463 * modesetting change compatible with FreeSync modes that only differ in the
6464 * refresh rate, DC will skip the full update and avoid a blink during the
6465 * transition. For example, the video player can change the modesetting from
6466 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6467 * causing any display blink. The same concept applies to any other
6468 * compatible mode setting change.
6469 */
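
/*
 * Illustrative sketch (the helper below is hypothetical and not used by the
 * driver): refresh rate is pixel_clock / (htotal * vtotal), so with the pixel
 * clock and horizontal timing fixed, the vtotal needed for a lower target
 * rate follows directly. Plain 32-bit math is fine for realistic pixel
 * clocks (clock_khz * 1000 stays below 2^32 for anything under ~4.3 GHz).
 */
static inline u32 freesync_example_vtotal(u32 clock_khz, u32 htotal, u32 target_hz)
{
	/* e.g. 148500 kHz, htotal 2200, 48 Hz -> vtotal ~1406; the base mode's
	 * vtotal of 1125 grows by ~281 front porch lines. */
	return clock_khz * 1000u / (htotal * target_hz);
}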
a85ba005
NC
6470static struct drm_display_mode *
6471get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6472 bool use_probed_modes)
6473{
6474 struct drm_display_mode *m, *m_pref = NULL;
6475 u16 current_refresh, highest_refresh;
6476 struct list_head *list_head = use_probed_modes ?
6477 &aconnector->base.probed_modes :
6478 &aconnector->base.modes;
6479
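	/* Reuse the cached base mode if it was already determined. */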
6480 if (aconnector->freesync_vid_base.clock != 0)
6481 return &aconnector->freesync_vid_base;
6482
6483 /* Find the preferred mode */
6484 list_for_each_entry(m, list_head, head) {
6485 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6486 m_pref = m;
6487 break;
6488 }
6489 }
6490
6491 if (!m_pref) {
6492 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6493 m_pref = list_first_entry_or_null(
6494 &aconnector->base.modes, struct drm_display_mode, head);
6495 if (!m_pref) {
6496 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6497 return NULL;
6498 }
6499 }
6500
6501 highest_refresh = drm_mode_vrefresh(m_pref);
6502
6503 /*
6504 * Find the mode with the highest refresh rate at the same resolution.
6505 * For some monitors, the preferred mode is not the mode with the
6506 * highest supported refresh rate.
6507 */
6508 list_for_each_entry(m, list_head, head) {
6509 current_refresh = drm_mode_vrefresh(m);
6510
6511 if (m->hdisplay == m_pref->hdisplay &&
6512 m->vdisplay == m_pref->vdisplay &&
6513 highest_refresh < current_refresh) {
6514 highest_refresh = current_refresh;
6515 m_pref = m;
6516 }
6517 }
6518
426c89aa 6519 drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
a85ba005
NC
6520 return m_pref;
6521}
6522
fe8858bb 6523static bool is_freesync_video_mode(const struct drm_display_mode *mode,
a85ba005
NC
6524 struct amdgpu_dm_connector *aconnector)
6525{
6526 struct drm_display_mode *high_mode;
6527 int timing_diff;
6528
6529 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6530 if (!high_mode || !mode)
6531 return false;
6532
6533 timing_diff = high_mode->vtotal - mode->vtotal;
6534
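	/*
	 * A FreeSync video mode must match the base mode in everything except
	 * vtotal: the extra lines must sit entirely in the vertical front
	 * porch, i.e. the vsync offsets shift by exactly timing_diff.
	 */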
6535 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6536 high_mode->hdisplay != mode->hdisplay ||
6537 high_mode->vdisplay != mode->vdisplay ||
6538 high_mode->hsync_start != mode->hsync_start ||
6539 high_mode->hsync_end != mode->hsync_end ||
6540 high_mode->htotal != mode->htotal ||
6541 high_mode->hskew != mode->hskew ||
6542 high_mode->vscan != mode->vscan ||
6543 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6544 high_mode->vsync_end - mode->vsync_end != timing_diff)
6545 return false;
6546
6547 return true;
6548}
6549
f11d9373 6550static struct dc_stream_state *
3ee6b26b
AD
6551create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6552 const struct drm_display_mode *drm_mode,
b333730d 6553 const struct dm_connector_state *dm_state,
cbd14ae7
SW
6554 const struct dc_stream_state *old_stream,
6555 int requested_bpc)
e7b07cee
HW
6556{
6557 struct drm_display_mode *preferred_mode = NULL;
391ef035 6558 struct drm_connector *drm_connector;
42ba01fc
NK
6559 const struct drm_connector_state *con_state =
6560 dm_state ? &dm_state->base : NULL;
0971c40e 6561 struct dc_stream_state *stream = NULL;
e7b07cee 6562 struct drm_display_mode mode = *drm_mode;
a85ba005
NC
6563 struct drm_display_mode saved_mode;
6564 struct drm_display_mode *freesync_mode = NULL;
e7b07cee 6565 bool native_mode_found = false;
b0781603
NK
6566 bool recalculate_timing = false;
6567 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
b333730d 6568 int mode_refresh;
58124bf8 6569 int preferred_refresh = 0;
defeb878 6570#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 6571 struct dsc_dec_dpcd_caps dsc_caps;
7c431455 6572#endif
aed15309 6573 struct dc_sink *sink = NULL;
a85ba005
NC
6574
6575 memset(&saved_mode, 0, sizeof(saved_mode));
6576
b830ebc9 6577 if (aconnector == NULL) {
e7b07cee 6578 DRM_ERROR("aconnector is NULL!\n");
64245fa7 6579 return stream;
e7b07cee
HW
6580 }
6581
e7b07cee 6582 drm_connector = &aconnector->base;
2e0ac3d6 6583
f4ac176e 6584 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
6585 sink = create_fake_sink(aconnector);
6586 if (!sink)
6587 return stream;
aed15309
ML
6588 } else {
6589 sink = aconnector->dc_sink;
dcd5fb82 6590 dc_sink_retain(sink);
f4ac176e 6591 }
2e0ac3d6 6592
aed15309 6593 stream = dc_create_stream_for_sink(sink);
4562236b 6594
b830ebc9 6595 if (stream == NULL) {
e7b07cee 6596 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 6597 goto finish;
e7b07cee
HW
6598 }
6599
ceb3dbb4
JL
6600 stream->dm_stream_context = aconnector;
6601
4a36fcba
WL
6602 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6603 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6604
e7b07cee
HW
6605 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6606 /* Search for preferred mode */
6607 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6608 native_mode_found = true;
6609 break;
6610 }
6611 }
6612 if (!native_mode_found)
6613 preferred_mode = list_first_entry_or_null(
6614 &aconnector->base.modes,
6615 struct drm_display_mode,
6616 head);
6617
b333730d
BL
6618 mode_refresh = drm_mode_vrefresh(&mode);
6619
b830ebc9 6620 if (preferred_mode == NULL) {
1f6010a9
DF
6621 /*
6622 * This may not be an error: the use case is when we have no
e7b07cee
HW
6623 * usermode calls to reset and set the mode upon hotplug. In this
6624 * case, we call set mode ourselves to restore the previous mode,
6625 * and the mode list may not be filled in yet.
6626 */
f1ad2f5e 6627 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee 6628 } else {
de05abe6 6629 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
a85ba005
NC
6630 if (recalculate_timing) {
6631 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
426c89aa
VS
6632 drm_mode_copy(&saved_mode, &mode);
6633 drm_mode_copy(&mode, freesync_mode);
a85ba005
NC
6634 } else {
6635 decide_crtc_timing_for_drm_display_mode(
b0781603 6636 &mode, preferred_mode, scale);
a85ba005 6637
b0781603
NK
6638 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6639 }
e7b07cee
HW
6640 }
6641
a85ba005
NC
6642 if (recalculate_timing)
6643 drm_mode_set_crtcinfo(&saved_mode, 0);
fe8858bb 6644 else if (!dm_state)
f783577c
JFZ
6645 drm_mode_set_crtcinfo(&mode, 0);
6646
a85ba005 6647 /*
b333730d
BL
6648 * If scaling is enabled and the refresh rate didn't change,
6649 * we copy the VIC and polarities of the old timings.
6650 */
b0781603 6651 if (!scale || mode_refresh != preferred_refresh)
a85ba005
NC
6652 fill_stream_properties_from_drm_display_mode(
6653 stream, &mode, &aconnector->base, con_state, NULL,
6654 requested_bpc);
b333730d 6655 else
a85ba005
NC
6656 fill_stream_properties_from_drm_display_mode(
6657 stream, &mode, &aconnector->base, con_state, old_stream,
6658 requested_bpc);
b333730d 6659
defeb878 6660#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6661 /* SST DSC determination policy */
6662 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6663 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6664 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
39a4eb85
WL
6665#endif
6666
e7b07cee
HW
6667 update_stream_scaling_settings(&mode, dm_state, stream);
6668
6669 fill_audio_info(
6670 &stream->audio_info,
6671 drm_connector,
aed15309 6672 sink);
e7b07cee 6673
ceb3dbb4 6674 update_stream_signal(stream, sink);
9182b4cb 6675
d832fc3b 6676 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
6677 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6678
8a488f5d
RL
6679 if (stream->link->psr_settings.psr_feature_enabled) {
6680 /*
6681 * Decide whether the stream supports VSC SDP colorimetry
6682 * before building the VSC info packet.
6683 */
6684 stream->use_vsc_sdp_for_colorimetry = false;
6685 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6686 stream->use_vsc_sdp_for_colorimetry =
6687 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6688 } else {
6689 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6690 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 6691 }
0c5a0bbb 6692 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
1a365683
RL
6693 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6694
8c322309 6695 }
aed15309 6696finish:
dcd5fb82 6697 dc_sink_release(sink);
9e3efe3e 6698
e7b07cee
HW
6699 return stream;
6700}
6701
7578ecda 6702static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
6703{
6704 drm_crtc_cleanup(crtc);
6705 kfree(crtc);
6706}
6707
6708static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 6709 struct drm_crtc_state *state)
e7b07cee
HW
6710{
6711 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6712
6713 /* TODO: Destroy dc_stream objects once the stream object is flattened */
6714 if (cur->stream)
6715 dc_stream_release(cur->stream);
6716
6718 __drm_atomic_helper_crtc_destroy_state(state);
6719
6721 kfree(state);
6722}
6723
6724static void dm_crtc_reset_state(struct drm_crtc *crtc)
6725{
6726 struct dm_crtc_state *state;
6727
6728 if (crtc->state)
6729 dm_crtc_destroy_state(crtc, crtc->state);
6730
6731 state = kzalloc(sizeof(*state), GFP_KERNEL);
6732 if (WARN_ON(!state))
6733 return;
6734
1f8a52ec 6735 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
6736}
6737
6738static struct drm_crtc_state *
6739dm_crtc_duplicate_state(struct drm_crtc *crtc)
6740{
6741 struct dm_crtc_state *state, *cur;
6742
6743 if (WARN_ON(!crtc->state))
6744 return NULL;
6745
6746 cur = to_dm_crtc_state(crtc->state);
6747
2004f45e 6748 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
6749 if (!state)
6750 return NULL;
e7b07cee
HW
6751
6752 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6753
6754 if (cur->stream) {
6755 state->stream = cur->stream;
6756 dc_stream_retain(state->stream);
6757 }
6758
d6ef9b41 6759 state->active_planes = cur->active_planes;
98e6436d 6760 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 6761 state->abm_level = cur->abm_level;
bb47de73
NK
6762 state->vrr_supported = cur->vrr_supported;
6763 state->freesync_config = cur->freesync_config;
cf020d49
NK
6764 state->cm_has_degamma = cur->cm_has_degamma;
6765 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
7cc191ee 6766 state->mpo_requested = cur->mpo_requested;
e7b07cee
HW
6767 /* TODO: Duplicate dc_stream after the stream object is flattened */
6768
6769 return &state->base;
6770}
6771
86bc2219 6772#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
e69231c4 6773static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
86bc2219
WL
6774{
6775 crtc_debugfs_init(crtc);
6776
6777 return 0;
6778}
6779#endif
6780
d2574c33
MK
6781static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6782{
6783 enum dc_irq_source irq_source;
6784 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6785 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
6786 int rc;
6787
6788 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6789
6790 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6791
4711c033
LT
6792 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6793 acrtc->crtc_id, enable ? "en" : "dis", rc);
d2574c33
MK
6794 return rc;
6795}
589d2739
HW
6796
6797static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6798{
6799 enum dc_irq_source irq_source;
6800 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6801 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 6802 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
71338cb4 6803 struct amdgpu_display_manager *dm = &adev->dm;
09a5df6c 6804 struct vblank_control_work *work;
d2574c33
MK
6805 int rc = 0;
6806
6807 if (enable) {
6808 /* vblank irq on -> Only need vupdate irq in vrr mode */
6809 if (amdgpu_dm_vrr_active(acrtc_state))
6810 rc = dm_set_vupdate_irq(crtc, true);
6811 } else {
6812 /* vblank irq off -> vupdate irq off */
6813 rc = dm_set_vupdate_irq(crtc, false);
6814 }
6815
6816 if (rc)
6817 return rc;
589d2739
HW
6818
6819 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
6820
6821 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6822 return -EBUSY;
6823
98ab5f35
BL
6824 if (amdgpu_in_reset(adev))
6825 return 0;
6826
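	/*
	 * Defer the stream/PSR bookkeeping to the vblank control worker;
	 * GFP_ATOMIC because vblank on/off may be requested from atomic
	 * context.
	 */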
06dd1888
NK
6827 if (dm->vblank_control_workqueue) {
6828 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6829 if (!work)
6830 return -ENOMEM;
09a5df6c 6831
06dd1888
NK
6832 INIT_WORK(&work->work, vblank_control_worker);
6833 work->dm = dm;
6834 work->acrtc = acrtc;
6835 work->enable = enable;
09a5df6c 6836
06dd1888
NK
6837 if (acrtc_state->stream) {
6838 dc_stream_retain(acrtc_state->stream);
6839 work->stream = acrtc_state->stream;
6840 }
58aa1c50 6841
06dd1888
NK
6842 queue_work(dm->vblank_control_workqueue, &work->work);
6843 }
71338cb4 6844
71338cb4 6845 return 0;
589d2739
HW
6846}
6847
6848static int dm_enable_vblank(struct drm_crtc *crtc)
6849{
6850 return dm_set_vblank(crtc, true);
6851}
6852
6853static void dm_disable_vblank(struct drm_crtc *crtc)
6854{
6855 dm_set_vblank(crtc, false);
6856}
6857
faf26f2b 6858/* Implemented only the options currently available for the driver */
e7b07cee
HW
6859static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6860 .reset = dm_crtc_reset_state,
6861 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
6862 .set_config = drm_atomic_helper_set_config,
6863 .page_flip = drm_atomic_helper_page_flip,
6864 .atomic_duplicate_state = dm_crtc_duplicate_state,
6865 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 6866 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 6867 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 6868 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 6869 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
6870 .enable_vblank = dm_enable_vblank,
6871 .disable_vblank = dm_disable_vblank,
e3eff4b5 6872 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
86bc2219
WL
6873#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6874 .late_register = amdgpu_dm_crtc_late_register,
6875#endif
e7b07cee
HW
6876};
6877
6878static enum drm_connector_status
6879amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6880{
6881 bool connected;
c84dec2f 6882 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6883
1f6010a9
DF
6884 /*
6885 * Notes:
e7b07cee
HW
6886 * 1. This interface is NOT called in the context of the HPD irq.
6887 * 2. This interface *is called* in the context of a user-mode ioctl, which
1f6010a9
DF
6888 * makes it a bad place for *any* MST-related activity.
6889 */
e7b07cee 6890
8580d60b
HW
6891 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6892 !aconnector->fake_enable)
e7b07cee
HW
6893 connected = (aconnector->dc_sink != NULL);
6894 else
6895 connected = (aconnector->base.force == DRM_FORCE_ON);
6896
0f877894
OV
6897 update_subconnector_property(aconnector);
6898
e7b07cee
HW
6899 return (connected ? connector_status_connected :
6900 connector_status_disconnected);
6901}
6902
3ee6b26b
AD
6903int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6904 struct drm_connector_state *connector_state,
6905 struct drm_property *property,
6906 uint64_t val)
e7b07cee
HW
6907{
6908 struct drm_device *dev = connector->dev;
1348969a 6909 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6910 struct dm_connector_state *dm_old_state =
6911 to_dm_connector_state(connector->state);
6912 struct dm_connector_state *dm_new_state =
6913 to_dm_connector_state(connector_state);
6914
6915 int ret = -EINVAL;
6916
6917 if (property == dev->mode_config.scaling_mode_property) {
6918 enum amdgpu_rmx_type rmx_type;
6919
6920 switch (val) {
6921 case DRM_MODE_SCALE_CENTER:
6922 rmx_type = RMX_CENTER;
6923 break;
6924 case DRM_MODE_SCALE_ASPECT:
6925 rmx_type = RMX_ASPECT;
6926 break;
6927 case DRM_MODE_SCALE_FULLSCREEN:
6928 rmx_type = RMX_FULL;
6929 break;
6930 case DRM_MODE_SCALE_NONE:
6931 default:
6932 rmx_type = RMX_OFF;
6933 break;
6934 }
6935
6936 if (dm_old_state->scaling == rmx_type)
6937 return 0;
6938
6939 dm_new_state->scaling = rmx_type;
6940 ret = 0;
6941 } else if (property == adev->mode_info.underscan_hborder_property) {
6942 dm_new_state->underscan_hborder = val;
6943 ret = 0;
6944 } else if (property == adev->mode_info.underscan_vborder_property) {
6945 dm_new_state->underscan_vborder = val;
6946 ret = 0;
6947 } else if (property == adev->mode_info.underscan_property) {
6948 dm_new_state->underscan_enable = val;
6949 ret = 0;
c1ee92f9
DF
6950 } else if (property == adev->mode_info.abm_level_property) {
6951 dm_new_state->abm_level = val;
6952 ret = 0;
e7b07cee
HW
6953 }
6954
6955 return ret;
6956}
6957
3ee6b26b
AD
6958int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6959 const struct drm_connector_state *state,
6960 struct drm_property *property,
6961 uint64_t *val)
e7b07cee
HW
6962{
6963 struct drm_device *dev = connector->dev;
1348969a 6964 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6965 struct dm_connector_state *dm_state =
6966 to_dm_connector_state(state);
6967 int ret = -EINVAL;
6968
6969 if (property == dev->mode_config.scaling_mode_property) {
6970 switch (dm_state->scaling) {
6971 case RMX_CENTER:
6972 *val = DRM_MODE_SCALE_CENTER;
6973 break;
6974 case RMX_ASPECT:
6975 *val = DRM_MODE_SCALE_ASPECT;
6976 break;
6977 case RMX_FULL:
6978 *val = DRM_MODE_SCALE_FULLSCREEN;
6979 break;
6980 case RMX_OFF:
6981 default:
6982 *val = DRM_MODE_SCALE_NONE;
6983 break;
6984 }
6985 ret = 0;
6986 } else if (property == adev->mode_info.underscan_hborder_property) {
6987 *val = dm_state->underscan_hborder;
6988 ret = 0;
6989 } else if (property == adev->mode_info.underscan_vborder_property) {
6990 *val = dm_state->underscan_vborder;
6991 ret = 0;
6992 } else if (property == adev->mode_info.underscan_property) {
6993 *val = dm_state->underscan_enable;
6994 ret = 0;
c1ee92f9
DF
6995 } else if (property == adev->mode_info.abm_level_property) {
6996 *val = dm_state->abm_level;
6997 ret = 0;
e7b07cee 6998 }
c1ee92f9 6999
e7b07cee
HW
7000 return ret;
7001}
7002
526c654a
ED
7003static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
7004{
7005 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
7006
7007 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
7008}
7009
7578ecda 7010static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 7011{
c84dec2f 7012 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 7013 const struct dc_link *link = aconnector->dc_link;
1348969a 7014 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 7015 struct amdgpu_display_manager *dm = &adev->dm;
7fd13bae 7016 int i;
ada8ce15 7017
5dff80bd
AG
7018 /*
7019 * Call only if mst_mgr was initialized before, since it's not done
7020 * for all connector types.
7021 */
7022 if (aconnector->mst_mgr.dev)
7023 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
7024
7fd13bae
AD
7025 for (i = 0; i < dm->num_of_edps; i++) {
7026 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
7027 backlight_device_unregister(dm->backlight_dev[i]);
7028 dm->backlight_dev[i] = NULL;
7029 }
e7b07cee 7030 }
dcd5fb82
MF
7031
7032 if (aconnector->dc_em_sink)
7033 dc_sink_release(aconnector->dc_em_sink);
7034 aconnector->dc_em_sink = NULL;
7035 if (aconnector->dc_sink)
7036 dc_sink_release(aconnector->dc_sink);
7037 aconnector->dc_sink = NULL;
7038
e86e8947 7039 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
7040 drm_connector_unregister(connector);
7041 drm_connector_cleanup(connector);
526c654a
ED
7042 if (aconnector->i2c) {
7043 i2c_del_adapter(&aconnector->i2c->base);
7044 kfree(aconnector->i2c);
7045 }
7daec99f 7046 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 7047
e7b07cee
HW
7048 kfree(connector);
7049}
7050
7051void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
7052{
7053 struct dm_connector_state *state =
7054 to_dm_connector_state(connector->state);
7055
df099b9b
LSL
7056 if (connector->state)
7057 __drm_atomic_helper_connector_destroy_state(connector->state);
7058
e7b07cee
HW
7059 kfree(state);
7060
7061 state = kzalloc(sizeof(*state), GFP_KERNEL);
7062
7063 if (state) {
7064 state->scaling = RMX_OFF;
7065 state->underscan_enable = false;
7066 state->underscan_hborder = 0;
7067 state->underscan_vborder = 0;
01933ba4 7068 state->base.max_requested_bpc = 8;
3261e013
ML
7069 state->vcpi_slots = 0;
7070 state->pbn = 0;
c3e50f89
NK
7071 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
7072 state->abm_level = amdgpu_dm_abm_level;
7073
df099b9b 7074 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
7075 }
7076}
7077
3ee6b26b
AD
7078struct drm_connector_state *
7079amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
7080{
7081 struct dm_connector_state *state =
7082 to_dm_connector_state(connector->state);
7083
7084 struct dm_connector_state *new_state =
7085 kmemdup(state, sizeof(*state), GFP_KERNEL);
7086
98e6436d
AK
7087 if (!new_state)
7088 return NULL;
e7b07cee 7089
98e6436d
AK
7090 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
7091
7092 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 7093 new_state->abm_level = state->abm_level;
922454c2
NK
7094 new_state->scaling = state->scaling;
7095 new_state->underscan_enable = state->underscan_enable;
7096 new_state->underscan_hborder = state->underscan_hborder;
7097 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
7098 new_state->vcpi_slots = state->vcpi_slots;
7099 new_state->pbn = state->pbn;
98e6436d 7100 return &new_state->base;
e7b07cee
HW
7101}
7102
14f04fa4
AD
7103static int
7104amdgpu_dm_connector_late_register(struct drm_connector *connector)
7105{
7106 struct amdgpu_dm_connector *amdgpu_dm_connector =
7107 to_amdgpu_dm_connector(connector);
00a8037e 7108 int r;
14f04fa4 7109
00a8037e
AD
7110 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7111 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7112 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7113 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7114 if (r)
7115 return r;
7116 }
7117
7118#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
7119 connector_debugfs_init(amdgpu_dm_connector);
7120#endif
7121
7122 return 0;
7123}
7124
e7b07cee
HW
7125static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7126 .reset = amdgpu_dm_connector_funcs_reset,
7127 .detect = amdgpu_dm_connector_detect,
7128 .fill_modes = drm_helper_probe_single_connector_modes,
7129 .destroy = amdgpu_dm_connector_destroy,
7130 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7131 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7132 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 7133 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 7134 .late_register = amdgpu_dm_connector_late_register,
526c654a 7135 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
7136};
7137
e7b07cee
HW
7138static int get_modes(struct drm_connector *connector)
7139{
7140 return amdgpu_dm_connector_get_modes(connector);
7141}
7142
c84dec2f 7143static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
7144{
7145 struct dc_sink_init_data init_params = {
7146 .link = aconnector->dc_link,
7147 .sink_signal = SIGNAL_TYPE_VIRTUAL
7148 };
70e8ffc5 7149 struct edid *edid;
e7b07cee 7150
a89ff457 7151 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
7152 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7153 aconnector->base.name);
7154
7155 aconnector->base.force = DRM_FORCE_OFF;
7156 aconnector->base.override_edid = false;
7157 return;
7158 }
7159
70e8ffc5
HW
7160 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7161
e7b07cee
HW
7162 aconnector->edid = edid;
7163
7164 aconnector->dc_em_sink = dc_link_add_remote_sink(
7165 aconnector->dc_link,
7166 (uint8_t *)edid,
7167 (edid->extensions + 1) * EDID_LENGTH,
7168 &init_params);
7169
dcd5fb82 7170 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
7171 aconnector->dc_sink = aconnector->dc_link->local_sink ?
7172 aconnector->dc_link->local_sink :
7173 aconnector->dc_em_sink;
dcd5fb82
MF
7174 dc_sink_retain(aconnector->dc_sink);
7175 }
e7b07cee
HW
7176}
7177
c84dec2f 7178static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
7179{
7180 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7181
1f6010a9
DF
7182 /*
7183 * In case of a headless boot with force on for a DP managed connector,
e7b07cee
HW
7184 * those settings have to be != 0 to get an initial modeset
7185 */
7186 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7187 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7188 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7189 }
7190
7191
7192 aconnector->base.override_edid = true;
7193 create_eml_sink(aconnector);
7194}
7195
17ce8a69 7196struct dc_stream_state *
cbd14ae7
SW
7197create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7198 const struct drm_display_mode *drm_mode,
7199 const struct dm_connector_state *dm_state,
7200 const struct dc_stream_state *old_stream)
7201{
7202 struct drm_connector *connector = &aconnector->base;
1348969a 7203 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 7204 struct dc_stream_state *stream;
4b7da34b
SW
7205 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7206 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
7207 enum dc_status dc_result = DC_OK;
7208
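	/* Retry stream creation, stepping requested_bpc down by 2 per pass (floor of 6) until DC validation succeeds. */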
7209 do {
7210 stream = create_stream_for_sink(aconnector, drm_mode,
7211 dm_state, old_stream,
7212 requested_bpc);
7213 if (stream == NULL) {
7214 DRM_ERROR("Failed to create stream for sink!\n");
7215 break;
7216 }
7217
e9a7d236
RS
7218 dc_result = dc_validate_stream(adev->dm.dc, stream);
7219 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
f04d275d 7220 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
7221
cbd14ae7 7222 if (dc_result != DC_OK) {
74a16675 7223 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
7224 drm_mode->hdisplay,
7225 drm_mode->vdisplay,
7226 drm_mode->clock,
74a16675
RS
7227 dc_result,
7228 dc_status_to_str(dc_result));
cbd14ae7
SW
7229
7230 dc_stream_release(stream);
7231 stream = NULL;
7232 requested_bpc -= 2; /* lower bpc to retry validation */
7233 }
7234
7235 } while (stream == NULL && requested_bpc >= 6);
7236
68eb3ae3
WS
7237 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7238 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7239
7240 aconnector->force_yuv420_output = true;
7241 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7242 dm_state, old_stream);
7243 aconnector->force_yuv420_output = false;
7244 }
7245
cbd14ae7
SW
7246 return stream;
7247}
7248
ba9ca088 7249enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 7250 struct drm_display_mode *mode)
e7b07cee
HW
7251{
7252 int result = MODE_ERROR;
7253 struct dc_sink *dc_sink;
e7b07cee 7254 /* TODO: Unhardcode stream count */
0971c40e 7255 struct dc_stream_state *stream;
c84dec2f 7256 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
7257
7258 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7259 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7260 return result;
7261
1f6010a9
DF
7262 /*
7263 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
7264 * EDID mgmt
7265 */
7266 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7267 !aconnector->dc_em_sink)
7268 handle_edid_mgmt(aconnector);
7269
c84dec2f 7270 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 7271
ad975f44
VL
7272 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7273 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
7274 DRM_ERROR("dc_sink is NULL!\n");
7275 goto fail;
7276 }
7277
cbd14ae7
SW
7278 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7279 if (stream) {
7280 dc_stream_release(stream);
e7b07cee 7281 result = MODE_OK;
cbd14ae7 7282 }
e7b07cee
HW
7283
7284fail:
7285 /* TODO: error handling */
7286 return result;
7287}
7288
88694af9
NK
7289static int fill_hdr_info_packet(const struct drm_connector_state *state,
7290 struct dc_info_packet *out)
7291{
7292 struct hdmi_drm_infoframe frame;
7293 unsigned char buf[30]; /* 26 + 4 */
7294 ssize_t len;
7295 int ret, i;
7296
7297 memset(out, 0, sizeof(*out));
7298
7299 if (!state->hdr_output_metadata)
7300 return 0;
7301
7302 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7303 if (ret)
7304 return ret;
7305
7306 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7307 if (len < 0)
7308 return (int)len;
7309
7310 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7311 if (len != 30)
7312 return -EINVAL;
7313
7314 /* Prepare the infopacket for DC. */
7315 switch (state->connector->connector_type) {
7316 case DRM_MODE_CONNECTOR_HDMIA:
7317 out->hb0 = 0x87; /* type */
7318 out->hb1 = 0x01; /* version */
7319 out->hb2 = 0x1A; /* length */
7320 out->sb[0] = buf[3]; /* checksum */
7321 i = 1;
7322 break;
7323
7324 case DRM_MODE_CONNECTOR_DisplayPort:
7325 case DRM_MODE_CONNECTOR_eDP:
7326 out->hb0 = 0x00; /* sdp id, zero */
7327 out->hb1 = 0x87; /* type */
7328 out->hb2 = 0x1D; /* payload len - 1 */
7329 out->hb3 = (0x13 << 2); /* sdp version */
7330 out->sb[0] = 0x01; /* version */
7331 out->sb[1] = 0x1A; /* length */
7332 i = 2;
7333 break;
7334
7335 default:
7336 return -EINVAL;
7337 }
7338
7339 memcpy(&out->sb[i], &buf[4], 26);
7340 out->valid = true;
7341
7342 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7343 sizeof(out->sb), false);
7344
7345 return 0;
7346}
7347
88694af9
NK
7348static int
7349amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 7350 struct drm_atomic_state *state)
88694af9 7351{
51e857af
SP
7352 struct drm_connector_state *new_con_state =
7353 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
7354 struct drm_connector_state *old_con_state =
7355 drm_atomic_get_old_connector_state(state, conn);
7356 struct drm_crtc *crtc = new_con_state->crtc;
7357 struct drm_crtc_state *new_crtc_state;
7358 int ret;
7359
e8a98235
RS
7360 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7361
88694af9
NK
7362 if (!crtc)
7363 return 0;
7364
72921cdf 7365 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
7366 struct dc_info_packet hdr_infopacket;
7367
7368 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7369 if (ret)
7370 return ret;
7371
7372 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7373 if (IS_ERR(new_crtc_state))
7374 return PTR_ERR(new_crtc_state);
7375
7376 /*
7377 * DC considers the stream backends changed if the
7378 * static metadata changes. Forcing the modeset also
7379 * gives a simple way for userspace to switch from
b232d4ed
NK
7380 * 8bpc to 10bpc when setting the metadata to enter
7381 * or exit HDR.
7382 *
7383 * Changing the static metadata after it's been
7384 * set is permissible, however. So only force a
7385 * modeset if we're entering or exiting HDR.
88694af9 7386 */
b232d4ed
NK
7387 new_crtc_state->mode_changed =
7388 !old_con_state->hdr_output_metadata ||
7389 !new_con_state->hdr_output_metadata;
88694af9
NK
7390 }
7391
7392 return 0;
7393}
7394
e7b07cee
HW
7395static const struct drm_connector_helper_funcs
7396amdgpu_dm_connector_helper_funcs = {
7397 /*
1f6010a9 7398 * If hotplugging a second, bigger display in FB console mode, bigger-resolution
b830ebc9 7399 * modes will be filtered out by drm_mode_validate_size(), and those modes
1f6010a9 7400 * are missing after the user starts lightdm. So we need to renew the mode
b830ebc9
HW
7401 * list in the get_modes callback, not just return the mode count
7402 */
e7b07cee
HW
7403 .get_modes = get_modes,
7404 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 7405 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
7406};
7407
7408static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7409{
7410}
7411
d6ef9b41 7412static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
7413{
7414 struct drm_atomic_state *state = new_crtc_state->state;
7415 struct drm_plane *plane;
7416 int num_active = 0;
7417
7418 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7419 struct drm_plane_state *new_plane_state;
7420
7421 /* Cursor planes are "fake". */
7422 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7423 continue;
7424
7425 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7426
7427 if (!new_plane_state) {
7428 /*
7429 * The plane is enabled on the CRTC and hasn't changed
7430 * state. This means that it previously passed
7431 * validation and is therefore enabled.
7432 */
7433 num_active += 1;
7434 continue;
7435 }
7436
7437 /* We need a framebuffer to be considered enabled. */
7438 num_active += (new_plane_state->fb != NULL);
7439 }
7440
d6ef9b41
NK
7441 return num_active;
7442}
7443
8fe684e9
NK
7444static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7445 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
7446{
7447 struct dm_crtc_state *dm_new_crtc_state =
7448 to_dm_crtc_state(new_crtc_state);
7449
7450 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
7451
7452 if (!dm_new_crtc_state->stream)
7453 return;
7454
7455 dm_new_crtc_state->active_planes =
7456 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
7457}
7458
3ee6b26b 7459static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 7460 struct drm_atomic_state *state)
e7b07cee 7461{
29b77ad7
MR
7462 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7463 crtc);
1348969a 7464 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 7465 struct dc *dc = adev->dm.dc;
29b77ad7 7466 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
7467 int ret = -EINVAL;
7468
5b8c5969 7469 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 7470
29b77ad7 7471 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 7472
bcd74374
ND
7473 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7474 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
e7b07cee
HW
7475 return ret;
7476 }
7477
bc92c065 7478 /*
b836a274
MD
7479 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7480 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7481 * planes are disabled, which is not supported by the hardware. And there is legacy
7482 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 7483 */
29b77ad7 7484 if (crtc_state->enable &&
ea9522f5
SS
7485 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7486 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 7487 return -EINVAL;
ea9522f5 7488 }
c14a005c 7489
b836a274
MD
7490 /* In some use cases, like reset, no stream is attached */
7491 if (!dm_crtc_state->stream)
7492 return 0;
7493
62c933f9 7494 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
7495 return 0;
7496
ea9522f5 7497 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
7498 return ret;
7499}
7500
3ee6b26b
AD
7501static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7502 const struct drm_display_mode *mode,
7503 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
7504{
7505 return true;
7506}
7507
7508static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7509 .disable = dm_crtc_helper_disable,
7510 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
7511 .mode_fixup = dm_crtc_helper_mode_fixup,
7512 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
7513};
7514
7515static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7516{
7517
7518}
7519
f04d275d 7520int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
3261e013
ML
7521{
7522 switch (display_color_depth) {
7523 case COLOR_DEPTH_666:
7524 return 6;
7525 case COLOR_DEPTH_888:
7526 return 8;
7527 case COLOR_DEPTH_101010:
7528 return 10;
7529 case COLOR_DEPTH_121212:
7530 return 12;
7531 case COLOR_DEPTH_141414:
7532 return 14;
7533 case COLOR_DEPTH_161616:
7534 return 16;
7535 default:
7536 break;
7537 }
7538 return 0;
7539}
7540
3ee6b26b
AD
7541static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7542 struct drm_crtc_state *crtc_state,
7543 struct drm_connector_state *conn_state)
e7b07cee 7544{
3261e013
ML
7545 struct drm_atomic_state *state = crtc_state->state;
7546 struct drm_connector *connector = conn_state->connector;
7547 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7548 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7549 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7550 struct drm_dp_mst_topology_mgr *mst_mgr;
7551 struct drm_dp_mst_port *mst_port;
7552 enum dc_color_depth color_depth;
7553 int clock, bpp = 0;
1bc22f20 7554 bool is_y420 = false;
3261e013
ML
7555
7556 if (!aconnector->port || !aconnector->dc_sink)
7557 return 0;
7558
7559 mst_port = aconnector->port;
7560 mst_mgr = &aconnector->mst_port->mst_mgr;
7561
7562 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7563 return 0;
7564
7565 if (!state->duplicated) {
cbd14ae7 7566 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
7567 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7568 aconnector->force_yuv420_output;
cbd14ae7
SW
7569 color_depth = convert_color_depth_from_display_info(connector,
7570 is_y420,
7571 max_bpc);
3261e013
ML
7572 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7573 clock = adjusted_mode->clock;
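		/* Roughly pbn = clock_kHz * bpp * 64 / (8 * 54 * 1000) plus a small margin; see drm_dp_calc_pbn_mode(). */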
dc48529f 7574 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
7575 }
7576 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7577 mst_mgr,
7578 mst_port,
1c6c1cb5 7579 dm_new_connector_state->pbn,
03ca9600 7580 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
7581 if (dm_new_connector_state->vcpi_slots < 0) {
7582 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7583 return dm_new_connector_state->vcpi_slots;
7584 }
e7b07cee
HW
7585 return 0;
7586}
7587
7588const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7589 .disable = dm_encoder_helper_disable,
7590 .atomic_check = dm_encoder_helper_atomic_check
7591};
7592
d9fe1a4c 7593#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74 7594static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6513104b
HW
7595 struct dc_state *dc_state,
7596 struct dsc_mst_fairness_vars *vars)
29b9ba74
ML
7597{
7598 struct dc_stream_state *stream = NULL;
7599 struct drm_connector *connector;
5760dcb9 7600 struct drm_connector_state *new_con_state;
29b9ba74
ML
7601 struct amdgpu_dm_connector *aconnector;
7602 struct dm_connector_state *dm_conn_state;
a550bb16
HW
7603 int i, j;
7604 int vcpi, pbn_div, pbn, slot_num = 0;
29b9ba74 7605
5760dcb9 7606 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
7607
7608 aconnector = to_amdgpu_dm_connector(connector);
7609
7610 if (!aconnector->port)
7611 continue;
7612
7613 if (!new_con_state || !new_con_state->crtc)
7614 continue;
7615
7616 dm_conn_state = to_dm_connector_state(new_con_state);
7617
7618 for (j = 0; j < dc_state->stream_count; j++) {
7619 stream = dc_state->streams[j];
7620 if (!stream)
7621 continue;
7622
7623 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7624 break;
7625
7626 stream = NULL;
7627 }
7628
7629 if (!stream)
7630 continue;
7631
29b9ba74 7632 pbn_div = dm_mst_get_pbn_divider(stream->link);
6513104b
HW
7633 /* pbn is calculated by compute_mst_dsc_configs_for_state() */
7634 for (j = 0; j < dc_state->stream_count; j++) {
7635 if (vars[j].aconnector == aconnector) {
7636 pbn = vars[j].pbn;
7637 break;
7638 }
7639 }
7640
a550bb16
HW
7641 if (j == dc_state->stream_count)
7642 continue;
7643
7644 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7645
7646 if (stream->timing.flags.DSC != 1) {
7647 dm_conn_state->pbn = pbn;
7648 dm_conn_state->vcpi_slots = slot_num;
7649
7650 drm_dp_mst_atomic_enable_dsc(state,
7651 aconnector->port,
7652 dm_conn_state->pbn,
7653 0,
7654 false);
7655 continue;
7656 }
7657
29b9ba74
ML
7658 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7659 aconnector->port,
7660 pbn, pbn_div,
7661 true);
7662 if (vcpi < 0)
7663 return vcpi;
7664
7665 dm_conn_state->pbn = pbn;
7666 dm_conn_state->vcpi_slots = vcpi;
7667 }
7668 return 0;
7669}
d9fe1a4c 7670#endif
29b9ba74 7671
e7b07cee
HW
7672static void dm_drm_plane_reset(struct drm_plane *plane)
7673{
7674 struct dm_plane_state *amdgpu_state = NULL;
7675
7676 if (plane->state)
7677 plane->funcs->atomic_destroy_state(plane, plane->state);
7678
7679 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 7680 WARN_ON(amdgpu_state == NULL);
1f6010a9 7681
7ddaef96
NK
7682 if (amdgpu_state)
7683 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
7684}
7685
7686static struct drm_plane_state *
7687dm_drm_plane_duplicate_state(struct drm_plane *plane)
7688{
7689 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7690
7691 old_dm_plane_state = to_dm_plane_state(plane->state);
7692 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7693 if (!dm_plane_state)
7694 return NULL;
7695
7696 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7697
3be5262e
HW
7698 if (old_dm_plane_state->dc_state) {
7699 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7700 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
7701 }
7702
7703 return &dm_plane_state->base;
7704}
7705
dfd84d90 7706static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 7707 struct drm_plane_state *state)
e7b07cee
HW
7708{
7709 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7710
3be5262e
HW
7711 if (dm_plane_state->dc_state)
7712 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 7713
0627bbd3 7714 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
7715}
7716
7717static const struct drm_plane_funcs dm_plane_funcs = {
7718 .update_plane = drm_atomic_helper_update_plane,
7719 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 7720 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
7721 .reset = dm_drm_plane_reset,
7722 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7723 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 7724 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
7725};
7726
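/*
 * Pin the new framebuffer's BO for scanout: reserve it, make room for a
 * fence slot, pin it in a scanout-capable domain (VRAM, or GTT where
 * supported), bind it into the GART, and record the resulting GPU address
 * in the amdgpu_framebuffer.
 */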
3ee6b26b
AD
7727static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7728 struct drm_plane_state *new_state)
e7b07cee
HW
7729{
7730 struct amdgpu_framebuffer *afb;
7731 struct drm_gem_object *obj;
5d43be0c 7732 struct amdgpu_device *adev;
e7b07cee 7733 struct amdgpu_bo *rbo;
e7b07cee 7734 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5d43be0c
CK
7735 uint32_t domain;
7736 int r;
e7b07cee
HW
7737
7738 if (!new_state->fb) {
4711c033 7739 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
7740 return 0;
7741 }
7742
7743 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 7744 obj = new_state->fb->obj[0];
e7b07cee 7745 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 7746 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09 7747
f06e2167 7748 r = amdgpu_bo_reserve(rbo, true);
0f257b09
CZ
7749 if (r) {
7750 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 7751 return r;
0f257b09 7752 }
e7b07cee 7753
f06e2167
CK
7754 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7755 if (r) {
7756 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7757 goto error_unlock;
7758 }
7759
5d43be0c 7760 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 7761 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
7762 else
7763 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 7764
7b7c6c81 7765 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 7766 if (unlikely(r != 0)) {
30b7c614
HW
7767 if (r != -ERESTARTSYS)
7768 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
f06e2167 7769 goto error_unlock;
e7b07cee
HW
7770 }
7771
bb812f1e
JZ
7772 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7773 if (unlikely(r != 0)) {
bb812f1e 7774 DRM_ERROR("%p bind failed\n", rbo);
f06e2167 7775 goto error_unpin;
e7b07cee 7776 }
7df7e505 7777
f06e2167 7778 amdgpu_bo_unreserve(rbo);
bb812f1e 7779
7b7c6c81 7780 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
7781
7782 amdgpu_bo_ref(rbo);
7783
cf322b49
NK
7784 /*
7785 * We don't do surface updates on planes that have been newly created,
7786 * but we also don't have the afb->address during atomic check.
7787 *
7788 * Fill in buffer attributes depending on the address here, but only on
7789 * newly created planes since they're not being used by DC yet and this
7790 * won't modify global state.
7791 */
7792 dm_plane_state_old = to_dm_plane_state(plane->state);
7793 dm_plane_state_new = to_dm_plane_state(new_state);
7794
3be5262e 7795 if (dm_plane_state_new->dc_state &&
cf322b49
NK
7796 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7797 struct dc_plane_state *plane_state =
7798 dm_plane_state_new->dc_state;
7799 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 7800
320932bf 7801 fill_plane_buffer_attributes(
695af5f9 7802 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 7803 afb->tiling_flags,
cf322b49
NK
7804 &plane_state->tiling_info, &plane_state->plane_size,
7805 &plane_state->dcc, &plane_state->address,
6eed95b0 7806 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
7807 }
7808
e7b07cee 7809 return 0;
f06e2167
CK
7810
7811error_unpin:
7812 amdgpu_bo_unpin(rbo);
7813
7814error_unlock:
7815 amdgpu_bo_unreserve(rbo);
7816 return r;
e7b07cee
HW
7817}
7818
3ee6b26b
AD
7819static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7820 struct drm_plane_state *old_state)
e7b07cee
HW
7821{
7822 struct amdgpu_bo *rbo;
e7b07cee
HW
7823 int r;
7824
7825 if (!old_state->fb)
7826 return;
7827
e68d14dd 7828 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
7829 r = amdgpu_bo_reserve(rbo, false);
7830 if (unlikely(r)) {
7831 DRM_ERROR("failed to reserve rbo before unpin\n");
7832 return;
b830ebc9
HW
7833 }
7834
7835 amdgpu_bo_unpin(rbo);
7836 amdgpu_bo_unreserve(rbo);
7837 amdgpu_bo_unref(&rbo);
e7b07cee
HW
7838}
7839
8c44515b
AP
7840static int dm_plane_helper_check_state(struct drm_plane_state *state,
7841 struct drm_crtc_state *new_crtc_state)
7842{
6300b3bd
MK
7843 struct drm_framebuffer *fb = state->fb;
7844 int min_downscale, max_upscale;
7845 int min_scale = 0;
7846 int max_scale = INT_MAX;
7847
40d916a2 7848 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 7849 if (fb && state->crtc) {
40d916a2
NC
7850 /* Validate viewport to cover the case when only the position changes */
7851 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7852 int viewport_width = state->crtc_w;
7853 int viewport_height = state->crtc_h;
7854
7855 if (state->crtc_x < 0)
7856 viewport_width += state->crtc_x;
7857 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7858 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7859
7860 if (state->crtc_y < 0)
7861 viewport_height += state->crtc_y;
7862 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7863 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7864
4abdb72b
NC
7865 if (viewport_width < 0 || viewport_height < 0) {
7866 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7867 return -EINVAL;
7868 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7869 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 7870 return -EINVAL;
4abdb72b
NC
7871 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7872 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 7873 return -EINVAL;
4abdb72b
NC
7874 }
7875
40d916a2
NC
7876 }
7877
7878 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
7879 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7880 &min_downscale, &max_upscale);
7881 /*
7882 * Convert to drm convention: 16.16 fixed point, instead of dc's
7883 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7884 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7885 */
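		/* e.g. max_upscale = 4000 (4.0x in DC units) gives min_scale = (1000 << 16) / 4000 = 0x4000, i.e. 0.25 in 16.16. */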
7886 min_scale = (1000 << 16) / max_upscale;
7887 max_scale = (1000 << 16) / min_downscale;
7888 }
8c44515b 7889
8c44515b 7890 return drm_atomic_helper_check_plane_state(
6300b3bd 7891 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
7892}
7893
7578ecda 7894static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 7895 struct drm_atomic_state *state)
cbd19488 7896{
7c11b99a
MR
7897 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7898 plane);
1348969a 7899 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 7900 struct dc *dc = adev->dm.dc;
78171832 7901 struct dm_plane_state *dm_plane_state;
695af5f9 7902 struct dc_scaling_info scaling_info;
8c44515b 7903 struct drm_crtc_state *new_crtc_state;
695af5f9 7904 int ret;
78171832 7905
ba5c1649 7906 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 7907
ba5c1649 7908 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 7909
3be5262e 7910 if (!dm_plane_state->dc_state)
9a3329b1 7911 return 0;
cbd19488 7912
8c44515b 7913 new_crtc_state =
dec92020 7914 drm_atomic_get_new_crtc_state(state,
ba5c1649 7915 new_plane_state->crtc);
8c44515b
AP
7916 if (!new_crtc_state)
7917 return -EINVAL;
7918
ba5c1649 7919 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
7920 if (ret)
7921 return ret;
7922
4375d625 7923 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
695af5f9
NK
7924 if (ret)
7925 return ret;
a05bcff1 7926
62c933f9 7927 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
7928 return 0;
7929
7930 return -EINVAL;
7931}
7932
674e78ac 7933static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7934 struct drm_atomic_state *state)
674e78ac
NK
7935{
7936 /* Only support async updates on cursor planes. */
7937 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7938 return -EINVAL;
7939
7940 return 0;
7941}
7942
7943static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7944 struct drm_atomic_state *state)
674e78ac 7945{
5ddb0bd4
MR
7946 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7947 plane);
674e78ac 7948 struct drm_plane_state *old_state =
5ddb0bd4 7949 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7950
e8a98235
RS
7951 trace_amdgpu_dm_atomic_update_cursor(new_state);
7952
332af874 7953 swap(plane->state->fb, new_state->fb);
674e78ac
NK
7954
7955 plane->state->src_x = new_state->src_x;
7956 plane->state->src_y = new_state->src_y;
7957 plane->state->src_w = new_state->src_w;
7958 plane->state->src_h = new_state->src_h;
7959 plane->state->crtc_x = new_state->crtc_x;
7960 plane->state->crtc_y = new_state->crtc_y;
7961 plane->state->crtc_w = new_state->crtc_w;
7962 plane->state->crtc_h = new_state->crtc_h;
7963
7964 handle_cursor_update(plane, old_state);
7965}
7966
e7b07cee
HW
7967static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7968 .prepare_fb = dm_plane_helper_prepare_fb,
7969 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7970 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
7971 .atomic_async_check = dm_plane_atomic_async_check,
7972 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
7973};
7974
7975/*
7976 * TODO: these are currently initialized to rgb formats only.
7977 * For future use cases we should either initialize them dynamically based on
7978 * plane capabilities, or initialize this array to all formats, so the internal
1f6010a9 7979 * drm check will succeed, and let DC implement the proper check
e7b07cee 7980 */
d90371b0 7981static const uint32_t rgb_formats[] = {
e7b07cee
HW
7982 DRM_FORMAT_XRGB8888,
7983 DRM_FORMAT_ARGB8888,
7984 DRM_FORMAT_RGBA8888,
7985 DRM_FORMAT_XRGB2101010,
7986 DRM_FORMAT_XBGR2101010,
7987 DRM_FORMAT_ARGB2101010,
7988 DRM_FORMAT_ABGR2101010,
58020403
MK
7989 DRM_FORMAT_XRGB16161616,
7990 DRM_FORMAT_XBGR16161616,
7991 DRM_FORMAT_ARGB16161616,
7992 DRM_FORMAT_ABGR16161616,
bcd47f60
MR
7993 DRM_FORMAT_XBGR8888,
7994 DRM_FORMAT_ABGR8888,
46dd9ff7 7995 DRM_FORMAT_RGB565,
e7b07cee
HW
7996};
7997
0d579c7e
NK
7998static const uint32_t overlay_formats[] = {
7999 DRM_FORMAT_XRGB8888,
8000 DRM_FORMAT_ARGB8888,
8001 DRM_FORMAT_RGBA8888,
8002 DRM_FORMAT_XBGR8888,
8003 DRM_FORMAT_ABGR8888,
7267a1a9 8004 DRM_FORMAT_RGB565
e7b07cee
HW
8005};
8006
8007static const u32 cursor_formats[] = {
8008 DRM_FORMAT_ARGB8888
8009};
8010
37c6a93b
NK
8011static int get_plane_formats(const struct drm_plane *plane,
8012 const struct dc_plane_cap *plane_cap,
8013 uint32_t *formats, int max_formats)
e7b07cee 8014{
37c6a93b
NK
8015 int i, num_formats = 0;
8016
8017 /*
8018 * TODO: Query support for each group of formats directly from
8019 * DC plane caps. This will require adding more formats to the
8020 * caps list.
8021 */
e7b07cee 8022
f180b4bc 8023 switch (plane->type) {
e7b07cee 8024 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
8025 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
8026 if (num_formats >= max_formats)
8027 break;
8028
8029 formats[num_formats++] = rgb_formats[i];
8030 }
8031
ea36ad34 8032 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 8033 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
8034 if (plane_cap && plane_cap->pixel_format_support.p010)
8035 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
8036 if (plane_cap && plane_cap->pixel_format_support.fp16) {
8037 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
8038 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
8039 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
8040 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 8041 }
e7b07cee 8042 break;
37c6a93b 8043
e7b07cee 8044 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
8045 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
8046 if (num_formats >= max_formats)
8047 break;
8048
8049 formats[num_formats++] = overlay_formats[i];
8050 }
e7b07cee 8051 break;
37c6a93b 8052
e7b07cee 8053 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
8054 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
8055 if (num_formats >= max_formats)
8056 break;
8057
8058 formats[num_formats++] = cursor_formats[i];
8059 }
e7b07cee
HW
8060 break;
8061 }
8062
37c6a93b
NK
8063 return num_formats;
8064}
8065
8066static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
8067 struct drm_plane *plane,
8068 unsigned long possible_crtcs,
8069 const struct dc_plane_cap *plane_cap)
8070{
8071 uint32_t formats[32];
8072 int num_formats;
8073 int res = -EPERM;
ecc874a6 8074 unsigned int supported_rotations;
faa37f54 8075 uint64_t *modifiers = NULL;
37c6a93b
NK
8076
8077 num_formats = get_plane_formats(plane, plane_cap, formats,
8078 ARRAY_SIZE(formats));
8079
faa37f54
BN
8080 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
8081 if (res)
8082 return res;
8083
2af10429
TE
8084 if (modifiers == NULL)
8085 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
8086
4a580877 8087 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 8088 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
8089 modifiers, plane->type, NULL);
8090 kfree(modifiers);
37c6a93b
NK
8091 if (res)
8092 return res;
8093
cc1fec57
NK
8094 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
8095 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6 8096 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
76818cdd
SJK
8097 BIT(DRM_MODE_BLEND_PREMULTI) |
8098 BIT(DRM_MODE_BLEND_COVERAGE);
d74004b6
NK
8099
8100 drm_plane_create_alpha_property(plane);
8101 drm_plane_create_blend_mode_property(plane, blend_caps);
8102 }
8103
fc8e5230 8104 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
8105 plane_cap &&
8106 (plane_cap->pixel_format_support.nv12 ||
8107 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
8108 /* This only affects YUV formats. */
8109 drm_plane_create_color_properties(
8110 plane,
8111 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
8112 BIT(DRM_COLOR_YCBCR_BT709) |
8113 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
8114 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
8115 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
8116 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
8117 }
8118
ecc874a6
PLG
8119 supported_rotations =
8120 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
8121 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
8122
1347385f
SS
8123 if (dm->adev->asic_type >= CHIP_BONAIRE &&
8124 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
8125 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
8126 supported_rotations);
ecc874a6 8127
f180b4bc 8128 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 8129
96719c54 8130 /* Create (reset) the plane state */
f180b4bc
HW
8131 if (plane->funcs->reset)
8132 plane->funcs->reset(plane);
96719c54 8133
37c6a93b 8134 return 0;
e7b07cee
HW
8135}
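/*
 * Note on the modifier list lifetime above: drm_universal_plane_init()
 * copies the format_modifiers array into the plane, which is why the
 * kfree(modifiers) immediately after the call is safe even though the
 * plane outlives it.
 */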
8136
7578ecda
AD
8137static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
8138 struct drm_plane *plane,
8139 uint32_t crtc_index)
e7b07cee
HW
8140{
8141 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 8142 struct drm_plane *cursor_plane;
e7b07cee
HW
8143
8144 int res = -ENOMEM;
8145
8146 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8147 if (!cursor_plane)
8148 goto fail;
8149
f180b4bc 8150 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 8151 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
8152
8153 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8154 if (!acrtc)
8155 goto fail;
8156
8157 res = drm_crtc_init_with_planes(
8158 dm->ddev,
8159 &acrtc->base,
8160 plane,
f180b4bc 8161 cursor_plane,
e7b07cee
HW
8162 &amdgpu_dm_crtc_funcs, NULL);
8163
8164 if (res)
8165 goto fail;
8166
8167 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8168
96719c54
HW
8169 /* Create (reset) the crtc state */
8170 if (acrtc->base.funcs->reset)
8171 acrtc->base.funcs->reset(&acrtc->base);
8172
e7b07cee
HW
8173 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8174 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8175
8176 acrtc->crtc_id = crtc_index;
8177 acrtc->base.enabled = false;
c37e2d29 8178 acrtc->otg_inst = -1;
e7b07cee
HW
8179
8180 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
8181 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8182 true, MAX_COLOR_LUT_ENTRIES);
086247a4 8183 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 8184
e7b07cee
HW
8185 return 0;
8186
8187fail:
b830ebc9
HW
8188 kfree(acrtc);
8189 kfree(cursor_plane);
e7b07cee
HW
8190 return res;
8191}
8192
8193
8194static int to_drm_connector_type(enum signal_type st)
8195{
8196 switch (st) {
8197 case SIGNAL_TYPE_HDMI_TYPE_A:
8198 return DRM_MODE_CONNECTOR_HDMIA;
8199 case SIGNAL_TYPE_EDP:
8200 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
8201 case SIGNAL_TYPE_LVDS:
8202 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
8203 case SIGNAL_TYPE_RGB:
8204 return DRM_MODE_CONNECTOR_VGA;
8205 case SIGNAL_TYPE_DISPLAY_PORT:
8206 case SIGNAL_TYPE_DISPLAY_PORT_MST:
8207 return DRM_MODE_CONNECTOR_DisplayPort;
8208 case SIGNAL_TYPE_DVI_DUAL_LINK:
8209 case SIGNAL_TYPE_DVI_SINGLE_LINK:
8210 return DRM_MODE_CONNECTOR_DVID;
8211 case SIGNAL_TYPE_VIRTUAL:
8212 return DRM_MODE_CONNECTOR_VIRTUAL;
8213
8214 default:
8215 return DRM_MODE_CONNECTOR_Unknown;
8216 }
8217}
8218
2b4c1c05
DV
8219static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8220{
62afb4ad
JRS
8221 struct drm_encoder *encoder;
8222
8223 /* There is only one encoder per connector */
8224 drm_connector_for_each_possible_encoder(connector, encoder)
8225 return encoder;
8226
8227 return NULL;
2b4c1c05
DV
8228}
8229
e7b07cee
HW
8230static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8231{
e7b07cee
HW
8232 struct drm_encoder *encoder;
8233 struct amdgpu_encoder *amdgpu_encoder;
8234
2b4c1c05 8235 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
8236
8237 if (encoder == NULL)
8238 return;
8239
8240 amdgpu_encoder = to_amdgpu_encoder(encoder);
8241
8242 amdgpu_encoder->native_mode.clock = 0;
8243
8244 if (!list_empty(&connector->probed_modes)) {
8245 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 8246
e7b07cee 8247 list_for_each_entry(preferred_mode,
b830ebc9
HW
8248 &connector->probed_modes,
8249 head) {
8250 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8251 amdgpu_encoder->native_mode = *preferred_mode;
8252
e7b07cee
HW
8253 break;
8254 }
8255
8256 }
8257}
8258
3ee6b26b
AD
8259static struct drm_display_mode *
8260amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8261 char *name,
8262 int hdisplay, int vdisplay)
e7b07cee
HW
8263{
8264 struct drm_device *dev = encoder->dev;
8265 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8266 struct drm_display_mode *mode = NULL;
8267 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8268
8269 mode = drm_mode_duplicate(dev, native_mode);
8270
b830ebc9 8271 if (mode == NULL)
e7b07cee
HW
8272 return NULL;
8273
8274 mode->hdisplay = hdisplay;
8275 mode->vdisplay = vdisplay;
8276 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 8277 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
8278
8279 return mode;
8280
8281}
8282
8283static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 8284 struct drm_connector *connector)
e7b07cee
HW
8285{
8286 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8287 struct drm_display_mode *mode = NULL;
8288 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
8289 struct amdgpu_dm_connector *amdgpu_dm_connector =
8290 to_amdgpu_dm_connector(connector);
e7b07cee
HW
8291 int i;
8292 int n;
8293 struct mode_size {
8294 char name[DRM_DISPLAY_MODE_LEN];
8295 int w;
8296 int h;
b830ebc9 8297 } common_modes[] = {
e7b07cee
HW
8298 { "640x480", 640, 480},
8299 { "800x600", 800, 600},
8300 { "1024x768", 1024, 768},
8301 { "1280x720", 1280, 720},
8302 { "1280x800", 1280, 800},
8303 {"1280x1024", 1280, 1024},
8304 { "1440x900", 1440, 900},
8305 {"1680x1050", 1680, 1050},
8306 {"1600x1200", 1600, 1200},
8307 {"1920x1080", 1920, 1080},
8308 {"1920x1200", 1920, 1200}
8309 };
8310
b830ebc9 8311 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
8312
8313 for (i = 0; i < n; i++) {
8314 struct drm_display_mode *curmode = NULL;
8315 bool mode_existed = false;
8316
8317 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
8318 common_modes[i].h > native_mode->vdisplay ||
8319 (common_modes[i].w == native_mode->hdisplay &&
8320 common_modes[i].h == native_mode->vdisplay))
8321 continue;
e7b07cee
HW
8322
8323 list_for_each_entry(curmode, &connector->probed_modes, head) {
8324 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 8325 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
8326 mode_existed = true;
8327 break;
8328 }
8329 }
8330
8331 if (mode_existed)
8332 continue;
8333
8334 mode = amdgpu_dm_create_common_mode(encoder,
8335 common_modes[i].name, common_modes[i].w,
8336 common_modes[i].h);
588a7017
ZQ
8337 if (!mode)
8338 continue;
8339
e7b07cee 8340 drm_mode_probed_add(connector, mode);
c84dec2f 8341 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
8342 }
8343}
8344
d77de788
SS
8345static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8346{
8347 struct drm_encoder *encoder;
8348 struct amdgpu_encoder *amdgpu_encoder;
8349 const struct drm_display_mode *native_mode;
8350
8351 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8352 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8353 return;
8354
8355 encoder = amdgpu_dm_connector_to_encoder(connector);
8356 if (!encoder)
8357 return;
8358
8359 amdgpu_encoder = to_amdgpu_encoder(encoder);
8360
8361 native_mode = &amdgpu_encoder->native_mode;
8362 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8363 return;
8364
8365 drm_connector_set_panel_orientation_with_quirk(connector,
8366 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8367 native_mode->hdisplay,
8368 native_mode->vdisplay);
8369}
8370
3ee6b26b
AD
8371static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8372 struct edid *edid)
e7b07cee 8373{
c84dec2f
HW
8374 struct amdgpu_dm_connector *amdgpu_dm_connector =
8375 to_amdgpu_dm_connector(connector);
e7b07cee
HW
8376
8377 if (edid) {
8378 /* empty probed_modes */
8379 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 8380 amdgpu_dm_connector->num_modes =
e7b07cee
HW
8381 drm_add_edid_modes(connector, edid);
8382
f1e5e913
YMM
8383 /* Sort the probed modes before calling
8384 * amdgpu_dm_get_native_mode(), since an EDID can have
8385 * more than one preferred mode. Modes later in the
8386 * probed mode list may have a higher, preferred
8387 * resolution: for example, 3840x2160 in the base EDID
8388 * preferred timing and 4096x2160 as the preferred
8389 * resolution in a later DID extension block.
8390 */
8391 drm_mode_sort(&connector->probed_modes);
e7b07cee 8392 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
8393
8394 /* Freesync capabilities are reset by calling
8395 * drm_add_edid_modes() and need to be
8396 * restored here.
8397 */
8398 amdgpu_dm_update_freesync_caps(connector, edid);
d77de788
SS
8399
8400 amdgpu_set_panel_orientation(connector);
a8d8d3dc 8401 } else {
c84dec2f 8402 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 8403 }
e7b07cee
HW
8404}
8405
a85ba005
NC
8406static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8407 struct drm_display_mode *mode)
8408{
8409 struct drm_display_mode *m;
8410
8411 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8412 if (drm_mode_equal(m, mode))
8413 return true;
8414 }
8415
8416 return false;
8417}
8418
8419static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8420{
8421 const struct drm_display_mode *m;
8422 struct drm_display_mode *new_mode;
8423 uint i;
8424 uint32_t new_modes_count = 0;
8425
8426 /* Standard FPS values
8427 *
12cdff6b
SC
8428 * 23.976 - TV/NTSC
8429 * 24 - Cinema
8430 * 25 - TV/PAL
8431 * 29.97 - TV/NTSC
8432 * 30 - TV/NTSC
8433 * 48 - Cinema HFR
8434 * 50 - TV/PAL
8435 * 60 - Commonly used
8436 * 48,72,96,120 - Multiples of 24
a85ba005 8437 */
9ce5ed6e
CIK
8438 static const uint32_t common_rates[] = {
8439 23976, 24000, 25000, 29970, 30000,
12cdff6b 8440 48000, 50000, 60000, 72000, 96000, 120000
9ce5ed6e 8441 };
a85ba005
NC
8442
8443 /*
8444 * Find the mode with the highest refresh rate at the same resolution
8445 * as the preferred mode. Some monitors report a preferred mode with a
8446 * lower refresh rate than the highest one they support.
8447 */
8448
8449 m = get_highest_refresh_rate_mode(aconnector, true);
8450 if (!m)
8451 return 0;
8452
8453 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8454 uint64_t target_vtotal, target_vtotal_diff;
8455 uint64_t num, den;
8456
8457 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8458 continue;
8459
8460 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8461 common_rates[i] > aconnector->max_vfreq * 1000)
8462 continue;
8463
8464 num = (unsigned long long)m->clock * 1000 * 1000;
8465 den = common_rates[i] * (unsigned long long)m->htotal;
8466 target_vtotal = div_u64(num, den);
8467 target_vtotal_diff = target_vtotal - m->vtotal;
8468
8469 /* Check for illegal modes */
8470 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8471 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8472 m->vtotal + target_vtotal_diff < m->vsync_end)
8473 continue;
8474
8475 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8476 if (!new_mode)
8477 goto out;
8478
8479 new_mode->vtotal += (u16)target_vtotal_diff;
8480 new_mode->vsync_start += (u16)target_vtotal_diff;
8481 new_mode->vsync_end += (u16)target_vtotal_diff;
8482 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8483 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8484
8485 if (!is_duplicate_mode(aconnector, new_mode)) {
8486 drm_mode_probed_add(&aconnector->base, new_mode);
8487 new_modes_count += 1;
8488 } else
8489 drm_mode_destroy(aconnector->base.dev, new_mode);
8490 }
8491 out:
8492 return new_modes_count;
8493}
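/*
 * The vtotal stretch above keeps the pixel clock and htotal fixed and
 * only lengthens the vertical blank to hit a lower refresh rate.
 * Worked example with a hypothetical 1080p60 CEA timing
 * (clock = 148500 kHz, htotal = 2200, vtotal = 1125) and a 50 Hz
 * (50000 mHz) target:
 *
 *   target_vtotal      = 148500 * 1000 * 1000 / (50000 * 2200) = 1350
 *   target_vtotal_diff = 1350 - 1125 = 225
 *
 * so the duplicated mode refreshes at 148500000 / (2200 * 1350) = 50 Hz.
 */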
8494
8495static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8496 struct edid *edid)
8497{
8498 struct amdgpu_dm_connector *amdgpu_dm_connector =
8499 to_amdgpu_dm_connector(connector);
8500
de05abe6 8501 if (!edid)
a85ba005 8502 return;
fe8858bb 8503
a85ba005
NC
8504 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8505 amdgpu_dm_connector->num_modes +=
8506 add_fs_modes(amdgpu_dm_connector);
8507}
8508
7578ecda 8509static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 8510{
c84dec2f
HW
8511 struct amdgpu_dm_connector *amdgpu_dm_connector =
8512 to_amdgpu_dm_connector(connector);
e7b07cee 8513 struct drm_encoder *encoder;
c84dec2f 8514 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 8515
2b4c1c05 8516 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 8517
5c0e6840 8518 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
8519 amdgpu_dm_connector->num_modes =
8520 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
8521 } else {
8522 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8523 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 8524 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 8525 }
3e332d3a 8526 amdgpu_dm_fbc_init(connector);
5099114b 8527
c84dec2f 8528 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
8529}
8530
3ee6b26b
AD
8531void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8532 struct amdgpu_dm_connector *aconnector,
8533 int connector_type,
8534 struct dc_link *link,
8535 int link_index)
e7b07cee 8536{
1348969a 8537 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 8538
f04bee34
NK
8539 /*
8540 * Some of the properties below require access to state, like bpc.
8541 * Allocate some default initial connector state with our reset helper.
8542 */
8543 if (aconnector->base.funcs->reset)
8544 aconnector->base.funcs->reset(&aconnector->base);
8545
e7b07cee
HW
8546 aconnector->connector_id = link_index;
8547 aconnector->dc_link = link;
8548 aconnector->base.interlace_allowed = false;
8549 aconnector->base.doublescan_allowed = false;
8550 aconnector->base.stereo_allowed = false;
8551 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8552 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 8553 aconnector->audio_inst = -1;
e7b07cee
HW
8554 mutex_init(&aconnector->hpd_lock);
8555
1f6010a9
DF
8556 /*
8557 * Configure HPD hot plug support: connector->polled defaults to 0,
b830ebc9
HW
8558 * which means HPD hot plug is not supported.
8559 */
e7b07cee
HW
8560 switch (connector_type) {
8561 case DRM_MODE_CONNECTOR_HDMIA:
8562 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 8563 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8564 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
8565 break;
8566 case DRM_MODE_CONNECTOR_DisplayPort:
8567 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
d715c9a2 8568 link->link_enc = link_enc_cfg_get_link_enc(link);
7b201d53 8569 ASSERT(link->link_enc);
f6e03f80
JS
8570 if (link->link_enc)
8571 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8572 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
8573 break;
8574 case DRM_MODE_CONNECTOR_DVID:
8575 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8576 break;
8577 default:
8578 break;
8579 }
8580
8581 drm_object_attach_property(&aconnector->base.base,
8582 dm->ddev->mode_config.scaling_mode_property,
8583 DRM_MODE_SCALE_NONE);
8584
8585 drm_object_attach_property(&aconnector->base.base,
8586 adev->mode_info.underscan_property,
8587 UNDERSCAN_OFF);
8588 drm_object_attach_property(&aconnector->base.base,
8589 adev->mode_info.underscan_hborder_property,
8590 0);
8591 drm_object_attach_property(&aconnector->base.base,
8592 adev->mode_info.underscan_vborder_property,
8593 0);
1825fd34 8594
8c61b31e
JFZ
8595 if (!aconnector->mst_port)
8596 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 8597
4a8ca46b
RL
8598 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8599 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8600 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 8601
c1ee92f9 8602 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 8603 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
8604 drm_object_attach_property(&aconnector->base.base,
8605 adev->mode_info.abm_level_property, 0);
8606 }
bb47de73
NK
8607
8608 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
8609 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8610 connector_type == DRM_MODE_CONNECTOR_eDP) {
e057b52c 8611 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
88694af9 8612
8c61b31e
JFZ
8613 if (!aconnector->mst_port)
8614 drm_connector_attach_vrr_capable_property(&aconnector->base);
8615
0c8620d6 8616#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 8617 if (adev->dm.hdcp_workqueue)
53e108aa 8618 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 8619#endif
bb47de73 8620 }
e7b07cee
HW
8621}
8622
7578ecda
AD
8623static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8624 struct i2c_msg *msgs, int num)
e7b07cee
HW
8625{
8626 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8627 struct ddc_service *ddc_service = i2c->ddc_service;
8628 struct i2c_command cmd;
8629 int i;
8630 int result = -EIO;
8631
b830ebc9 8632 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
8633
8634 if (!cmd.payloads)
8635 return result;
8636
8637 cmd.number_of_payloads = num;
8638 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8639 cmd.speed = 100;
8640
8641 for (i = 0; i < num; i++) {
8642 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8643 cmd.payloads[i].address = msgs[i].addr;
8644 cmd.payloads[i].length = msgs[i].len;
8645 cmd.payloads[i].data = msgs[i].buf;
8646 }
8647
c85e6e54
DF
8648 if (dc_submit_i2c(
8649 ddc_service->ctx->dc,
22676bc5 8650 ddc_service->link->link_index,
e7b07cee
HW
8651 &cmd))
8652 result = num;
8653
8654 kfree(cmd.payloads);
8655 return result;
8656}
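/*
 * Each struct i2c_msg above maps 1:1 onto a DC i2c_payload; the
 * direction comes from the I2C_M_RD flag and the whole set is submitted
 * to DC as one atomic i2c_command. A sketch of a typical caller-side
 * transaction (hypothetical two-message register read, as the i2c core
 * would issue for an EDID fetch at address 0x50):
 *
 *   u8 offset = 0;
 *   u8 buf[EDID_LENGTH];
 *   struct i2c_msg msgs[] = {
 *       { .addr = 0x50, .flags = 0,        .len = 1,           .buf = &offset },
 *       { .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(buf), .buf = buf },
 *   };
 *   i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)); // adap == &i2c->base
 */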
8657
7578ecda 8658static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
8659{
8660 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8661}
8662
8663static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8664 .master_xfer = amdgpu_dm_i2c_xfer,
8665 .functionality = amdgpu_dm_i2c_func,
8666};
8667
3ee6b26b
AD
8668static struct amdgpu_i2c_adapter *
8669create_i2c(struct ddc_service *ddc_service,
8670 int link_index,
8671 int *res)
e7b07cee
HW
8672{
8673 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8674 struct amdgpu_i2c_adapter *i2c;
8675
b830ebc9 8676 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
8677 if (!i2c)
8678 return NULL;
e7b07cee
HW
8679 i2c->base.owner = THIS_MODULE;
8680 i2c->base.class = I2C_CLASS_DDC;
8681 i2c->base.dev.parent = &adev->pdev->dev;
8682 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 8683 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
8684 i2c_set_adapdata(&i2c->base, i2c);
8685 i2c->ddc_service = ddc_service;
8686
8687 return i2c;
8688}
8689
89fc8d4e 8690
1f6010a9
DF
8691/*
8692 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
8693 * dc_link which will be represented by this aconnector.
8694 */
7578ecda
AD
8695static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8696 struct amdgpu_dm_connector *aconnector,
8697 uint32_t link_index,
8698 struct amdgpu_encoder *aencoder)
e7b07cee
HW
8699{
8700 int res = 0;
8701 int connector_type;
8702 struct dc *dc = dm->dc;
8703 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8704 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
8705
8706 link->priv = aconnector;
e7b07cee 8707
f1ad2f5e 8708 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
8709
8710 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
8711 if (!i2c) {
8712 DRM_ERROR("Failed to create i2c adapter data\n");
8713 return -ENOMEM;
8714 }
8715
e7b07cee
HW
8716 aconnector->i2c = i2c;
8717 res = i2c_add_adapter(&i2c->base);
8718
8719 if (res) {
8720 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8721 goto out_free;
8722 }
8723
8724 connector_type = to_drm_connector_type(link->connector_signal);
8725
17165de2 8726 res = drm_connector_init_with_ddc(
e7b07cee
HW
8727 dm->ddev,
8728 &aconnector->base,
8729 &amdgpu_dm_connector_funcs,
17165de2
AP
8730 connector_type,
8731 &i2c->base);
e7b07cee
HW
8732
8733 if (res) {
8734 DRM_ERROR("connector_init failed\n");
8735 aconnector->connector_id = -1;
8736 goto out_free;
8737 }
8738
8739 drm_connector_helper_add(
8740 &aconnector->base,
8741 &amdgpu_dm_connector_helper_funcs);
8742
8743 amdgpu_dm_connector_init_helper(
8744 dm,
8745 aconnector,
8746 connector_type,
8747 link,
8748 link_index);
8749
cde4c44d 8750 drm_connector_attach_encoder(
e7b07cee
HW
8751 &aconnector->base, &aencoder->base);
8752
e7b07cee
HW
8753 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8754 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 8755 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 8756
e7b07cee
HW
8757out_free:
8758 if (res) {
8759 kfree(i2c);
8760 aconnector->i2c = NULL;
8761 }
8762 return res;
8763}
8764
8765int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8766{
8767 switch (adev->mode_info.num_crtc) {
8768 case 1:
8769 return 0x1;
8770 case 2:
8771 return 0x3;
8772 case 3:
8773 return 0x7;
8774 case 4:
8775 return 0xf;
8776 case 5:
8777 return 0x1f;
8778 case 6:
8779 default:
8780 return 0x3f;
8781 }
8782}
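/*
 * The switch above is equivalent to "(1 << num_crtc) - 1", clamped at
 * six CRTCs: e.g. num_crtc = 4 yields 0xf, allowing every encoder to be
 * routed to any of the four CRTCs via drm_encoder.possible_crtcs.
 */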
8783
7578ecda
AD
8784static int amdgpu_dm_encoder_init(struct drm_device *dev,
8785 struct amdgpu_encoder *aencoder,
8786 uint32_t link_index)
e7b07cee 8787{
1348969a 8788 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8789
8790 int res = drm_encoder_init(dev,
8791 &aencoder->base,
8792 &amdgpu_dm_encoder_funcs,
8793 DRM_MODE_ENCODER_TMDS,
8794 NULL);
8795
8796 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8797
8798 if (!res)
8799 aencoder->encoder_id = link_index;
8800 else
8801 aencoder->encoder_id = -1;
8802
8803 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8804
8805 return res;
8806}
8807
3ee6b26b
AD
8808static void manage_dm_interrupts(struct amdgpu_device *adev,
8809 struct amdgpu_crtc *acrtc,
8810 bool enable)
e7b07cee
HW
8811{
8812 /*
8fe684e9
NK
8813 * We have no guarantee that the frontend index maps to the same
8814 * backend index - some even map to more than one.
8815 *
8816 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
8817 */
8818 int irq_type =
734dd01d 8819 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
8820 adev,
8821 acrtc->crtc_id);
8822
8823 if (enable) {
8824 drm_crtc_vblank_on(&acrtc->base);
8825 amdgpu_irq_get(
8826 adev,
8827 &adev->pageflip_irq,
8828 irq_type);
86bc2219
WL
8829#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8830 amdgpu_irq_get(
8831 adev,
8832 &adev->vline0_irq,
8833 irq_type);
8834#endif
e7b07cee 8835 } else {
86bc2219
WL
8836#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8837 amdgpu_irq_put(
8838 adev,
8839 &adev->vline0_irq,
8840 irq_type);
8841#endif
e7b07cee
HW
8842 amdgpu_irq_put(
8843 adev,
8844 &adev->pageflip_irq,
8845 irq_type);
8846 drm_crtc_vblank_off(&acrtc->base);
8847 }
8848}
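/*
 * Note the mirror-image ordering above: enable turns the CRTC vblank
 * machinery on first and then takes the pageflip (and, if configured,
 * vline0) IRQ references; disable drops the same references in reverse
 * order and turns vblank off last.
 */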
8849
8fe684e9
NK
8850static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8851 struct amdgpu_crtc *acrtc)
8852{
8853 int irq_type =
8854 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8855
8856 /**
8857 * This reads the current state for the IRQ and force-reapplies
8858 * the setting to hardware.
8859 */
8860 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8861}
8862
3ee6b26b
AD
8863static bool
8864is_scaling_state_different(const struct dm_connector_state *dm_state,
8865 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
8866{
8867 if (dm_state->scaling != old_dm_state->scaling)
8868 return true;
8869 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8870 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8871 return true;
8872 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8873 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8874 return true;
b830ebc9
HW
8875 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8876 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8877 return true;
e7b07cee
HW
8878 return false;
8879}
8880
0c8620d6
BL
8881#ifdef CONFIG_DRM_AMD_DC_HDCP
8882static bool is_content_protection_different(struct drm_connector_state *state,
8883 const struct drm_connector_state *old_state,
8884 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8885{
8886 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 8887 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 8888
31c0ed90 8889 /* Handle: Type0/1 change */
53e108aa
BL
8890 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8891 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8892 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8893 return true;
8894 }
8895
31c0ed90
BL
8896 /* CP is being re-enabled, ignore this
8897 *
8898 * Handles: ENABLED -> DESIRED
8899 */
0c8620d6
BL
8900 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8901 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8902 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8903 return false;
8904 }
8905
31c0ed90
BL
8906 /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8907 *
8908 * Handles: UNDESIRED -> ENABLED
8909 */
0c8620d6
BL
8910 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8911 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8912 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8913
0d9a947b
QZ
8914 /* Stream removed and re-enabled
8915 *
8916 * Can sometimes overlap with the HPD case,
8917 * thus set update_hdcp to false to avoid
8918 * setting HDCP multiple times.
8919 *
8920 * Handles: DESIRED -> DESIRED (Special case)
8921 */
8922 if (!(old_state->crtc && old_state->crtc->enabled) &&
8923 state->crtc && state->crtc->enabled &&
8924 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8925 dm_con_state->update_hdcp = false;
8926 return true;
8927 }
8928
8929 /* Hot-plug, headless s3, dpms
8930 *
8931 * Only start HDCP if the display is connected/enabled.
8932 * update_hdcp flag will be set to false until the next
8933 * HPD comes in.
31c0ed90
BL
8934 *
8935 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 8936 */
97f6c917
BL
8937 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8938 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8939 dm_con_state->update_hdcp = false;
0c8620d6 8940 return true;
97f6c917 8941 }
0c8620d6 8942
31c0ed90
BL
8943 /*
8944 * Handles: UNDESIRED -> UNDESIRED
8945 * DESIRED -> DESIRED
8946 * ENABLED -> ENABLED
8947 */
0c8620d6
BL
8948 if (old_state->content_protection == state->content_protection)
8949 return false;
8950
31c0ed90
BL
8951 /*
8952 * Handles: UNDESIRED -> DESIRED
8953 * DESIRED -> UNDESIRED
8954 * ENABLED -> UNDESIRED
8955 */
97f6c917 8956 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
8957 return true;
8958
31c0ed90
BL
8959 /*
8960 * Handles: DESIRED -> ENABLED
8961 */
0c8620d6
BL
8962 return false;
8963}
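/*
 * Net effect of the checks above, as (old -> new : start HDCP?):
 *
 *   Type0/1 change while not UNDESIRED          -> true  (forced to DESIRED)
 *   ENABLED   -> DESIRED                        -> false (kept at ENABLED)
 *   UNDESIRED -> ENABLED (S3 restore)           -> rewritten to DESIRED, falls through
 *   DESIRED   -> DESIRED, CRTC off -> on        -> true  (update_hdcp cleared)
 *   DESIRED   -> DESIRED, update_hdcp set       -> true  (hotplug/dpms path)
 *   old == new otherwise                        -> false
 *   UNDESIRED <-> DESIRED, ENABLED -> UNDESIRED -> true
 *   DESIRED   -> ENABLED                        -> false
 */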
8964
0c8620d6 8965#endif
3ee6b26b
AD
8966static void remove_stream(struct amdgpu_device *adev,
8967 struct amdgpu_crtc *acrtc,
8968 struct dc_stream_state *stream)
e7b07cee
HW
8969{
8970 /* this is the update mode case */
e7b07cee
HW
8971
8972 acrtc->otg_inst = -1;
8973 acrtc->enabled = false;
8974}
8975
7578ecda
AD
8976static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8977 struct dc_cursor_position *position)
2a8f6ccb 8978{
f4c2cc43 8979 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
8980 int x, y;
8981 int xorigin = 0, yorigin = 0;
8982
e371e19c 8983 if (!crtc || !plane->state->fb)
2a8f6ccb 8984 return 0;
2a8f6ccb
HW
8985
8986 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8987 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8988 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8989 __func__,
8990 plane->state->crtc_w,
8991 plane->state->crtc_h);
8992 return -EINVAL;
8993 }
8994
8995 x = plane->state->crtc_x;
8996 y = plane->state->crtc_y;
c14a005c 8997
e371e19c
NK
8998 if (x <= -amdgpu_crtc->max_cursor_width ||
8999 y <= -amdgpu_crtc->max_cursor_height)
9000 return 0;
9001
2a8f6ccb
HW
9002 if (x < 0) {
9003 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
9004 x = 0;
9005 }
9006 if (y < 0) {
9007 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
9008 y = 0;
9009 }
9010 position->enable = true;
d243b6ff 9011 position->translate_by_source = true;
2a8f6ccb
HW
9012 position->x = x;
9013 position->y = y;
9014 position->x_hotspot = xorigin;
9015 position->y_hotspot = yorigin;
9016
9017 return 0;
9018}
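/*
 * Worked example for the clamping above, with hypothetical values: a
 * 64x64 cursor at crtc_x = -10, crtc_y = 20 comes out as x = 0,
 * xorigin = 10, y = 20, yorigin = 0, i.e. the part hanging off the left
 * edge is expressed through the hotspot rather than a negative position.
 */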
9019
3ee6b26b
AD
9020static void handle_cursor_update(struct drm_plane *plane,
9021 struct drm_plane_state *old_plane_state)
e7b07cee 9022{
1348969a 9023 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
9024 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
9025 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
9026 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
9027 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9028 uint64_t address = afb ? afb->address : 0;
6a30a929 9029 struct dc_cursor_position position = {0};
2a8f6ccb
HW
9030 struct dc_cursor_attributes attributes;
9031 int ret;
9032
e7b07cee
HW
9033 if (!plane->state->fb && !old_plane_state->fb)
9034 return;
9035
cb2318b7 9036 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
4711c033
LT
9037 __func__,
9038 amdgpu_crtc->crtc_id,
9039 plane->state->crtc_w,
9040 plane->state->crtc_h);
2a8f6ccb
HW
9041
9042 ret = get_cursor_position(plane, crtc, &position);
9043 if (ret)
9044 return;
9045
9046 if (!position.enable) {
9047 /* turn off cursor */
674e78ac
NK
9048 if (crtc_state && crtc_state->stream) {
9049 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
9050 dc_stream_set_cursor_position(crtc_state->stream,
9051 &position);
674e78ac
NK
9052 mutex_unlock(&adev->dm.dc_lock);
9053 }
2a8f6ccb 9054 return;
e7b07cee 9055 }
e7b07cee 9056
2a8f6ccb
HW
9057 amdgpu_crtc->cursor_width = plane->state->crtc_w;
9058 amdgpu_crtc->cursor_height = plane->state->crtc_h;
9059
c1cefe11 9060 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
9061 attributes.address.high_part = upper_32_bits(address);
9062 attributes.address.low_part = lower_32_bits(address);
9063 attributes.width = plane->state->crtc_w;
9064 attributes.height = plane->state->crtc_h;
9065 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
9066 attributes.rotation_angle = 0;
9067 attributes.attribute_flags.value = 0;
9068
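/* DC expects the pitch in pixels rather than bytes: e.g. a hypothetical
 * 64-wide ARGB8888 cursor FB has pitches[0] = 256 and cpp[0] = 4,
 * giving a pitch of 64.
 */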
03a66367 9069 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 9070
886daac9 9071 if (crtc_state->stream) {
674e78ac 9072 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
9073 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
9074 &attributes))
9075 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 9076
2a8f6ccb
HW
9077 if (!dc_stream_set_cursor_position(crtc_state->stream,
9078 &position))
9079 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 9080 mutex_unlock(&adev->dm.dc_lock);
886daac9 9081 }
2a8f6ccb 9082}
e7b07cee
HW
9083
9084static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
9085{
9086
9087 assert_spin_locked(&acrtc->base.dev->event_lock);
9088 WARN_ON(acrtc->event);
9089
9090 acrtc->event = acrtc->base.state->event;
9091
9092 /* Set the flip status */
9093 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
9094
9095 /* Mark this event as consumed */
9096 acrtc->base.state->event = NULL;
9097
cb2318b7
VL
9098 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
9099 acrtc->crtc_id);
e7b07cee
HW
9100}
9101
bb47de73
NK
9102static void update_freesync_state_on_stream(
9103 struct amdgpu_display_manager *dm,
9104 struct dm_crtc_state *new_crtc_state,
180db303
NK
9105 struct dc_stream_state *new_stream,
9106 struct dc_plane_state *surface,
9107 u32 flip_timestamp_in_us)
bb47de73 9108{
09aef2c4 9109 struct mod_vrr_params vrr_params;
bb47de73 9110 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 9111 struct amdgpu_device *adev = dm->adev;
585d450c 9112 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 9113 unsigned long flags;
4cda3243 9114 bool pack_sdp_v1_3 = false;
bb47de73
NK
9115
9116 if (!new_stream)
9117 return;
9118
9119 /*
9120 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9121 * For now it's sufficient to just guard against these conditions.
9122 */
9123
9124 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9125 return;
9126
4a580877 9127 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 9128 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 9129
180db303
NK
9130 if (surface) {
9131 mod_freesync_handle_preflip(
9132 dm->freesync_module,
9133 surface,
9134 new_stream,
9135 flip_timestamp_in_us,
9136 &vrr_params);
09aef2c4
MK
9137
9138 if (adev->family < AMDGPU_FAMILY_AI &&
9139 amdgpu_dm_vrr_active(new_crtc_state)) {
9140 mod_freesync_handle_v_update(dm->freesync_module,
9141 new_stream, &vrr_params);
e63e2491
EB
9142
9143 /* Need to call this before the frame ends. */
9144 dc_stream_adjust_vmin_vmax(dm->dc,
9145 new_crtc_state->stream,
9146 &vrr_params.adjust);
09aef2c4 9147 }
180db303 9148 }
bb47de73
NK
9149
9150 mod_freesync_build_vrr_infopacket(
9151 dm->freesync_module,
9152 new_stream,
180db303 9153 &vrr_params,
ecd0136b
HT
9154 PACKET_TYPE_VRR,
9155 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
9156 &vrr_infopacket,
9157 pack_sdp_v1_3);
bb47de73 9158
8a48b44c 9159 new_crtc_state->freesync_timing_changed |=
585d450c 9160 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
9161 &vrr_params.adjust,
9162 sizeof(vrr_params.adjust)) != 0);
bb47de73 9163
8a48b44c 9164 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
9165 (memcmp(&new_crtc_state->vrr_infopacket,
9166 &vrr_infopacket,
9167 sizeof(vrr_infopacket)) != 0);
9168
585d450c 9169 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
9170 new_crtc_state->vrr_infopacket = vrr_infopacket;
9171
585d450c 9172 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
9173 new_stream->vrr_infopacket = vrr_infopacket;
9174
9175 if (new_crtc_state->freesync_vrr_info_changed)
9176 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9177 new_crtc_state->base.crtc->base.id,
9178 (int)new_crtc_state->base.vrr_enabled,
180db303 9179 (int)vrr_params.state);
09aef2c4 9180
4a580877 9181 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
9182}
9183
585d450c 9184static void update_stream_irq_parameters(
e854194c
MK
9185 struct amdgpu_display_manager *dm,
9186 struct dm_crtc_state *new_crtc_state)
9187{
9188 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 9189 struct mod_vrr_params vrr_params;
e854194c 9190 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 9191 struct amdgpu_device *adev = dm->adev;
585d450c 9192 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 9193 unsigned long flags;
e854194c
MK
9194
9195 if (!new_stream)
9196 return;
9197
9198 /*
9199 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9200 * For now it's sufficient to just guard against these conditions.
9201 */
9202 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9203 return;
9204
4a580877 9205 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 9206 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 9207
e854194c
MK
9208 if (new_crtc_state->vrr_supported &&
9209 config.min_refresh_in_uhz &&
9210 config.max_refresh_in_uhz) {
a85ba005
NC
9211 /*
9212 * if freesync compatible mode was set, config.state will be set
9213 * in atomic check
9214 */
9215 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9216 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9217 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9218 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9219 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9220 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9221 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9222 } else {
9223 config.state = new_crtc_state->base.vrr_enabled ?
9224 VRR_STATE_ACTIVE_VARIABLE :
9225 VRR_STATE_INACTIVE;
9226 }
e854194c
MK
9227 } else {
9228 config.state = VRR_STATE_UNSUPPORTED;
9229 }
9230
9231 mod_freesync_build_vrr_params(dm->freesync_module,
9232 new_stream,
9233 &config, &vrr_params);
9234
9235 new_crtc_state->freesync_timing_changed |=
585d450c
AP
9236 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9237 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 9238
585d450c
AP
9239 new_crtc_state->freesync_config = config;
9240 /* Copy state for access from DM IRQ handler */
9241 acrtc->dm_irq_params.freesync_config = config;
9242 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9243 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 9244 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
9245}
9246
66b0c973
MK
9247static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9248 struct dm_crtc_state *new_state)
9249{
9250 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9251 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9252
9253 if (!old_vrr_active && new_vrr_active) {
9254 /* Transition VRR inactive -> active:
9255 * While VRR is active, we must not disable vblank irq, as a
9256 * reenable after disable would compute bogus vblank/pflip
9257 * timestamps if it likely happened inside display front-porch.
d2574c33
MK
9258 *
9259 * We also need vupdate irq for the actual core vblank handling
9260 * at end of vblank.
66b0c973 9261 */
d2574c33 9262 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
9263 drm_crtc_vblank_get(new_state->base.crtc);
9264 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9265 __func__, new_state->base.crtc->base.id);
9266 } else if (old_vrr_active && !new_vrr_active) {
9267 /* Transition VRR active -> inactive:
9268 * Allow vblank irq disable again for fixed refresh rate.
9269 */
d2574c33 9270 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
9271 drm_crtc_vblank_put(new_state->base.crtc);
9272 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9273 __func__, new_state->base.crtc->base.id);
9274 }
9275}
9276
8ad27806
NK
9277static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9278{
9279 struct drm_plane *plane;
5760dcb9 9280 struct drm_plane_state *old_plane_state;
8ad27806
NK
9281 int i;
9282
9283 /*
9284 * TODO: Make this per-stream so we don't issue redundant updates for
9285 * commits with multiple streams.
9286 */
5760dcb9 9287 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8ad27806
NK
9288 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9289 handle_cursor_update(plane, old_plane_state);
9290}
9291
3be5262e 9292static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 9293 struct dc_state *dc_state,
3ee6b26b
AD
9294 struct drm_device *dev,
9295 struct amdgpu_display_manager *dm,
9296 struct drm_crtc *pcrtc,
420cd472 9297 bool wait_for_vblank)
e7b07cee 9298{
efc8278e 9299 uint32_t i;
8a48b44c 9300 uint64_t timestamp_ns;
e7b07cee 9301 struct drm_plane *plane;
0bc9706d 9302 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 9303 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
9304 struct drm_crtc_state *new_pcrtc_state =
9305 drm_atomic_get_new_crtc_state(state, pcrtc);
9306 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
9307 struct dm_crtc_state *dm_old_crtc_state =
9308 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 9309 int planes_count = 0, vpos, hpos;
570c91d5 9310 long r;
e7b07cee 9311 unsigned long flags;
8a48b44c 9312 struct amdgpu_bo *abo;
fdd1fe57
MK
9313 uint32_t target_vblank, last_flip_vblank;
9314 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 9315 bool pflip_present = false;
bc7f670e
DF
9316 struct {
9317 struct dc_surface_update surface_updates[MAX_SURFACES];
9318 struct dc_plane_info plane_infos[MAX_SURFACES];
9319 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 9320 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 9321 struct dc_stream_update stream_update;
74aa7bd4 9322 } *bundle;
bc7f670e 9323
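/* The bundle embeds several MAX_SURFACES-sized arrays and would be far
 * too large for the kernel stack, hence (presumably) the heap
 * allocation below instead of a local variable.
 */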
74aa7bd4 9324 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 9325
74aa7bd4
DF
9326 if (!bundle) {
9327 dm_error("Failed to allocate update bundle\n");
4b510503
NK
9328 goto cleanup;
9329 }
e7b07cee 9330
8ad27806
NK
9331 /*
9332 * Disable the cursor first if we're disabling all the planes.
9333 * It'll remain on the screen after the planes are re-enabled
9334 * if we don't.
9335 */
9336 if (acrtc_state->active_planes == 0)
9337 amdgpu_dm_commit_cursors(state);
9338
e7b07cee 9339 /* update planes when needed */
efc8278e 9340 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 9341 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 9342 struct drm_crtc_state *new_crtc_state;
0bc9706d 9343 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 9344 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 9345 bool plane_needs_flip;
c7af5f77 9346 struct dc_plane_state *dc_plane;
54d76575 9347 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 9348
80c218d5
NK
9349 /* Cursor plane is handled after stream updates */
9350 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 9351 continue;
e7b07cee 9352
f5ba60fe
DD
9353 if (!fb || !crtc || pcrtc != crtc)
9354 continue;
9355
9356 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9357 if (!new_crtc_state->active)
e7b07cee
HW
9358 continue;
9359
bc7f670e 9360 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 9361
74aa7bd4 9362 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 9363 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
9364 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9365 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 9366 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 9367 }
8a48b44c 9368
4375d625 9369 fill_dc_scaling_info(dm->adev, new_plane_state,
695af5f9 9370 &bundle->scaling_infos[planes_count]);
8a48b44c 9371
695af5f9
NK
9372 bundle->surface_updates[planes_count].scaling_info =
9373 &bundle->scaling_infos[planes_count];
8a48b44c 9374
f5031000 9375 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 9376
f5031000 9377 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 9378
f5031000
DF
9379 if (!plane_needs_flip) {
9380 planes_count += 1;
9381 continue;
9382 }
8a48b44c 9383
2fac0f53
CK
9384 abo = gem_to_amdgpu_bo(fb->obj[0]);
9385
f8308898
AG
9386 /*
9387 * Wait for all fences on this FB. Do a limited wait to avoid
9388 * deadlock during GPU reset, when this fence will not signal
9389 * but we still hold the reservation lock for the BO.
9390 */
7bc80a54
CK
9391 r = dma_resv_wait_timeout(abo->tbo.base.resv,
9392 DMA_RESV_USAGE_WRITE, false,
d3fae3b3 9393 msecs_to_jiffies(5000));
f8308898 9394 if (unlikely(r <= 0))
ed8a5fb2 9395 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 9396
695af5f9 9397 fill_dc_plane_info_and_addr(
8ce5d842 9398 dm->adev, new_plane_state,
6eed95b0 9399 afb->tiling_flags,
695af5f9 9400 &bundle->plane_infos[planes_count],
87b7ebc2 9401 &bundle->flip_addrs[planes_count].address,
6eed95b0 9402 afb->tmz_surface, false);
87b7ebc2 9403
9f07550b 9404 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
9405 new_plane_state->plane->index,
9406 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
9407
9408 bundle->surface_updates[planes_count].plane_info =
9409 &bundle->plane_infos[planes_count];
8a48b44c 9410
7cc191ee
LL
9411 fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
9412 new_crtc_state,
9413 &bundle->flip_addrs[planes_count]);
9414
caff0e66
NK
9415 /*
9416 * Only allow immediate flips for fast updates that don't
9417 * change FB pitch, DCC state, rotation or mirroring.
9418 */
f5031000 9419 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 9420 crtc->state->async_flip &&
caff0e66 9421 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 9422
f5031000
DF
9423 timestamp_ns = ktime_get_ns();
9424 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9425 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9426 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 9427
f5031000
DF
9428 if (!bundle->surface_updates[planes_count].surface) {
9429 DRM_ERROR("No surface for CRTC: id=%d\n",
9430 acrtc_attach->crtc_id);
9431 continue;
bc7f670e
DF
9432 }
9433
f5031000
DF
9434 if (plane == pcrtc->primary)
9435 update_freesync_state_on_stream(
9436 dm,
9437 acrtc_state,
9438 acrtc_state->stream,
9439 dc_plane,
9440 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 9441
9f07550b 9442 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
9443 __func__,
9444 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9445 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
9446
9447 planes_count += 1;
9448
8a48b44c
DF
9449 }
9450
74aa7bd4 9451 if (pflip_present) {
634092b1
MK
9452 if (!vrr_active) {
9453 /* Use old throttling in non-vrr fixed refresh rate mode
9454 * to keep flip scheduling based on target vblank counts
9455 * working in a backwards compatible way, e.g., for
9456 * clients using the GLX_OML_sync_control extension or
9457 * DRI3/Present extension with defined target_msc.
9458 */
e3eff4b5 9459 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
9460 }
9461 else {
9462 /* For variable refresh rate mode only:
9463 * Get vblank of last completed flip to avoid > 1 vrr
9464 * flips per video frame by use of throttling, but allow
9465 * flip programming anywhere in the possibly large
9466 * variable vrr vblank interval for fine-grained flip
9467 * timing control and more opportunity to avoid stutter
9468 * on late submission of flips.
9469 */
9470 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 9471 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
9472 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9473 }
9474
fdd1fe57 9475 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
9476
9477 /*
9478 * Wait until we're out of the vertical blank period before the one
9479 * targeted by the flip
9480 */
9481 while ((acrtc_attach->enabled &&
9482 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9483 0, &vpos, &hpos, NULL,
9484 NULL, &pcrtc->hwmode)
9485 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9486 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9487 (int)(target_vblank -
e3eff4b5 9488 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
9489 usleep_range(1000, 1100);
9490 }
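/*
 * The (int) cast in the loop condition above is the usual kernel
 * wraparound-safe comparison idiom (cf. time_after()): vblank counts
 * are u32, and the signed difference stays correct even once
 * target_vblank has wrapped past zero.
 */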
9491
8fe684e9
NK
9492 /**
9493 * Prepare the flip event for the pageflip interrupt to handle.
9494 *
9495 * This only works in the case where we've already turned on the
9496 * appropriate hardware blocks (eg. HUBP) so in the transition case
9497 * from 0 -> n planes we have to skip a hardware generated event
9498 * and rely on sending it from software.
9499 */
9500 if (acrtc_attach->base.state->event &&
10a36226 9501 acrtc_state->active_planes > 0) {
8a48b44c
DF
9502 drm_crtc_vblank_get(pcrtc);
9503
9504 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9505
9506 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9507 prepare_flip_isr(acrtc_attach);
9508
9509 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9510 }
9511
9512 if (acrtc_state->stream) {
8a48b44c 9513 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 9514 bundle->stream_update.vrr_infopacket =
8a48b44c 9515 &acrtc_state->stream->vrr_infopacket;
e7b07cee 9516 }
e7b07cee
HW
9517 }
9518
bc92c065 9519 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
9520 if ((planes_count || acrtc_state->active_planes == 0) &&
9521 acrtc_state->stream) {
58aa1c50
NK
9522 /*
9523 * If PSR or idle optimizations are enabled then flush out
9524 * any pending work before hardware programming.
9525 */
06dd1888
NK
9526 if (dm->vblank_control_workqueue)
9527 flush_workqueue(dm->vblank_control_workqueue);
58aa1c50 9528
b6e881c9 9529 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 9530 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
9531 bundle->stream_update.src = acrtc_state->stream->src;
9532 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
9533 }
9534
cf020d49
NK
9535 if (new_pcrtc_state->color_mgmt_changed) {
9536 /*
9537 * TODO: This isn't fully correct since we've actually
9538 * already modified the stream in place.
9539 */
9540 bundle->stream_update.gamut_remap =
9541 &acrtc_state->stream->gamut_remap_matrix;
9542 bundle->stream_update.output_csc_transform =
9543 &acrtc_state->stream->csc_color_matrix;
9544 bundle->stream_update.out_transfer_func =
9545 acrtc_state->stream->out_transfer_func;
9546 }
bc7f670e 9547
8a48b44c 9548 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 9549 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 9550 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 9551
e63e2491
EB
9552 /*
9553 * If FreeSync state on the stream has changed then we need to
9554 * re-adjust the min/max bounds now that DC doesn't handle this
9555 * as part of commit.
9556 */
a85ba005 9557 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
9558 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9559 dc_stream_adjust_vmin_vmax(
9560 dm->dc, acrtc_state->stream,
585d450c 9561 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
9562 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9563 }
bc7f670e 9564 mutex_lock(&dm->dc_lock);
8c322309 9565 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 9566 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
9567 amdgpu_dm_psr_disable(acrtc_state->stream);
9568
bc7f670e 9569 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 9570 bundle->surface_updates,
bc7f670e
DF
9571 planes_count,
9572 acrtc_state->stream,
efc8278e
AJ
9573 &bundle->stream_update,
9574 dc_state);
8c322309 9575
8fe684e9
NK
9576 /**
9577 * Enable or disable the interrupts on the backend.
9578 *
9579 * Most pipes are put into power gating when unused.
9580 *
9581 * When power gating is enabled on a pipe we lose the
9582 * interrupt enablement state when power gating is disabled.
9583 *
9584 * So we need to update the IRQ control state in hardware
9585 * whenever the pipe turns on (since it could be previously
9586 * power gated) or off (since some pipes can't be power gated
9587 * on some ASICs).
9588 */
9589 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
9590 dm_update_pflip_irq_state(drm_to_adev(dev),
9591 acrtc_attach);
8fe684e9 9592
8c322309 9593 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 9594 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 9595 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309 9596 amdgpu_dm_link_setup_psr(acrtc_state->stream);
58aa1c50
NK
9597
9598 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9599 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9600 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9601 struct amdgpu_dm_connector *aconn =
9602 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
1a365683
RL
9603
9604 if (aconn->psr_skip_count > 0)
9605 aconn->psr_skip_count--;
58aa1c50
NK
9606
9607 /* Allow PSR when skip count is 0. */
9608 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
7cc191ee
LL
9609
9610 /*
9611 * If sink supports PSR SU, there is no need to rely on
9612 * a vblank event disable request to enable PSR. PSR SU
9613 * can be enabled immediately once OS demonstrates an
9614 * adequate number of fast atomic commits to notify KMD
9615 * of update events. See `vblank_control_worker()`.
9616 */
9617 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
9618 acrtc_attach->dm_irq_params.allow_psr_entry &&
9619 !acrtc_state->stream->link->psr_settings.psr_allow_active)
9620 amdgpu_dm_psr_enable(acrtc_state->stream);
58aa1c50
NK
9621 } else {
9622 acrtc_attach->dm_irq_params.allow_psr_entry = false;
8c322309
RL
9623 }
9624
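	/*
	 * Aside (hypothetical helper, not driver code): the PSR gating
	 * above is a small hysteresis counter, roughly equivalent to
	 *
	 *	static bool psr_entry_allowed(struct amdgpu_dm_connector *aconn)
	 *	{
	 *		if (aconn->psr_skip_count > 0) {
	 *			aconn->psr_skip_count--;	// still counting fast updates
	 *			return false;
	 *		}
	 *		return true;	// enough consecutive fast updates seen
	 *	}
	 *
	 * PSR1 then waits for the vblank worker to enter PSR, while PSR SU
	 * (>= DC_PSR_VERSION_SU_1) may be enabled immediately above.
	 */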
bc7f670e 9625 mutex_unlock(&dm->dc_lock);
e7b07cee 9626 }
4b510503 9627
8ad27806
NK
9628 /*
9629 * Update cursor state *after* programming all the planes.
9630 * This avoids redundant programming in the case where we're going
9631 * to be disabling a single plane - those pipes are being disabled.
9632 */
9633 if (acrtc_state->active_planes)
9634 amdgpu_dm_commit_cursors(state);
80c218d5 9635
4b510503 9636cleanup:
74aa7bd4 9637 kfree(bundle);
e7b07cee
HW
9638}
9639
6ce8f316
NK
9640static void amdgpu_dm_commit_audio(struct drm_device *dev,
9641 struct drm_atomic_state *state)
9642{
1348969a 9643 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
9644 struct amdgpu_dm_connector *aconnector;
9645 struct drm_connector *connector;
9646 struct drm_connector_state *old_con_state, *new_con_state;
9647 struct drm_crtc_state *new_crtc_state;
9648 struct dm_crtc_state *new_dm_crtc_state;
9649 const struct dc_stream_status *status;
9650 int i, inst;
9651
9652 /* Notify device removals. */
9653 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9654 if (old_con_state->crtc != new_con_state->crtc) {
9655 /* CRTC changes require notification. */
9656 goto notify;
9657 }
9658
9659 if (!new_con_state->crtc)
9660 continue;
9661
9662 new_crtc_state = drm_atomic_get_new_crtc_state(
9663 state, new_con_state->crtc);
9664
9665 if (!new_crtc_state)
9666 continue;
9667
9668 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9669 continue;
9670
9671 notify:
9672 aconnector = to_amdgpu_dm_connector(connector);
9673
9674 mutex_lock(&adev->dm.audio_lock);
9675 inst = aconnector->audio_inst;
9676 aconnector->audio_inst = -1;
9677 mutex_unlock(&adev->dm.audio_lock);
9678
9679 amdgpu_dm_audio_eld_notify(adev, inst);
9680 }
9681
9682 /* Notify audio device additions. */
9683 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9684 if (!new_con_state->crtc)
9685 continue;
9686
9687 new_crtc_state = drm_atomic_get_new_crtc_state(
9688 state, new_con_state->crtc);
9689
9690 if (!new_crtc_state)
9691 continue;
9692
9693 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9694 continue;
9695
9696 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9697 if (!new_dm_crtc_state->stream)
9698 continue;
9699
9700 status = dc_stream_get_status(new_dm_crtc_state->stream);
9701 if (!status)
9702 continue;
9703
9704 aconnector = to_amdgpu_dm_connector(connector);
9705
9706 mutex_lock(&adev->dm.audio_lock);
9707 inst = status->audio_inst;
9708 aconnector->audio_inst = inst;
9709 mutex_unlock(&adev->dm.audio_lock);
9710
9711 amdgpu_dm_audio_eld_notify(adev, inst);
9712 }
9713}
9714
1f6010a9 9715/*
27b3f4fc
LSL
9716 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9717 * @crtc_state: the DRM CRTC state
9718 * @stream_state: the DC stream state.
9719 *
9720 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9721 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9722 */
9723static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9724 struct dc_stream_state *stream_state)
9725{
b9952f93 9726 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 9727}
e7b07cee 9728
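/*
 * For orientation: amdgpu_dm_atomic_commit_tail() below is hooked into
 * DRM's atomic helpers via drm_mode_config_helper_funcs. A sketch of the
 * registration (the structure name here is hypothetical; the real one
 * lives elsewhere in this file):
 *
 *	static const struct drm_mode_config_helper_funcs dm_helpers_sketch = {
 *		.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
 *	};
 *
 * The DRM core then calls it once per atomic commit, after fences have
 * signaled and the new framebuffers are pinned.
 */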
b8592b48
LL
9729/**
9730 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9731 * @state: The atomic state to commit
9732 *
9733 * This will tell DC to commit the constructed DC state from atomic_check,
9734 * programming the hardware. Any failure here implies a hardware failure, since
9735 * atomic check should have filtered anything non-kosher.
9736 */
7578ecda 9737static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
9738{
9739 struct drm_device *dev = state->dev;
1348969a 9740 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
9741 struct amdgpu_display_manager *dm = &adev->dm;
9742 struct dm_atomic_state *dm_state;
eb3dc897 9743 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 9744 uint32_t i, j;
5cc6dcbd 9745 struct drm_crtc *crtc;
0bc9706d 9746 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
9747 unsigned long flags;
9748 bool wait_for_vblank = true;
9749 struct drm_connector *connector;
c2cea706 9750 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 9751 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 9752 int crtc_disable_count = 0;
6ee90e88 9753 bool mode_set_reset_required = false;
e7b07cee 9754
e8a98235
RS
9755 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9756
e7b07cee
HW
9757 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9758
eb3dc897
NK
9759 dm_state = dm_atomic_get_new_state(state);
9760 if (dm_state && dm_state->context) {
9761 dc_state = dm_state->context;
9762 } else {
9763 /* No state changes, retain current state. */
813d20dc 9764 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
9765 ASSERT(dc_state_temp);
9766 dc_state = dc_state_temp;
9767 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9768 }
e7b07cee 9769
6d90a208
AP
9770 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9771 new_crtc_state, i) {
9772 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9773
9774 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9775
9776 if (old_crtc_state->active &&
9777 (!new_crtc_state->active ||
9778 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9779 manage_dm_interrupts(adev, acrtc, false);
9780 dc_stream_release(dm_old_crtc_state->stream);
9781 }
9782 }
9783
8976f73b
RS
9784 drm_atomic_helper_calc_timestamping_constants(state);
9785
e7b07cee 9786 /* update changed items */
0bc9706d 9787 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 9788 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9789
54d76575
LSL
9790 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9791 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 9792
9f07550b 9793 drm_dbg_state(state->dev,
e7b07cee
HW
9794 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9795 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9796 "connectors_changed:%d\n",
9797 acrtc->crtc_id,
0bc9706d
LSL
9798 new_crtc_state->enable,
9799 new_crtc_state->active,
9800 new_crtc_state->planes_changed,
9801 new_crtc_state->mode_changed,
9802 new_crtc_state->active_changed,
9803 new_crtc_state->connectors_changed);
e7b07cee 9804
5c68c652
VL
9805 /* Disable cursor if disabling crtc */
9806 if (old_crtc_state->active && !new_crtc_state->active) {
9807 struct dc_cursor_position position;
9808
9809 memset(&position, 0, sizeof(position));
9810 mutex_lock(&dm->dc_lock);
9811 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9812 mutex_unlock(&dm->dc_lock);
9813 }
9814
27b3f4fc
LSL
9815 /* Copy all transient state flags into dc state */
9816 if (dm_new_crtc_state->stream) {
9817 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9818 dm_new_crtc_state->stream);
9819 }
9820
e7b07cee
HW
9821 /* handles headless hotplug case, updating new_state and
9822 * aconnector as needed
9823 */
9824
54d76575 9825 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 9826
4711c033 9827 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9828
54d76575 9829 if (!dm_new_crtc_state->stream) {
e7b07cee 9830 /*
b830ebc9
HW
9831 * this could happen because of issues with
9832 * userspace notification delivery.
9833 * In this case userspace tries to set a mode on a
1f6010a9
DF
9834 * display which is in fact disconnected.
9835 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
9836 * We expect a mode reset to come soon.
9837 *
9838 * This can also happen when an unplug is done
9839 * during the resume sequence.
9840 *
9841 * In this case, we want to pretend we still
9842 * have a sink to keep the pipe running, so that
9843 * hw state stays consistent with the sw state.
9844 */
f1ad2f5e 9845 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
9846 __func__, acrtc->base.base.id);
9847 continue;
9848 }
9849
54d76575
LSL
9850 if (dm_old_crtc_state->stream)
9851 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 9852
97028037
LP
9853 pm_runtime_get_noresume(dev->dev);
9854
e7b07cee 9855 acrtc->enabled = true;
0bc9706d
LSL
9856 acrtc->hw_mode = new_crtc_state->mode;
9857 crtc->hwmode = new_crtc_state->mode;
6ee90e88 9858 mode_set_reset_required = true;
0bc9706d 9859 } else if (modereset_required(new_crtc_state)) {
4711c033 9860 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9861 /* i.e. reset mode */
6ee90e88 9862 if (dm_old_crtc_state->stream)
54d76575 9863 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 9864
6ee90e88 9865 mode_set_reset_required = true;
e7b07cee
HW
9866 }
9867 } /* for_each_crtc_in_state() */
9868
eb3dc897 9869 if (dc_state) {
6ee90e88 9870 /* if there is a mode set or reset, disable eDP PSR */
58aa1c50 9871 if (mode_set_reset_required) {
06dd1888
NK
9872 if (dm->vblank_control_workqueue)
9873 flush_workqueue(dm->vblank_control_workqueue);
cae5c1ab 9874
6ee90e88 9875 amdgpu_dm_psr_disable_all(dm);
58aa1c50 9876 }
6ee90e88 9877
eb3dc897 9878 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 9879 mutex_lock(&dm->dc_lock);
eb3dc897 9880 WARN_ON(!dc_commit_state(dm->dc, dc_state));
f3106c94
JC
9881
9882 /* Allow idle optimization when vblank count is 0 for display off */
9883 if (dm->active_vblank_irq_count == 0)
9884 dc_allow_idle_optimizations(dm->dc, true);
674e78ac 9885 mutex_unlock(&dm->dc_lock);
fa2123db 9886 }
fe8858bb 9887
0bc9706d 9888 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9889 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9890
54d76575 9891 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9892
54d76575 9893 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 9894 const struct dc_stream_status *status =
54d76575 9895 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 9896
eb3dc897 9897 if (!status)
09f609c3
LL
9898 status = dc_stream_get_status_from_state(dc_state,
9899 dm_new_crtc_state->stream);
e7b07cee 9900 if (!status)
54d76575 9901 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
9902 else
9903 acrtc->otg_inst = status->primary_otg_inst;
9904 }
9905 }
0c8620d6
BL
9906#ifdef CONFIG_DRM_AMD_DC_HDCP
9907 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9908 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9909 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9910 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9911
9912 new_crtc_state = NULL;
9913
9914 if (acrtc)
9915 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9916
9917 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9918
9919 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9920 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9921 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9922 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 9923 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
9924 continue;
9925 }
9926
9927 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
9928 hdcp_update_display(
9929 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 9930 new_con_state->hdcp_content_type,
0e86d3d4 9931 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
0c8620d6
BL
9932 }
9933#endif
e7b07cee 9934
02d6a6fc 9935 /* Handle connector state changes */
c2cea706 9936 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9937 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9938 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9939 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 9940 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 9941 struct dc_stream_update stream_update;
b232d4ed 9942 struct dc_info_packet hdr_packet;
e7b07cee 9943 struct dc_stream_status *status = NULL;
b232d4ed 9944 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 9945
efc8278e 9946 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
9947 memset(&stream_update, 0, sizeof(stream_update));
9948
44d09c6a 9949 if (acrtc) {
0bc9706d 9950 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
9951 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9952 }
0bc9706d 9953
e7b07cee 9954 /* Skip any modesets/resets */
0bc9706d 9955 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
9956 continue;
9957
54d76575 9958 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
9959 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9960
b232d4ed
NK
9961 scaling_changed = is_scaling_state_different(dm_new_con_state,
9962 dm_old_con_state);
9963
9964 abm_changed = dm_new_crtc_state->abm_level !=
9965 dm_old_crtc_state->abm_level;
9966
9967 hdr_changed =
72921cdf 9968 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
b232d4ed
NK
9969
9970 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 9971 continue;
e7b07cee 9972
b6e881c9 9973 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 9974 if (scaling_changed) {
02d6a6fc 9975 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 9976 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 9977
02d6a6fc
DF
9978 stream_update.src = dm_new_crtc_state->stream->src;
9979 stream_update.dst = dm_new_crtc_state->stream->dst;
9980 }
9981
b232d4ed 9982 if (abm_changed) {
02d6a6fc
DF
9983 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9984
9985 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9986 }
70e8ffc5 9987
b232d4ed
NK
9988 if (hdr_changed) {
9989 fill_hdr_info_packet(new_con_state, &hdr_packet);
9990 stream_update.hdr_static_metadata = &hdr_packet;
9991 }
9992
54d76575 9993 status = dc_stream_get_status(dm_new_crtc_state->stream);
57738ae4
ND
9994
9995 if (WARN_ON(!status))
9996 continue;
9997
3be5262e 9998 WARN_ON(!status->plane_count);
e7b07cee 9999
02d6a6fc
DF
10000 /*
10001 * TODO: DC refuses to perform stream updates without a dc_surface_update.
10002 * Here we create an empty update on each plane.
10003 * To fix this, DC should permit updating only stream properties.
10004 */
10005 for (j = 0; j < status->plane_count; j++)
efc8278e 10006 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
10007
10008
10009 mutex_lock(&dm->dc_lock);
10010 dc_commit_updates_for_stream(dm->dc,
efc8278e 10011 dummy_updates,
02d6a6fc
DF
10012 status->plane_count,
10013 dm_new_crtc_state->stream,
efc8278e
AJ
10014 &stream_update,
10015 dc_state);
02d6a6fc 10016 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
10017 }
10018
b5e83f6f 10019 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 10020 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 10021 new_crtc_state, i) {
fe2a1965
LP
10022 if (old_crtc_state->active && !new_crtc_state->active)
10023 crtc_disable_count++;
10024
54d76575 10025 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 10026 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 10027
585d450c
AP
10028 /* For freesync config update on crtc state and params for irq */
10029 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 10030
66b0c973
MK
10031 /* Handle vrr on->off / off->on transitions */
10032 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
10033 dm_new_crtc_state);
e7b07cee
HW
10034 }
10035
8fe684e9
NK
10036 /**
10037 * Enable interrupts for CRTCs that are newly enabled or went through
10038 * a modeset. It was intentionally deferred until after the front end
10039 * state was modified to wait until the OTG was on and so the IRQ
10040 * handlers didn't access stale or invalid state.
10041 */
10042 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10043 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 10044#ifdef CONFIG_DEBUG_FS
86bc2219 10045 bool configure_crc = false;
8e7b6fee 10046 enum amdgpu_dm_pipe_crc_source cur_crc_src;
d98af272
WL
10047#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10048 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
10049#endif
10050 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10051 cur_crc_src = acrtc->dm_irq_params.crc_src;
10052 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 10053#endif
585d450c
AP
10054 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10055
8fe684e9
NK
10056 if (new_crtc_state->active &&
10057 (!old_crtc_state->active ||
10058 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
10059 dc_stream_retain(dm_new_crtc_state->stream);
10060 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 10061 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 10062
24eb9374 10063#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
10064 /**
10065 * Frontend may have changed so reapply the CRC capture
10066 * settings for the stream.
10067 */
10068 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 10069
8e7b6fee 10070 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
10071 configure_crc = true;
10072#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
10073 if (amdgpu_dm_crc_window_is_activated(crtc)) {
10074 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10075 acrtc->dm_irq_params.crc_window.update_win = true;
10076 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
10077 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
10078 crc_rd_wrk->crtc = crtc;
10079 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
10080 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10081 }
86bc2219 10082#endif
e2881d6d 10083 }
c920888c 10084
86bc2219 10085 if (configure_crc)
bbc49fc0
WL
10086 if (amdgpu_dm_crtc_configure_crc_source(
10087 crtc, dm_new_crtc_state, cur_crc_src))
10088 DRM_DEBUG_DRIVER("Failed to configure crc source");
24eb9374 10089#endif
8fe684e9
NK
10090 }
10091 }
e7b07cee 10092
420cd472 10093 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 10094 if (new_crtc_state->async_flip)
420cd472
DF
10095 wait_for_vblank = false;
10096
e7b07cee 10097 /* update planes when needed per crtc*/
5cc6dcbd 10098 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 10099 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 10100
54d76575 10101 if (dm_new_crtc_state->stream)
eb3dc897 10102 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 10103 dm, crtc, wait_for_vblank);
e7b07cee
HW
10104 }
10105
6ce8f316
NK
10106 /* Update audio instances for each connector. */
10107 amdgpu_dm_commit_audio(dev, state);
10108
7230362c 10109 /* restore the backlight level */
7fd13bae
AD
10110 for (i = 0; i < dm->num_of_edps; i++) {
10111 if (dm->backlight_dev[i] &&
4052287a 10112 (dm->actual_brightness[i] != dm->brightness[i]))
7fd13bae
AD
10113 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10114 }
83a3439d 10115
e7b07cee
HW
10116 /*
10117 * send vblank event on all events not handled in flip and
10118 * mark consumed event for drm_atomic_helper_commit_hw_done
10119 */
4a580877 10120 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 10121 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 10122
0bc9706d
LSL
10123 if (new_crtc_state->event)
10124 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 10125
0bc9706d 10126 new_crtc_state->event = NULL;
e7b07cee 10127 }
4a580877 10128 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 10129
29c8f234
LL
10130 /* Signal HW programming completion */
10131 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
10132
10133 if (wait_for_vblank)
320a1274 10134 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
10135
10136 drm_atomic_helper_cleanup_planes(dev, state);
97028037 10137
5f6fab24
AD
10138 /* return the stolen vga memory back to VRAM */
10139 if (!adev->mman.keep_stolen_vga_memory)
10140 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10141 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10142
1f6010a9
DF
10143 /*
10144 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
10145 * so we can put the GPU into runtime suspend if we're not driving any
10146 * displays anymore
10147 */
fe2a1965
LP
10148 for (i = 0; i < crtc_disable_count; i++)
10149 pm_runtime_put_autosuspend(dev->dev);
97028037 10150 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
10151
10152 if (dc_state_temp)
10153 dc_release_state(dc_state_temp);
e7b07cee
HW
10154}
10155
10156
10157static int dm_force_atomic_commit(struct drm_connector *connector)
10158{
10159 int ret = 0;
10160 struct drm_device *ddev = connector->dev;
10161 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10162 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10163 struct drm_plane *plane = disconnected_acrtc->base.primary;
10164 struct drm_connector_state *conn_state;
10165 struct drm_crtc_state *crtc_state;
10166 struct drm_plane_state *plane_state;
10167
10168 if (!state)
10169 return -ENOMEM;
10170
10171 state->acquire_ctx = ddev->mode_config.acquire_ctx;
10172
10173 /* Construct an atomic state to restore previous display setting */
10174
10175 /*
10176 * Attach connectors to drm_atomic_state
10177 */
10178 conn_state = drm_atomic_get_connector_state(state, connector);
10179
10180 ret = PTR_ERR_OR_ZERO(conn_state);
10181 if (ret)
2dc39051 10182 goto out;
e7b07cee
HW
10183
10184 /* Attach crtc to drm_atomic_state */
10185 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10186
10187 ret = PTR_ERR_OR_ZERO(crtc_state);
10188 if (ret)
2dc39051 10189 goto out;
e7b07cee
HW
10190
10191 /* force a restore */
10192 crtc_state->mode_changed = true;
10193
10194 /* Attach plane to drm_atomic_state */
10195 plane_state = drm_atomic_get_plane_state(state, plane);
10196
10197 ret = PTR_ERR_OR_ZERO(plane_state);
10198 if (ret)
2dc39051 10199 goto out;
e7b07cee
HW
10200
10201 /* Call commit internally with the state we just constructed */
10202 ret = drm_atomic_commit(state);
e7b07cee 10203
2dc39051 10204out:
e7b07cee 10205 drm_atomic_state_put(state);
2dc39051
VL
10206 if (ret)
10207 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
10208
10209 return ret;
10210}
10211
10212/*
1f6010a9
DF
10213 * This function handles all cases when set mode does not come upon hotplug.
10214 * This includes when a display is unplugged then plugged back into the
10215 * same port and when running without usermode desktop manager support
e7b07cee 10216 */
3ee6b26b
AD
10217void dm_restore_drm_connector_state(struct drm_device *dev,
10218 struct drm_connector *connector)
e7b07cee 10219{
c84dec2f 10220 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
10221 struct amdgpu_crtc *disconnected_acrtc;
10222 struct dm_crtc_state *acrtc_state;
10223
10224 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10225 return;
10226
10227 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
10228 if (!disconnected_acrtc)
10229 return;
e7b07cee 10230
70e8ffc5
HW
10231 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10232 if (!acrtc_state->stream)
e7b07cee
HW
10233 return;
10234
10235 /*
10236 * If the previous sink is not released and different from the current,
10237 * we deduce we are in a state where we can not rely on usermode call
10238 * to turn on the display, so we do it here
10239 */
10240 if (acrtc_state->stream->sink != aconnector->dc_sink)
10241 dm_force_atomic_commit(&aconnector->base);
10242}
10243
1f6010a9 10244/*
e7b07cee
HW
10245 * Grabs all modesetting locks to serialize against any blocking commits,
10246 * and waits for completion of all non-blocking commits.
10247 */
3ee6b26b
AD
10248static int do_aquire_global_lock(struct drm_device *dev,
10249 struct drm_atomic_state *state)
e7b07cee
HW
10250{
10251 struct drm_crtc *crtc;
10252 struct drm_crtc_commit *commit;
10253 long ret;
10254
1f6010a9
DF
10255 /*
10256 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
10257 * ensure that when the framework releases it, the
10258 * extra locks we take here will get released too.
10259 */
10260 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10261 if (ret)
10262 return ret;
10263
10264 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10265 spin_lock(&crtc->commit_lock);
10266 commit = list_first_entry_or_null(&crtc->commit_list,
10267 struct drm_crtc_commit, commit_entry);
10268 if (commit)
10269 drm_crtc_commit_get(commit);
10270 spin_unlock(&crtc->commit_lock);
10271
10272 if (!commit)
10273 continue;
10274
1f6010a9
DF
10275 /*
10276 * Make sure all pending HW programming completed and
e7b07cee
HW
10277 * page flips done
10278 */
10279 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10280
10281 if (ret > 0)
10282 ret = wait_for_completion_interruptible_timeout(
10283 &commit->flip_done, 10*HZ);
10284
10285 if (ret == 0)
10286 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 10287 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
10288
10289 drm_crtc_commit_put(commit);
10290 }
10291
10292 return ret < 0 ? ret : 0;
10293}
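/*
 * For reference, the return convention relied on above:
 * wait_for_completion_interruptible_timeout() returns a negative errno
 * if interrupted, 0 on timeout (reported with DRM_ERROR here), and the
 * remaining jiffies (> 0) on completion, which is why the flip_done
 * wait is only attempted when the hw_done wait returned > 0.
 */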
10294
bb47de73
NK
10295static void get_freesync_config_for_crtc(
10296 struct dm_crtc_state *new_crtc_state,
10297 struct dm_connector_state *new_con_state)
98e6436d
AK
10298{
10299 struct mod_freesync_config config = {0};
98e6436d
AK
10300 struct amdgpu_dm_connector *aconnector =
10301 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 10302 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 10303 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 10304 bool fs_vid_mode = false;
98e6436d 10305
a057ec46 10306 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
10307 vrefresh >= aconnector->min_vfreq &&
10308 vrefresh <= aconnector->max_vfreq;
bb47de73 10309
a057ec46
IB
10310 if (new_crtc_state->vrr_supported) {
10311 new_crtc_state->stream->ignore_msa_timing_param = true;
a85ba005
NC
10312 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10313
10314 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10315 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 10316 config.vsif_supported = true;
180db303 10317 config.btr = true;
98e6436d 10318
a85ba005
NC
10319 if (fs_vid_mode) {
10320 config.state = VRR_STATE_ACTIVE_FIXED;
10321 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10322 goto out;
10323 } else if (new_crtc_state->base.vrr_enabled) {
10324 config.state = VRR_STATE_ACTIVE_VARIABLE;
10325 } else {
10326 config.state = VRR_STATE_INACTIVE;
10327 }
10328 }
10329out:
bb47de73
NK
10330 new_crtc_state->freesync_config = config;
10331}
98e6436d 10332
bb47de73
NK
10333static void reset_freesync_config_for_crtc(
10334 struct dm_crtc_state *new_crtc_state)
10335{
10336 new_crtc_state->vrr_supported = false;
98e6436d 10337
bb47de73
NK
10338 memset(&new_crtc_state->vrr_infopacket, 0,
10339 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
10340}
10341
a85ba005
NC
10342static bool
10343is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10344 struct drm_crtc_state *new_crtc_state)
10345{
1cbd7887 10346 const struct drm_display_mode *old_mode, *new_mode;
a85ba005
NC
10347
10348 if (!old_crtc_state || !new_crtc_state)
10349 return false;
10350
1cbd7887
VS
10351 old_mode = &old_crtc_state->mode;
10352 new_mode = &new_crtc_state->mode;
10353
10354 if (old_mode->clock == new_mode->clock &&
10355 old_mode->hdisplay == new_mode->hdisplay &&
10356 old_mode->vdisplay == new_mode->vdisplay &&
10357 old_mode->htotal == new_mode->htotal &&
10358 old_mode->vtotal != new_mode->vtotal &&
10359 old_mode->hsync_start == new_mode->hsync_start &&
10360 old_mode->vsync_start != new_mode->vsync_start &&
10361 old_mode->hsync_end == new_mode->hsync_end &&
10362 old_mode->vsync_end != new_mode->vsync_end &&
10363 old_mode->hskew == new_mode->hskew &&
10364 old_mode->vscan == new_mode->vscan &&
10365 (old_mode->vsync_end - old_mode->vsync_start) ==
10366 (new_mode->vsync_end - new_mode->vsync_start))
a85ba005
NC
10367 return true;
10368
10369 return false;
10370}
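/*
 * Example (assumed timings for illustration): two "freesync video"
 * variants of a 1920x1080 mode share the pixel clock and all horizontal
 * timing and differ only in vertical blanking, e.g. vtotal 1125 for
 * 60 Hz vs. vtotal 1406 for 48 Hz. For such pairs this helper returns
 * true, so the refresh change can be applied as a front-porch stretch
 * without a full modeset.
 */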
10371
10372static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10373 uint64_t num, den, res;
10374 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10375
10376 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10377
10378 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10379 den = (unsigned long long)new_crtc_state->mode.htotal *
10380 (unsigned long long)new_crtc_state->mode.vtotal;
10381
10382 res = div_u64(num, den);
10383 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10384}
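/*
 * Worked example of the fixed-point math above (mode values assumed for
 * illustration): for a 1080p CEA mode with clock = 148500 kHz,
 * htotal = 2200 and vtotal = 1125,
 *
 *	num = 148500 * 1000 * 1000000 = 148,500,000,000,000
 *	den = 2200 * 1125             = 2,475,000
 *	res = num / den               = 60,000,000 uHz = 60 Hz
 *
 * i.e. fixed_refresh_in_uhz holds the nominal refresh rate in micro-Hz.
 */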
10385
f11d9373 10386static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
17ce8a69
RL
10387 struct drm_atomic_state *state,
10388 struct drm_crtc *crtc,
10389 struct drm_crtc_state *old_crtc_state,
10390 struct drm_crtc_state *new_crtc_state,
10391 bool enable,
10392 bool *lock_and_validation_needed)
e7b07cee 10393{
eb3dc897 10394 struct dm_atomic_state *dm_state = NULL;
54d76575 10395 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 10396 struct dc_stream_state *new_stream;
62f55537 10397 int ret = 0;
d4d4a645 10398
1f6010a9
DF
10399 /*
10400 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10401 * update changed items
10402 */
4b9674e5
LL
10403 struct amdgpu_crtc *acrtc = NULL;
10404 struct amdgpu_dm_connector *aconnector = NULL;
10405 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10406 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 10407
4b9674e5 10408 new_stream = NULL;
9635b754 10409
4b9674e5
LL
10410 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10411 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10412 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 10413 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 10414
4b9674e5
LL
10415 /* TODO This hack should go away */
10416 if (aconnector && enable) {
10417 /* Make sure fake sink is created in plug-in scenario */
10418 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10419 &aconnector->base);
10420 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10421 &aconnector->base);
19f89e23 10422
4b9674e5
LL
10423 if (IS_ERR(drm_new_conn_state)) {
10424 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10425 goto fail;
10426 }
19f89e23 10427
4b9674e5
LL
10428 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10429 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 10430
02d35a67
JFZ
10431 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10432 goto skip_modeset;
10433
cbd14ae7
SW
10434 new_stream = create_validate_stream_for_sink(aconnector,
10435 &new_crtc_state->mode,
10436 dm_new_conn_state,
10437 dm_old_crtc_state->stream);
19f89e23 10438
4b9674e5
LL
10439 /*
10440 * we can have no stream on ACTION_SET if a display
10441 * was disconnected during S3, in this case it is not an
10442 * error, the OS will be updated after detection, and
10443 * will do the right thing on next atomic commit
10444 */
19f89e23 10445
4b9674e5
LL
10446 if (!new_stream) {
10447 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10448 __func__, acrtc->base.base.id);
10449 ret = -ENOMEM;
10450 goto fail;
10451 }
e7b07cee 10452
3d4e52d0
VL
10453 /*
10454 * TODO: Check VSDB bits to decide whether this should
10455 * be enabled or not.
10456 */
10457 new_stream->triggered_crtc_reset.enabled =
10458 dm->force_timing_sync;
10459
4b9674e5 10460 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 10461
88694af9
NK
10462 ret = fill_hdr_info_packet(drm_new_conn_state,
10463 &new_stream->hdr_static_metadata);
10464 if (ret)
10465 goto fail;
10466
7e930949
NK
10467 /*
10468 * If we already removed the old stream from the context
10469 * (and set the new stream to NULL) then we can't reuse
10470 * the old stream even if the stream and scaling are unchanged.
10471 * We'll hit the BUG_ON and black screen.
10472 *
10473 * TODO: Refactor this function to allow this check to work
10474 * in all conditions.
10475 */
de05abe6 10476 if (dm_new_crtc_state->stream &&
a85ba005
NC
10477 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10478 goto skip_modeset;
10479
7e930949
NK
10480 if (dm_new_crtc_state->stream &&
10481 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
10482 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10483 new_crtc_state->mode_changed = false;
10484 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10485 new_crtc_state->mode_changed);
62f55537 10486 }
4b9674e5 10487 }
b830ebc9 10488
02d35a67 10489 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
10490 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10491 goto skip_modeset;
e7b07cee 10492
9f07550b 10493 drm_dbg_state(state->dev,
4b9674e5
LL
10494 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10495 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10496 "connectors_changed:%d\n",
10497 acrtc->crtc_id,
10498 new_crtc_state->enable,
10499 new_crtc_state->active,
10500 new_crtc_state->planes_changed,
10501 new_crtc_state->mode_changed,
10502 new_crtc_state->active_changed,
10503 new_crtc_state->connectors_changed);
62f55537 10504
4b9674e5
LL
10505 /* Remove stream for any changed/disabled CRTC */
10506 if (!enable) {
62f55537 10507
4b9674e5
LL
10508 if (!dm_old_crtc_state->stream)
10509 goto skip_modeset;
eb3dc897 10510
de05abe6 10511 if (dm_new_crtc_state->stream &&
a85ba005
NC
10512 is_timing_unchanged_for_freesync(new_crtc_state,
10513 old_crtc_state)) {
10514 new_crtc_state->mode_changed = false;
10515 DRM_DEBUG_DRIVER(
10516 "Mode change not required for front porch change, "
10517 "setting mode_changed to %d",
10518 new_crtc_state->mode_changed);
10519
10520 set_freesync_fixed_config(dm_new_crtc_state);
10521
10522 goto skip_modeset;
de05abe6 10523 } else if (aconnector &&
a85ba005
NC
10524 is_freesync_video_mode(&new_crtc_state->mode,
10525 aconnector)) {
e88ebd83
SC
10526 struct drm_display_mode *high_mode;
10527
10528 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10529 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10530 set_freesync_fixed_config(dm_new_crtc_state);
10531 }
a85ba005
NC
10532 }
10533
4b9674e5
LL
10534 ret = dm_atomic_get_state(state, &dm_state);
10535 if (ret)
10536 goto fail;
e7b07cee 10537
4b9674e5
LL
10538 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10539 crtc->base.id);
62f55537 10540
4b9674e5
LL
10541 /* i.e. reset mode */
10542 if (dc_remove_stream_from_ctx(
10543 dm->dc,
10544 dm_state->context,
10545 dm_old_crtc_state->stream) != DC_OK) {
10546 ret = -EINVAL;
10547 goto fail;
10548 }
62f55537 10549
4b9674e5
LL
10550 dc_stream_release(dm_old_crtc_state->stream);
10551 dm_new_crtc_state->stream = NULL;
bb47de73 10552
4b9674e5 10553 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 10554
4b9674e5 10555 *lock_and_validation_needed = true;
62f55537 10556
4b9674e5
LL
10557 } else { /* Add stream for any updated/enabled CRTC */
10558 /*
10559 * Quick fix to prevent NULL pointer on new_stream when
10560 * added MST connectors not found in existing crtc_state in the chained mode
10561 * TODO: need to dig out the root cause of that
10562 */
84a8b390 10563 if (!aconnector)
4b9674e5 10564 goto skip_modeset;
62f55537 10565
4b9674e5
LL
10566 if (modereset_required(new_crtc_state))
10567 goto skip_modeset;
62f55537 10568
4b9674e5
LL
10569 if (modeset_required(new_crtc_state, new_stream,
10570 dm_old_crtc_state->stream)) {
62f55537 10571
4b9674e5 10572 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 10573
4b9674e5
LL
10574 ret = dm_atomic_get_state(state, &dm_state);
10575 if (ret)
10576 goto fail;
27b3f4fc 10577
4b9674e5 10578 dm_new_crtc_state->stream = new_stream;
62f55537 10579
4b9674e5 10580 dc_stream_retain(new_stream);
1dc90497 10581
4711c033
LT
10582 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10583 crtc->base.id);
1dc90497 10584
4b9674e5
LL
10585 if (dc_add_stream_to_ctx(
10586 dm->dc,
10587 dm_state->context,
10588 dm_new_crtc_state->stream) != DC_OK) {
10589 ret = -EINVAL;
10590 goto fail;
9b690ef3
BL
10591 }
10592
4b9674e5
LL
10593 *lock_and_validation_needed = true;
10594 }
10595 }
e277adc5 10596
4b9674e5
LL
10597skip_modeset:
10598 /* Release extra reference */
10599 if (new_stream)
10600 dc_stream_release(new_stream);
e277adc5 10601
4b9674e5
LL
10602 /*
10603 * We want to do dc stream updates that do not require a
10604 * full modeset below.
10605 */
2afda735 10606 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
10607 return 0;
10608 /*
10609 * Given above conditions, the dc state cannot be NULL because:
10610 * 1. We're in the process of enabling CRTCs (just been added
10611 * to the dc context, or already is on the context)
10612 * 2. Has a valid connector attached, and
10613 * 3. Is currently active and enabled.
10614 * => The dc stream state currently exists.
10615 */
10616 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 10617
4b9674e5 10618 /* Scaling or underscan settings */
c521fc31
RL
10619 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10620 drm_atomic_crtc_needs_modeset(new_crtc_state))
4b9674e5
LL
10621 update_stream_scaling_settings(
10622 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 10623
b05e2c5e
DF
10624 /* ABM settings */
10625 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10626
4b9674e5
LL
10627 /*
10628 * Color management settings. We also update color properties
10629 * when a modeset is needed, to ensure it gets reprogrammed.
10630 */
10631 if (dm_new_crtc_state->base.color_mgmt_changed ||
10632 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 10633 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
10634 if (ret)
10635 goto fail;
62f55537 10636 }
e7b07cee 10637
4b9674e5
LL
10638 /* Update Freesync settings. */
10639 get_freesync_config_for_crtc(dm_new_crtc_state,
10640 dm_new_conn_state);
10641
62f55537 10642 return ret;
9635b754
DS
10643
10644fail:
10645 if (new_stream)
10646 dc_stream_release(new_stream);
10647 return ret;
62f55537 10648}
9b690ef3 10649
f6ff2a08
NK
10650static bool should_reset_plane(struct drm_atomic_state *state,
10651 struct drm_plane *plane,
10652 struct drm_plane_state *old_plane_state,
10653 struct drm_plane_state *new_plane_state)
10654{
10655 struct drm_plane *other;
10656 struct drm_plane_state *old_other_state, *new_other_state;
10657 struct drm_crtc_state *new_crtc_state;
10658 int i;
10659
70a1efac
NK
10660 /*
10661 * TODO: Remove this hack once the checks below are sufficient
10662 * to determine when we need to reset all the planes on
10663 * the stream.
10664 */
10665 if (state->allow_modeset)
10666 return true;
10667
f6ff2a08
NK
10668 /* Exit early if we know that we're adding or removing the plane. */
10669 if (old_plane_state->crtc != new_plane_state->crtc)
10670 return true;
10671
10672 /* old crtc == new_crtc == NULL, plane not in context. */
10673 if (!new_plane_state->crtc)
10674 return false;
10675
10676 new_crtc_state =
10677 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10678
10679 if (!new_crtc_state)
10680 return true;
10681
7316c4ad
NK
10682 /* CRTC Degamma changes currently require us to recreate planes. */
10683 if (new_crtc_state->color_mgmt_changed)
10684 return true;
10685
f6ff2a08
NK
10686 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10687 return true;
10688
10689 /*
10690 * If there are any new primary or overlay planes being added or
10691 * removed then the z-order can potentially change. To ensure
10692 * correct z-order and pipe acquisition the current DC architecture
10693 * requires us to remove and recreate all existing planes.
10694 *
10695 * TODO: Come up with a more elegant solution for this.
10696 */
10697 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 10698 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
10699 if (other->type == DRM_PLANE_TYPE_CURSOR)
10700 continue;
10701
10702 if (old_other_state->crtc != new_plane_state->crtc &&
10703 new_other_state->crtc != new_plane_state->crtc)
10704 continue;
10705
10706 if (old_other_state->crtc != new_other_state->crtc)
10707 return true;
10708
dc4cb30d
NK
10709 /* Src/dst size and scaling updates. */
10710 if (old_other_state->src_w != new_other_state->src_w ||
10711 old_other_state->src_h != new_other_state->src_h ||
10712 old_other_state->crtc_w != new_other_state->crtc_w ||
10713 old_other_state->crtc_h != new_other_state->crtc_h)
10714 return true;
10715
10716 /* Rotation / mirroring updates. */
10717 if (old_other_state->rotation != new_other_state->rotation)
10718 return true;
10719
10720 /* Blending updates. */
10721 if (old_other_state->pixel_blend_mode !=
10722 new_other_state->pixel_blend_mode)
10723 return true;
10724
10725 /* Alpha updates. */
10726 if (old_other_state->alpha != new_other_state->alpha)
10727 return true;
10728
10729 /* Colorspace changes. */
10730 if (old_other_state->color_range != new_other_state->color_range ||
10731 old_other_state->color_encoding != new_other_state->color_encoding)
10732 return true;
10733
9a81cc60
NK
10734 /* Framebuffer checks fall at the end. */
10735 if (!old_other_state->fb || !new_other_state->fb)
10736 continue;
10737
10738 /* Pixel format changes can require bandwidth updates. */
10739 if (old_other_state->fb->format != new_other_state->fb->format)
10740 return true;
10741
6eed95b0
BN
10742 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10743 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
10744
10745 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
10746 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10747 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
10748 return true;
10749 }
10750
10751 return false;
10752}
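/*
 * Example: with two overlapping overlay planes on one CRTC, rotating
 * one of them from DRM_MODE_ROTATE_0 to DRM_MODE_ROTATE_90 makes this
 * helper return true for *both* planes, since DC currently has to
 * remove and recreate every plane on the stream to guarantee correct
 * z-order and pipe acquisition.
 */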
10753
b0455fda
SS
10754static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10755 struct drm_plane_state *new_plane_state,
10756 struct drm_framebuffer *fb)
10757{
e72868c4
SS
10758 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10759 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 10760 unsigned int pitch;
e72868c4 10761 bool linear;
b0455fda
SS
10762
10763 if (fb->width > new_acrtc->max_cursor_width ||
10764 fb->height > new_acrtc->max_cursor_height) {
10765 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10766 new_plane_state->fb->width,
10767 new_plane_state->fb->height);
10768 return -EINVAL;
10769 }
10770 if (new_plane_state->src_w != fb->width << 16 ||
10771 new_plane_state->src_h != fb->height << 16) {
10772 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10773 return -EINVAL;
10774 }
10775
10776 /* Pitch in pixels */
10777 pitch = fb->pitches[0] / fb->format->cpp[0];
10778
10779 if (fb->width != pitch) {
10780 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10781 fb->width, pitch);
10782 return -EINVAL;
10783 }
10784
10785 switch (pitch) {
10786 case 64:
10787 case 128:
10788 case 256:
10789 /* FB pitch is supported by cursor plane */
10790 break;
10791 default:
10792 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10793 return -EINVAL;
10794 }
10795
e72868c4
SS
10796 /* Core DRM takes care of checking FB modifiers, so we only need to
10797 * check tiling flags when the FB doesn't have a modifier. */
10798 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10799 if (adev->family < AMDGPU_FAMILY_AI) {
10800 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10801 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10802 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10803 } else {
10804 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10805 }
10806 if (!linear) {
10807 DRM_DEBUG_ATOMIC("Cursor FB not linear");
10808 return -EINVAL;
10809 }
10810 }
10811
b0455fda
SS
10812 return 0;
10813}
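/*
 * Example (values assumed for illustration): an ARGB8888 cursor FB of
 * 64x64 pixels has fb->pitches[0] = 256 bytes and fb->format->cpp[0] = 4,
 * giving a pitch of 64 pixels; that matches fb->width and is one of the
 * supported cursor pitches (64/128/256), so the checks above pass.
 */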
10814
9e869063
LL
10815static int dm_update_plane_state(struct dc *dc,
10816 struct drm_atomic_state *state,
10817 struct drm_plane *plane,
10818 struct drm_plane_state *old_plane_state,
10819 struct drm_plane_state *new_plane_state,
10820 bool enable,
10821 bool *lock_and_validation_needed)
62f55537 10822{
eb3dc897
NK
10823
10824 struct dm_atomic_state *dm_state = NULL;
62f55537 10825 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 10826 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 10827 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 10828 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 10829 struct amdgpu_crtc *new_acrtc;
f6ff2a08 10830 bool needs_reset;
62f55537 10831 int ret = 0;
e7b07cee 10832
9b690ef3 10833
9e869063
LL
10834 new_plane_crtc = new_plane_state->crtc;
10835 old_plane_crtc = old_plane_state->crtc;
10836 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10837 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 10838
626bf90f
SS
10839 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10840 if (!enable || !new_plane_crtc ||
10841 drm_atomic_plane_disabling(plane->state, new_plane_state))
10842 return 0;
10843
10844 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10845
5f581248
SS
10846 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10847 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10848 return -EINVAL;
10849 }
10850
24f99d2b 10851 if (new_plane_state->fb) {
b0455fda
SS
10852 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10853 new_plane_state->fb);
10854 if (ret)
10855 return ret;
24f99d2b
SS
10856 }
10857
9e869063 10858 return 0;
626bf90f 10859 }
9b690ef3 10860
f6ff2a08
NK
10861 needs_reset = should_reset_plane(state, plane, old_plane_state,
10862 new_plane_state);
10863
9e869063
LL
10864 /* Remove any changed/removed planes */
10865 if (!enable) {
f6ff2a08 10866 if (!needs_reset)
9e869063 10867 return 0;
a7b06724 10868
9e869063
LL
10869 if (!old_plane_crtc)
10870 return 0;
62f55537 10871
9e869063
LL
10872 old_crtc_state = drm_atomic_get_old_crtc_state(
10873 state, old_plane_crtc);
10874 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 10875
9e869063
LL
10876 if (!dm_old_crtc_state->stream)
10877 return 0;
62f55537 10878
9e869063
LL
10879 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10880 plane->base.id, old_plane_crtc->base.id);
9b690ef3 10881
9e869063
LL
10882 ret = dm_atomic_get_state(state, &dm_state);
10883 if (ret)
10884 return ret;
eb3dc897 10885
9e869063
LL
10886 if (!dc_remove_plane_from_context(
10887 dc,
10888 dm_old_crtc_state->stream,
10889 dm_old_plane_state->dc_state,
10890 dm_state->context)) {
62f55537 10891
c3537613 10892 return -EINVAL;
9e869063 10893 }
e7b07cee 10894
9b690ef3 10895
9e869063
LL
10896 dc_plane_state_release(dm_old_plane_state->dc_state);
10897 dm_new_plane_state->dc_state = NULL;
1dc90497 10898
9e869063 10899 *lock_and_validation_needed = true;
1dc90497 10900
9e869063
LL
10901 } else { /* Add new planes */
10902 struct dc_plane_state *dc_new_plane_state;
1dc90497 10903
9e869063
LL
10904 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10905 return 0;
e7b07cee 10906
9e869063
LL
10907 if (!new_plane_crtc)
10908 return 0;
e7b07cee 10909
9e869063
LL
10910 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10911 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 10912
9e869063
LL
10913 if (!dm_new_crtc_state->stream)
10914 return 0;
62f55537 10915
f6ff2a08 10916 if (!needs_reset)
9e869063 10917 return 0;
62f55537 10918
8c44515b
AP
10919 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10920 if (ret)
10921 return ret;
10922
9e869063 10923 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 10924
9e869063
LL
10925 dc_new_plane_state = dc_create_plane_state(dc);
10926 if (!dc_new_plane_state)
10927 return -ENOMEM;
62f55537 10928
4711c033
LT
10929 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10930 plane->base.id, new_plane_crtc->base.id);
8c45c5db 10931
695af5f9 10932 ret = fill_dc_plane_attributes(
1348969a 10933 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
10934 dc_new_plane_state,
10935 new_plane_state,
10936 new_crtc_state);
10937 if (ret) {
10938 dc_plane_state_release(dc_new_plane_state);
10939 return ret;
10940 }
62f55537 10941
9e869063
LL
10942 ret = dm_atomic_get_state(state, &dm_state);
10943 if (ret) {
10944 dc_plane_state_release(dc_new_plane_state);
10945 return ret;
10946 }
eb3dc897 10947
9e869063
LL
10948 /*
10949 * Any atomic check errors that occur after this will
10950 * not need a release. The plane state will be attached
10951 * to the stream, and therefore part of the atomic
10952 * state. It'll be released when the atomic state is
10953 * cleaned.
10954 */
10955 if (!dc_add_plane_to_context(
10956 dc,
10957 dm_new_crtc_state->stream,
10958 dc_new_plane_state,
10959 dm_state->context)) {
62f55537 10960
9e869063
LL
10961 dc_plane_state_release(dc_new_plane_state);
10962 return -EINVAL;
10963 }
8c45c5db 10964
9e869063 10965 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 10966
214993e1
ML
10967 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10968
9e869063
LL
10969 /* Tell DC to do a full surface update every time there
10970 * is a plane change. Inefficient, but works for now.
10971 */
10972 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10973
10974 *lock_and_validation_needed = true;
62f55537 10975 }
e7b07cee
HW
10976
10977
62f55537
AG
10978 return ret;
10979}
a87fa993 10980
69cb5629
VZ
10981static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10982 int *src_w, int *src_h)
10983{
10984 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10985 case DRM_MODE_ROTATE_90:
10986 case DRM_MODE_ROTATE_270:
10987 *src_w = plane_state->src_h >> 16;
10988 *src_h = plane_state->src_w >> 16;
10989 break;
10990 case DRM_MODE_ROTATE_0:
10991 case DRM_MODE_ROTATE_180:
10992 default:
10993 *src_w = plane_state->src_w >> 16;
10994 *src_h = plane_state->src_h >> 16;
10995 break;
10996 }
10997}
10998
12f4849a
SS
10999static int dm_check_crtc_cursor(struct drm_atomic_state *state,
11000 struct drm_crtc *crtc,
11001 struct drm_crtc_state *new_crtc_state)
11002{
d1bfbe8a
SS
11003 struct drm_plane *cursor = crtc->cursor, *underlying;
11004 struct drm_plane_state *new_cursor_state, *new_underlying_state;
11005 int i;
11006 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
69cb5629
VZ
11007 int cursor_src_w, cursor_src_h;
11008 int underlying_src_w, underlying_src_h;
12f4849a
SS
11009
11010 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
11011 * cursor per pipe but it's going to inherit the scaling and
11012 * positioning from the underlying pipe. Check that the cursor plane's
d1bfbe8a 11013 * scaling matches the underlying planes'. */
12f4849a 11014
d1bfbe8a
SS
11015 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
11016 if (!new_cursor_state || !new_cursor_state->fb) {
12f4849a
SS
11017 return 0;
11018 }
11019
69cb5629
VZ
11020 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
11021 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
11022 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
12f4849a 11023
d1bfbe8a
SS
11024 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
11025 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
11026 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
11027 continue;
12f4849a 11028
d1bfbe8a
SS
11029 /* Ignore disabled planes */
11030 if (!new_underlying_state->fb)
11031 continue;
11032
69cb5629
VZ
11033 dm_get_oriented_plane_size(new_underlying_state,
11034 &underlying_src_w, &underlying_src_h);
11035 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
11036 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
d1bfbe8a
SS
11037
11038 if (cursor_scale_w != underlying_scale_w ||
11039 cursor_scale_h != underlying_scale_h) {
11040 drm_dbg_atomic(crtc->dev,
11041 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
11042 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
11043 return -EINVAL;
11044 }
11045
11046 /* If this plane covers the whole CRTC, no need to check planes underneath */
11047 if (new_underlying_state->crtc_x <= 0 &&
11048 new_underlying_state->crtc_y <= 0 &&
11049 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
11050 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
11051 break;
12f4849a
SS
11052 }
11053
11054 return 0;
11055}
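/*
 * Example of the per-mille scale comparison above (values assumed for
 * illustration): a 64x64 cursor shown at 64x64 has
 * scale_w = 64 * 1000 / 64 = 1000, while an underlying 3840-wide plane
 * scanned out at 1920 has scale_w = 1920 * 1000 / 3840 = 500; the
 * mismatch is rejected with -EINVAL because the shared cursor pipe
 * would inherit the underlying plane's scaling and distort the cursor.
 */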
11056
e10517b3 11057#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
11058static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11059{
11060 struct drm_connector *connector;
128f8ed5 11061 struct drm_connector_state *conn_state, *old_conn_state;
44be939f
ML
11062 struct amdgpu_dm_connector *aconnector = NULL;
11063 int i;
128f8ed5
RL
11064 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11065 if (!conn_state->crtc)
11066 conn_state = old_conn_state;
11067
44be939f
ML
11068 if (conn_state->crtc != crtc)
11069 continue;
11070
11071 aconnector = to_amdgpu_dm_connector(connector);
11072 if (!aconnector->port || !aconnector->mst_port)
11073 aconnector = NULL;
11074 else
11075 break;
11076 }
11077
11078 if (!aconnector)
11079 return 0;
11080
11081 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
11082}
e10517b3 11083#endif
44be939f 11084
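/*
 * For orientation: amdgpu_dm_atomic_check() below is the driver's hook
 * for DRM's atomic check phase. A sketch of how such a callback is
 * typically registered (the structure name here is hypothetical):
 *
 *	static const struct drm_mode_config_funcs dm_mode_funcs_sketch = {
 *		.atomic_check  = amdgpu_dm_atomic_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 *
 * DRM invokes it for every atomic ioctl, including DRM_MODE_ATOMIC_TEST_ONLY
 * requests, so it must not touch hardware or the committed state.
 */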
b8592b48
LL
11085/**
11086 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
11087 * @dev: The DRM device
11088 * @state: The atomic state to commit
11089 *
11090 * Validate that the given atomic state is programmable by DC into hardware.
11091 * This involves constructing a &struct dc_state reflecting the new hardware
11092 * state we wish to commit, then querying DC to see if it is programmable. It's
11093 * important not to modify the existing DC state. Otherwise, atomic_check
11094 * may unexpectedly commit hardware changes.
11095 *
11096 * When validating the DC state, it's important that the right locks are
 11097 * acquired. For the full-update case, which removes/adds/updates streams on
 11098 * one CRTC while flipping on another CRTC, acquiring the global lock guarantees
 11099 * that any such full-update commit will wait for completion of any outstanding
f6d7c7fa 11100 * flip using DRM's synchronization events.
b8592b48
LL
11101 *
 11102 * Note that DM adds the affected connectors for all CRTCs in state, even when
 11103 * that might not seem necessary. This is because DC stream creation requires the
11104 * DC sink, which is tied to the DRM connector state. Cleaning this up should
11105 * be possible but non-trivial - a possible TODO item.
11106 *
 11107 * Return: 0 on success, or a negative error code if validation failed.
11108 */
7578ecda
AD
11109static int amdgpu_dm_atomic_check(struct drm_device *dev,
11110 struct drm_atomic_state *state)
62f55537 11111{
1348969a 11112 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 11113 struct dm_atomic_state *dm_state = NULL;
62f55537 11114 struct dc *dc = adev->dm.dc;
62f55537 11115 struct drm_connector *connector;
c2cea706 11116 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 11117 struct drm_crtc *crtc;
fc9e9920 11118 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
11119 struct drm_plane *plane;
11120 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 11121 enum dc_status status;
1e88ad0a 11122 int ret, i;
62f55537 11123 bool lock_and_validation_needed = false;
214993e1 11124 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
6513104b
HW
11125#if defined(CONFIG_DRM_AMD_DC_DCN)
11126 struct dsc_mst_fairness_vars vars[MAX_PIPES];
41724ea2
BL
11127 struct drm_dp_mst_topology_state *mst_state;
11128 struct drm_dp_mst_topology_mgr *mgr;
6513104b 11129#endif
62f55537 11130
e8a98235 11131 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 11132
62f55537 11133 ret = drm_atomic_helper_check_modeset(dev, state);
68ca1c3e
S
11134 if (ret) {
11135 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
01e28f9c 11136 goto fail;
68ca1c3e 11137 }
62f55537 11138
c5892a10
SW
11139 /* Check connector changes */
11140 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11141 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11142 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11143
11144 /* Skip connectors that are disabled or part of modeset already. */
11145 if (!old_con_state->crtc && !new_con_state->crtc)
11146 continue;
11147
11148 if (!new_con_state->crtc)
11149 continue;
11150
11151 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11152 if (IS_ERR(new_crtc_state)) {
68ca1c3e 11153 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
c5892a10
SW
11154 ret = PTR_ERR(new_crtc_state);
11155 goto fail;
11156 }
11157
11158 if (dm_old_con_state->abm_level !=
11159 dm_new_con_state->abm_level)
11160 new_crtc_state->connectors_changed = true;
11161 }
11162
e10517b3 11163#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 11164 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
11165 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11166 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11167 ret = add_affected_mst_dsc_crtcs(state, crtc);
68ca1c3e
S
11168 if (ret) {
11169 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
44be939f 11170 goto fail;
68ca1c3e 11171 }
44be939f
ML
11172 }
11173 }
71be4b16 11174 if (!pre_validate_dsc(state, &dm_state, vars)) {
11175 ret = -EINVAL;
11176 goto fail;
11177 }
44be939f 11178 }
e10517b3 11179#endif
1e88ad0a 11180 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
11181 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11182
1e88ad0a 11183 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 11184 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
11185 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11186 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 11187 continue;
7bef1af3 11188
03fc4cf4 11189 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
68ca1c3e
S
11190 if (ret) {
11191 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
03fc4cf4 11192 goto fail;
68ca1c3e 11193 }
03fc4cf4 11194
1e88ad0a
S
11195 if (!new_crtc_state->enable)
11196 continue;
fc9e9920 11197
1e88ad0a 11198 ret = drm_atomic_add_affected_connectors(state, crtc);
68ca1c3e
S
11199 if (ret) {
11200 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
706bc8c5 11201 goto fail;
68ca1c3e 11202 }
fc9e9920 11203
1e88ad0a 11204 ret = drm_atomic_add_affected_planes(state, crtc);
68ca1c3e
S
11205 if (ret) {
11206 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
1e88ad0a 11207 goto fail;
68ca1c3e 11208 }
115a385c 11209
cbac53f7 11210 if (dm_old_crtc_state->dsc_force_changed)
115a385c 11211 new_crtc_state->mode_changed = true;
e7b07cee
HW
11212 }
11213
2d9e6431
NK
11214 /*
11215 * Add all primary and overlay planes on the CRTC to the state
11216 * whenever a plane is enabled to maintain correct z-ordering
11217 * and to enable fast surface updates.
11218 */
11219 drm_for_each_crtc(crtc, dev) {
11220 bool modified = false;
11221
11222 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11223 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11224 continue;
11225
11226 if (new_plane_state->crtc == crtc ||
11227 old_plane_state->crtc == crtc) {
11228 modified = true;
11229 break;
11230 }
11231 }
11232
11233 if (!modified)
11234 continue;
11235
11236 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11237 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11238 continue;
11239
11240 new_plane_state =
11241 drm_atomic_get_plane_state(state, plane);
11242
11243 if (IS_ERR(new_plane_state)) {
11244 ret = PTR_ERR(new_plane_state);
68ca1c3e 11245 DRM_DEBUG_DRIVER("drm_atomic_get_plane_state() failed\n");
2d9e6431
NK
11246 goto fail;
11247 }
11248 }
11249 }
11250
62f55537 11251 /* Remove existing planes if they are modified */
9e869063
LL
11252 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11253 ret = dm_update_plane_state(dc, state, plane,
11254 old_plane_state,
11255 new_plane_state,
11256 false,
11257 &lock_and_validation_needed);
68ca1c3e
S
11258 if (ret) {
11259 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 11260 goto fail;
68ca1c3e 11261 }
62f55537
AG
11262 }
11263
11264 /* Disable all crtcs which require disable */
4b9674e5
LL
11265 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11266 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11267 old_crtc_state,
11268 new_crtc_state,
11269 false,
11270 &lock_and_validation_needed);
68ca1c3e
S
11271 if (ret) {
11272 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
4b9674e5 11273 goto fail;
68ca1c3e 11274 }
62f55537
AG
11275 }
11276
11277 /* Enable all crtcs which require enable */
4b9674e5
LL
11278 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11279 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11280 old_crtc_state,
11281 new_crtc_state,
11282 true,
11283 &lock_and_validation_needed);
68ca1c3e
S
11284 if (ret) {
11285 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
4b9674e5 11286 goto fail;
68ca1c3e 11287 }
62f55537
AG
11288 }
11289
11290 /* Add new/modified planes */
9e869063
LL
11291 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11292 ret = dm_update_plane_state(dc, state, plane,
11293 old_plane_state,
11294 new_plane_state,
11295 true,
11296 &lock_and_validation_needed);
68ca1c3e
S
11297 if (ret) {
11298 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 11299 goto fail;
68ca1c3e 11300 }
62f55537
AG
11301 }
11302
b349f76e
ES
11303 /* Run this here since we want to validate the streams we created */
11304 ret = drm_atomic_helper_check_planes(dev, state);
68ca1c3e
S
11305 if (ret) {
11306 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
b349f76e 11307 goto fail;
68ca1c3e 11308 }
62f55537 11309
214993e1
ML
11310 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11311 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11312 if (dm_new_crtc_state->mpo_requested)
11313 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11314 }
11315
12f4849a
SS
11316 /* Check cursor planes scaling */
11317 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11318 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
68ca1c3e
S
11319 if (ret) {
11320 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
12f4849a 11321 goto fail;
68ca1c3e 11322 }
12f4849a
SS
11323 }
11324
43d10d30
NK
11325 if (state->legacy_cursor_update) {
11326 /*
11327 * This is a fast cursor update coming from the plane update
11328 * helper, check if it can be done asynchronously for better
11329 * performance.
11330 */
11331 state->async_update =
11332 !drm_atomic_helper_async_check(dev, state);
11333
11334 /*
11335 * Skip the remaining global validation if this is an async
11336 * update. Cursor updates can be done without affecting
11337 * state or bandwidth calcs and this avoids the performance
11338 * penalty of locking the private state object and
11339 * allocating a new dc_state.
11340 */
11341 if (state->async_update)
11342 return 0;
11343 }
11344
ebdd27e1 11345 /* Check scaling and underscan changes */
1f6010a9 11346 /* TODO: Scaling-change validation was removed because a new stream
e7b07cee
HW
 11347 * cannot be committed into the context w/o causing a full reset.
 11348 * Need to decide how to handle this.
 11349 */
c2cea706 11350 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
11351 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11352 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11353 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
11354
11355 /* Skip any modesets/resets */
0bc9706d
LSL
11356 if (!acrtc || drm_atomic_crtc_needs_modeset(
11357 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
11358 continue;
11359
b830ebc9 11360 /* Skip anything that is not a scaling or underscan change */
54d76575 11361 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
11362 continue;
11363
11364 lock_and_validation_needed = true;
11365 }
11366
41724ea2
BL
11367#if defined(CONFIG_DRM_AMD_DC_DCN)
11368 /* set the slot info for each mst_state based on the link encoding format */
11369 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11370 struct amdgpu_dm_connector *aconnector;
11371 struct drm_connector *connector;
11372 struct drm_connector_list_iter iter;
11373 u8 link_coding_cap;
11374
 11375 if (!mgr->mst_state)
11376 continue;
11377
11378 drm_connector_list_iter_begin(dev, &iter);
11379 drm_for_each_connector_iter(connector, &iter) {
11380 int id = connector->index;
11381
11382 if (id == mst_state->mgr->conn_base_id) {
11383 aconnector = to_amdgpu_dm_connector(connector);
11384 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11385 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11386
11387 break;
11388 }
11389 }
11390 drm_connector_list_iter_end(&iter);
11391
11392 }
11393#endif
f6d7c7fa
NK
 11394 /*
11395 * Streams and planes are reset when there are changes that affect
11396 * bandwidth. Anything that affects bandwidth needs to go through
11397 * DC global validation to ensure that the configuration can be applied
11398 * to hardware.
11399 *
11400 * We have to currently stall out here in atomic_check for outstanding
11401 * commits to finish in this case because our IRQ handlers reference
11402 * DRM state directly - we can end up disabling interrupts too early
11403 * if we don't.
11404 *
11405 * TODO: Remove this stall and drop DM state private objects.
a87fa993 11406 */
f6d7c7fa 11407 if (lock_and_validation_needed) {
eb3dc897 11408 ret = dm_atomic_get_state(state, &dm_state);
68ca1c3e
S
11409 if (ret) {
11410 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
eb3dc897 11411 goto fail;
68ca1c3e 11412 }
e7b07cee
HW
11413
11414 ret = do_aquire_global_lock(dev, state);
68ca1c3e
S
11415 if (ret) {
11416 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
e7b07cee 11417 goto fail;
68ca1c3e 11418 }
1dc90497 11419
d9fe1a4c 11420#if defined(CONFIG_DRM_AMD_DC_DCN)
68ca1c3e
S
11421 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11422 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
71be4b16 11423 ret = -EINVAL;
8c20a1ed 11424 goto fail;
68ca1c3e 11425 }
8c20a1ed 11426
6513104b 11427 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
68ca1c3e
S
11428 if (ret) {
11429 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
29b9ba74 11430 goto fail;
68ca1c3e 11431 }
d9fe1a4c 11432#endif
29b9ba74 11433
ded58c7b
ZL
11434 /*
11435 * Perform validation of MST topology in the state:
11436 * We need to perform MST atomic check before calling
11437 * dc_validate_global_state(), or there is a chance
11438 * to get stuck in an infinite loop and hang eventually.
11439 */
11440 ret = drm_dp_mst_atomic_check(state);
68ca1c3e
S
11441 if (ret) {
11442 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
ded58c7b 11443 goto fail;
68ca1c3e 11444 }
85fb8bb9 11445 status = dc_validate_global_state(dc, dm_state->context, true);
74a16675 11446 if (status != DC_OK) {
68ca1c3e 11447 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
74a16675 11448 dc_status_to_str(status), status);
e7b07cee
HW
11449 ret = -EINVAL;
11450 goto fail;
11451 }
bd200d19 11452 } else {
674e78ac 11453 /*
bd200d19
NK
11454 * The commit is a fast update. Fast updates shouldn't change
11455 * the DC context, affect global validation, and can have their
11456 * commit work done in parallel with other commits not touching
11457 * the same resource. If we have a new DC context as part of
11458 * the DM atomic state from validation we need to free it and
11459 * retain the existing one instead.
fde9f39a
MR
11460 *
11461 * Furthermore, since the DM atomic state only contains the DC
11462 * context and can safely be annulled, we can free the state
11463 * and clear the associated private object now to free
11464 * some memory and avoid a possible use-after-free later.
674e78ac 11465 */
bd200d19 11466
fde9f39a
MR
11467 for (i = 0; i < state->num_private_objs; i++) {
11468 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 11469
fde9f39a
MR
11470 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11471 int j = state->num_private_objs-1;
bd200d19 11472
fde9f39a
MR
11473 dm_atomic_destroy_state(obj,
11474 state->private_objs[i].state);
11475
11476 /* If i is not at the end of the array then the
11477 * last element needs to be moved to where i was
11478 * before the array can safely be truncated.
11479 */
11480 if (i != j)
11481 state->private_objs[i] =
11482 state->private_objs[j];
bd200d19 11483
fde9f39a
MR
11484 state->private_objs[j].ptr = NULL;
11485 state->private_objs[j].state = NULL;
11486 state->private_objs[j].old_state = NULL;
11487 state->private_objs[j].new_state = NULL;
11488
11489 state->num_private_objs = j;
11490 break;
11491 }
bd200d19 11492 }
e7b07cee
HW
11493 }
11494
caff0e66
NK
11495 /* Store the overall update type for use later in atomic check. */
11496 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11497 struct dm_crtc_state *dm_new_crtc_state =
11498 to_dm_crtc_state(new_crtc_state);
11499
f6d7c7fa
NK
11500 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11501 UPDATE_TYPE_FULL :
11502 UPDATE_TYPE_FAST;
e7b07cee
HW
11503 }
11504
 11505 /* ret must be 0 (success) at this point */
11506 WARN_ON(ret);
e8a98235
RS
11507
11508 trace_amdgpu_dm_atomic_check_finish(state, ret);
11509
e7b07cee
HW
11510 return ret;
11511
11512fail:
11513 if (ret == -EDEADLK)
01e28f9c 11514 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 11515 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 11516 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 11517 else
01e28f9c 11518 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee 11519
e8a98235
RS
11520 trace_amdgpu_dm_atomic_check_finish(state, ret);
11521
e7b07cee
HW
11522 return ret;
11523}
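/*
 * Editorial sketch of the swap-with-last removal used on the private
 * object array above: to drop element i from an array whose order does
 * not matter, copy the last element into slot i and shrink the count,
 * avoiding an O(n) shift:
 */
static void example_swap_remove(int *arr, int *count, int i)
{
	int j = *count - 1;

	if (i != j)
		arr[i] = arr[j];	/* order is not preserved */
	*count = j;
}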
11524
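/*
 * Editorial sketch: amdgpu_dm_atomic_check() is consumed through the DRM
 * mode-config vtable. The table below only illustrates the wiring (the
 * driver's real table, with more hooks, lives earlier in this file):
 */
static const struct drm_mode_config_funcs example_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};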
3ee6b26b
AD
11525static bool is_dp_capable_without_timing_msa(struct dc *dc,
11526 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
11527{
11528 uint8_t dpcd_data;
11529 bool capable = false;
11530
c84dec2f 11531 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
11532 dm_helpers_dp_read_dpcd(
11533 NULL,
c84dec2f 11534 amdgpu_dm_connector->dc_link,
e7b07cee
HW
11535 DP_DOWN_STREAM_PORT_COUNT,
11536 &dpcd_data,
11537 sizeof(dpcd_data))) {
 11538 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11539 }
11540
11541 return capable;
11542}
f9b4f20c 11543
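/*
 * Editorial sketch: the same capability probe expressed with the core
 * DRM DP helper, for code that holds a struct drm_dp_aux directly (DM
 * routes the read through dm_helpers_dp_read_dpcd() instead):
 */
static bool example_msa_timing_par_ignored(struct drm_dp_aux *aux)
{
	u8 val;

	if (drm_dp_dpcd_readb(aux, DP_DOWN_STREAM_PORT_COUNT, &val) < 0)
		return false;

	return val & DP_MSA_TIMING_PAR_IGNORED;
}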
46db138d
SW
11544static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11545 unsigned int offset,
11546 unsigned int total_length,
11547 uint8_t *data,
11548 unsigned int length,
11549 struct amdgpu_hdmi_vsdb_info *vsdb)
11550{
11551 bool res;
11552 union dmub_rb_cmd cmd;
11553 struct dmub_cmd_send_edid_cea *input;
11554 struct dmub_cmd_edid_cea_output *output;
11555
11556 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11557 return false;
11558
11559 memset(&cmd, 0, sizeof(cmd));
11560
11561 input = &cmd.edid_cea.data.input;
11562
11563 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11564 cmd.edid_cea.header.sub_type = 0;
11565 cmd.edid_cea.header.payload_bytes =
11566 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11567 input->offset = offset;
11568 input->length = length;
eb9e59eb 11569 input->cea_total_length = total_length;
46db138d
SW
11570 memcpy(input->payload, data, length);
11571
11572 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11573 if (!res) {
11574 DRM_ERROR("EDID CEA parser failed\n");
11575 return false;
11576 }
11577
11578 output = &cmd.edid_cea.data.output;
11579
11580 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11581 if (!output->ack.success) {
11582 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11583 output->ack.offset);
11584 }
11585 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11586 if (!output->amd_vsdb.vsdb_found)
11587 return false;
11588
11589 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11590 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11591 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11592 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11593 } else {
b76a8062 11594 DRM_WARN("Unknown EDID CEA parser results\n");
46db138d
SW
11595 return false;
11596 }
11597
11598 return true;
11599}
11600
11601static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
f9b4f20c
SW
11602 uint8_t *edid_ext, int len,
11603 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11604{
11605 int i;
f9b4f20c
SW
11606
11607 /* send extension block to DMCU for parsing */
11608 for (i = 0; i < len; i += 8) {
11609 bool res;
11610 int offset;
11611
 11612 /* send 8 bytes at a time */
46db138d 11613 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
f9b4f20c
SW
11614 return false;
11615
 11616 if (i + 8 == len) {
 11617 /* entire EDID block sent; expect the parse result */
11618 int version, min_rate, max_rate;
11619
46db138d 11620 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
f9b4f20c
SW
11621 if (res) {
11622 /* amd vsdb found */
11623 vsdb_info->freesync_supported = 1;
11624 vsdb_info->amd_vsdb_version = version;
11625 vsdb_info->min_refresh_rate_hz = min_rate;
11626 vsdb_info->max_refresh_rate_hz = max_rate;
11627 return true;
11628 }
11629 /* not amd vsdb */
11630 return false;
11631 }
11632
 11633 /* check for ack */
46db138d 11634 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
f9b4f20c
SW
11635 if (!res)
11636 return false;
11637 }
11638
11639 return false;
11640}
11641
46db138d
SW
11642static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11643 uint8_t *edid_ext, int len,
11644 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11645{
11646 int i;
11647
 11648 /* send extension block to DMUB for parsing */
11649 for (i = 0; i < len; i += 8) {
 11650 /* send 8 bytes at a time */
11651 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11652 return false;
11653 }
11654
11655 return vsdb_info->freesync_supported;
11656}
11657
11658static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11659 uint8_t *edid_ext, int len,
11660 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11661{
11662 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11663
11664 if (adev->dm.dmub_srv)
11665 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11666 else
11667 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11668}
11669
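/*
 * Editorial sketch: both parsers above stream a 128-byte CEA extension
 * block to firmware in 8-byte chunks, i.e. 16 commands per block. A
 * hypothetical stand-alone chunking loop over an arbitrary sender:
 */
static void example_send_in_chunks(const u8 *block, int len,
				   void (*send)(const u8 *chunk, int n))
{
	int i;

	for (i = 0; i < len; i += 8)
		send(&block[i], min(8, len - i));	/* tail may be short */
}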
7c7dd774 11670static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
f9b4f20c
SW
11671 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11672{
11673 uint8_t *edid_ext = NULL;
11674 int i;
11675 bool valid_vsdb_found = false;
11676
11677 /*----- drm_find_cea_extension() -----*/
11678 /* No EDID or EDID extensions */
11679 if (edid == NULL || edid->extensions == 0)
7c7dd774 11680 return -ENODEV;
f9b4f20c
SW
11681
11682 /* Find CEA extension */
11683 for (i = 0; i < edid->extensions; i++) {
11684 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11685 if (edid_ext[0] == CEA_EXT)
11686 break;
11687 }
11688
11689 if (i == edid->extensions)
7c7dd774 11690 return -ENODEV;
f9b4f20c
SW
11691
11692 /*----- cea_db_offsets() -----*/
11693 if (edid_ext[0] != CEA_EXT)
7c7dd774 11694 return -ENODEV;
f9b4f20c
SW
11695
11696 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
7c7dd774
AB
11697
11698 return valid_vsdb_found ? i : -ENODEV;
f9b4f20c
SW
11699}
11700
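/*
 * Editorial sketch: EDID extension blocks are EDID_LENGTH (128) bytes
 * each and sit directly after the 128-byte base block, so extension i
 * begins (i + 1) * EDID_LENGTH bytes from the start of the EDID:
 */
static u8 *example_edid_extension(struct edid *edid, int i)
{
	if (!edid || i >= edid->extensions)
		return NULL;

	return (u8 *)edid + EDID_LENGTH * (i + 1);
}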
98e6436d
AK
11701void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11702 struct edid *edid)
e7b07cee 11703{
eb0709ba 11704 int i = 0;
e7b07cee
HW
11705 struct detailed_timing *timing;
11706 struct detailed_non_pixel *data;
11707 struct detailed_data_monitor_range *range;
c84dec2f
HW
11708 struct amdgpu_dm_connector *amdgpu_dm_connector =
11709 to_amdgpu_dm_connector(connector);
bb47de73 11710 struct dm_connector_state *dm_con_state = NULL;
9ad54467 11711 struct dc_sink *sink;
e7b07cee
HW
11712
11713 struct drm_device *dev = connector->dev;
1348969a 11714 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 11715 bool freesync_capable = false;
f9b4f20c 11716 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
b830ebc9 11717
8218d7f1
HW
11718 if (!connector->state) {
 11719 DRM_ERROR("%s - Connector has no state\n", __func__);
bb47de73 11720 goto update;
8218d7f1
HW
11721 }
11722
9b2fdc33
AP
11723 sink = amdgpu_dm_connector->dc_sink ?
11724 amdgpu_dm_connector->dc_sink :
11725 amdgpu_dm_connector->dc_em_sink;
11726
11727 if (!edid || !sink) {
98e6436d
AK
11728 dm_con_state = to_dm_connector_state(connector->state);
11729
11730 amdgpu_dm_connector->min_vfreq = 0;
11731 amdgpu_dm_connector->max_vfreq = 0;
11732 amdgpu_dm_connector->pixel_clock_mhz = 0;
9b2fdc33
AP
11733 connector->display_info.monitor_range.min_vfreq = 0;
11734 connector->display_info.monitor_range.max_vfreq = 0;
11735 freesync_capable = false;
98e6436d 11736
bb47de73 11737 goto update;
98e6436d
AK
11738 }
11739
8218d7f1
HW
11740 dm_con_state = to_dm_connector_state(connector->state);
11741
e7b07cee 11742 if (!adev->dm.freesync_module)
bb47de73 11743 goto update;
f9b4f20c
SW
11744
11745
9b2fdc33
AP
11746 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11747 || sink->sink_signal == SIGNAL_TYPE_EDP) {
f9b4f20c
SW
11748 bool edid_check_required = false;
11749
11750 if (edid) {
e7b07cee
HW
11751 edid_check_required = is_dp_capable_without_timing_msa(
11752 adev->dm.dc,
c84dec2f 11753 amdgpu_dm_connector);
e7b07cee 11754 }
e7b07cee 11755
f9b4f20c
SW
 11756 if (edid_check_required && (edid->version > 1 ||
11757 (edid->version == 1 && edid->revision > 1))) {
11758 for (i = 0; i < 4; i++) {
e7b07cee 11759
f9b4f20c
SW
11760 timing = &edid->detailed_timings[i];
11761 data = &timing->data.other_data;
11762 range = &data->data.range;
11763 /*
11764 * Check if monitor has continuous frequency mode
11765 */
11766 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11767 continue;
11768 /*
 11769 * Check for range-limit flags only. If flags == 1 then
 11770 * no additional timing information is provided.
 11771 * Default GTF, GTF secondary curve and CVT are not
 11772 * supported.
11773 */
11774 if (range->flags != 1)
11775 continue;
a0ffc3fd 11776
f9b4f20c
SW
11777 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11778 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11779 amdgpu_dm_connector->pixel_clock_mhz =
11780 range->pixel_clock_mhz * 10;
a0ffc3fd 11781
f9b4f20c
SW
11782 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11783 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
e7b07cee 11784
f9b4f20c
SW
11785 break;
11786 }
98e6436d 11787
f9b4f20c
SW
11788 if (amdgpu_dm_connector->max_vfreq -
11789 amdgpu_dm_connector->min_vfreq > 10) {
98e6436d 11790
f9b4f20c
SW
11791 freesync_capable = true;
11792 }
11793 }
9b2fdc33 11794 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
7c7dd774
AB
11795 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11796 if (i >= 0 && vsdb_info.freesync_supported) {
f9b4f20c
SW
11797 timing = &edid->detailed_timings[i];
11798 data = &timing->data.other_data;
11799
11800 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11801 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11802 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11803 freesync_capable = true;
11804
11805 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11806 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
e7b07cee
HW
11807 }
11808 }
bb47de73
NK
11809
11810update:
11811 if (dm_con_state)
11812 dm_con_state->freesync_capable = freesync_capable;
11813
11814 if (connector->vrr_capable_property)
11815 drm_connector_set_vrr_capable_property(connector,
11816 freesync_capable);
e7b07cee
HW
11817}
11818
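/*
 * Editorial sketch: whichever path supplied the range (DP detailed
 * timings or the HDMI AMD VSDB), the capability test above is the same
 * "continuous refresh window wider than 10 Hz" rule:
 */
static bool example_range_is_freesync_capable(int min_vfreq, int max_vfreq)
{
	/* 48..144 Hz qualifies; a fixed 59..60 Hz panel does not */
	return max_vfreq - min_vfreq > 10;
}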
3d4e52d0
VL
11819void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11820{
1348969a 11821 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
11822 struct dc *dc = adev->dm.dc;
11823 int i;
11824
11825 mutex_lock(&adev->dm.dc_lock);
11826 if (dc->current_state) {
11827 for (i = 0; i < dc->current_state->stream_count; ++i)
11828 dc->current_state->streams[i]
11829 ->triggered_crtc_reset.enabled =
11830 adev->dm.force_timing_sync;
11831
11832 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11833 dc_trigger_sync(dc, dc->current_state);
11834 }
11835 mutex_unlock(&adev->dm.dc_lock);
11836}
9d83722d
RS
11837
11838void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11839 uint32_t value, const char *func_name)
11840{
11841#ifdef DM_CHECK_ADDR_0
11842 if (address == 0) {
 11843 DC_ERR("invalid register write; address = 0\n");
11844 return;
11845 }
11846#endif
11847 cgs_write_register(ctx->cgs_device, address, value);
11848 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11849}
11850
11851uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11852 const char *func_name)
11853{
11854 uint32_t value;
11855#ifdef DM_CHECK_ADDR_0
11856 if (address == 0) {
11857 DC_ERR("invalid register read; address = 0\n");
11858 return 0;
11859 }
11860#endif
11861
11862 if (ctx->dmub_srv &&
11863 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11864 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11865 ASSERT(false);
11866 return 0;
11867 }
11868
11869 value = cgs_read_register(ctx->cgs_device, address);
11870
11871 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11872
11873 return value;
11874}
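/*
 * Editorial sketch: a write-then-verify helper composed from the two
 * accessors above (illustrative only; DC's register macros wrap these
 * accessors in practice):
 */
static bool example_write_verify(const struct dc_context *ctx,
				 uint32_t address, uint32_t value)
{
	dm_write_reg_func(ctx, address, value, __func__);

	return dm_read_reg_func(ctx, address, __func__) == value;
}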
81927e28 11875
240e6d25
IB
11876static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11877 struct dc_context *ctx,
11878 uint8_t status_type,
11879 uint32_t *operation_result)
88f52b1f
JS
11880{
11881 struct amdgpu_device *adev = ctx->driver_context;
11882 int return_status = -1;
11883 struct dmub_notification *p_notify = adev->dm.dmub_notify;
11884
11885 if (is_cmd_aux) {
11886 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11887 return_status = p_notify->aux_reply.length;
11888 *operation_result = p_notify->result;
11889 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11890 *operation_result = AUX_RET_ERROR_TIMEOUT;
11891 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11892 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11893 } else {
11894 *operation_result = AUX_RET_ERROR_UNKNOWN;
11895 }
11896 } else {
11897 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11898 return_status = 0;
11899 *operation_result = p_notify->sc_status;
11900 } else {
11901 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11902 }
11903 }
11904
11905 return return_status;
11906}
11907
11908int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11909 unsigned int link_index, void *cmd_payload, void *operation_result)
81927e28
JS
11910{
11911 struct amdgpu_device *adev = ctx->driver_context;
11912 int ret = 0;
11913
88f52b1f
JS
11914 if (is_cmd_aux) {
11915 dc_process_dmub_aux_transfer_async(ctx->dc,
11916 link_index, (struct aux_payload *)cmd_payload);
11917 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11918 (struct set_config_cmd_payload *)cmd_payload,
11919 adev->dm.dmub_notify)) {
11920 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11921 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11922 (uint32_t *)operation_result);
11923 }
11924
9e3a50d2 11925 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
81927e28 11926 if (ret == 0) {
9e3a50d2 11927 DRM_ERROR("wait_for_completion_timeout timed out!\n");
88f52b1f
JS
11928 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11929 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11930 (uint32_t *)operation_result);
81927e28 11931 }
81927e28 11932
88f52b1f
JS
11933 if (is_cmd_aux) {
11934 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11935 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
81927e28 11936
88f52b1f
JS
11937 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11938 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11939 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11940 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11941 adev->dm.dmub_notify->aux_reply.length);
11942 }
11943 }
81927e28
JS
11944 }
11945
88f52b1f
JS
11946 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11947 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11948 (uint32_t *)operation_result);
81927e28 11949}
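/*
 * Editorial sketch of the async-to-sync pattern above: fire the DMUB
 * request, then block on a completion. wait_for_completion_timeout()
 * returns 0 on timeout and the remaining jiffies otherwise, hence the
 * "ret == 0" timeout test above (10 * HZ is ten seconds):
 */
static int example_wait_dmub_reply(struct completion *done)
{
	unsigned long left = wait_for_completion_timeout(done, 10 * HZ);

	return left ? 0 : -ETIMEDOUT;
}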
1edf5ae1
ZL
11950
11951/*
11952 * Check whether seamless boot is supported.
11953 *
11954 * So far we only support seamless boot on CHIP_VANGOGH.
11955 * If everything goes well, we may consider expanding
11956 * seamless boot to other ASICs.
11957 */
11958bool check_seamless_boot_capability(struct amdgpu_device *adev)
11959{
11960 switch (adev->asic_type) {
11961 case CHIP_VANGOGH:
11962 if (!adev->mman.keep_stolen_vga_memory)
11963 return true;
11964 break;
11965 default:
11966 break;
11967 }
11968
11969 return false;
11970}
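/*
 * Editorial sketch: a typical caller probes this once during DM init and
 * feeds the answer into DC's init flags (the flag name below is
 * illustrative, not necessarily the one this driver uses):
 */
static void example_apply_seamless_boot(struct amdgpu_device *adev,
					struct dc_init_data *init_data)
{
	if (check_seamless_boot_capability(adev))
		init_data->flags.allow_seamless_boot_optimization = true;
}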