/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

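/*
 * Map the dongle type reported in a link's DPCD caps to the matching DRM
 * subconnector type; unrecognized dongles report as "Unknown".
 */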
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

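/*
 * Mirror the current sink's dongle type into the connector's DP
 * "subconnector" property; only meaningful for DisplayPort connectors.
 */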
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

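/*
 * Read back the current scanout position and vblank window of a CRTC,
 * packed into the register-style format expected by the base driver.
 */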
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

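/*
 * Find the amdgpu_crtc driven by the given OTG (output timing generator)
 * instance; falls back to the first CRTC if the instance is invalid.
 */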
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

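/*
 * Helpers reporting whether variable refresh rate is currently active,
 * either from the IRQ-safe per-CRTC parameters or from a dm_crtc_state.
 */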
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

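/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate, and in VRR mode performs the core
 * vblank handling after end of front-porch (where timestamps are valid)
 * plus below-the-range BTR processing on pre-DCE12 ASICs.
 */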
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

86bc2219 602#if defined(CONFIG_DRM_AMD_DC_DCN)
9e1178ef 603#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
86bc2219
WL
604/**
605 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
606 * DCN generation ASICs
48e01bf4 607 * @interrupt_params: interrupt parameters
86bc2219
WL
608 *
609 * Used to set crc window/read out crc value at vertical line 0 position
610 */
86bc2219
WL
611static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
612{
613 struct common_irq_params *irq_params = interrupt_params;
614 struct amdgpu_device *adev = irq_params->adev;
615 struct amdgpu_crtc *acrtc;
616
617 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
618
619 if (!acrtc)
620 return;
621
622 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
623}
433e5dec 624#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
86bc2219 625
e27c41d5
JS
626/**
627 * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
628 * @adev: amdgpu_device pointer
629 * @notify: dmub notification structure
630 *
631 * Dmub AUX or SET_CONFIG command completion processing callback
632 * Copies dmub notification to DM which is to be read by AUX command.
633 * issuing thread and also signals the event to wake up the thread.
634 */
635void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
636{
637 if (adev->dm.dmub_notify)
638 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
639 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
640 complete(&adev->dm.dmub_aux_transfer_done);
641}
642
/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets display index through the
 * link index and calls helper to do the processing.
 */
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev = adev->dm.ddev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets indicator whether callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 * Return: true if successfully registered, false otherwise.
 */
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
				   dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

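/*
 * Illustrative use, matching what amdgpu_dm_init() registers below: HPD
 * notifications are offloaded to the delayed worker rather than handled
 * directly in the outbox interrupt:
 *
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *				      dmub_hpd_callback, true);
 */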
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
						      dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining pending DMUB notifications
 * and DMCUB trace buffer entries.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			/* >= rather than >: notify.type indexes the callback arrays */
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

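/*
 * DM has no clockgating/powergating control of its own, so these IP block
 * hooks are intentional no-ops that report success.
 */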
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

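/*
 * Audio component glue: the HDA driver binds to DM through the component
 * framework and queries ELD data for a port via the get_eld() hook below.
 */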
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

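/*
 * Copy the DMUB firmware sections into their framebuffer windows, program
 * the hardware parameters and bring up the DMUB service. Returns 0 when
 * DMUB is up or simply not supported on the ASIC, negative errno otherwise.
 */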
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;
	struct dc *dc = adev->dm.dc;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->asic_type) {
	case CHIP_YELLOW_CARP:
		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
			hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
#endif
		}
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
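/*
 * Build the DC physical address space configuration (system aperture, AGP
 * window and GART page table addresses) from the GMC state, for handoff to
 * DC through dc_setup_system_context().
 */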
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
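/*
 * Deferred work that counts how many CRTCs have vblank interrupts enabled,
 * toggles DC idle optimizations (MALL) accordingly and enters or exits PSR
 * based on the OS vblank requirements.
 */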
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

#endif

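/*
 * Offloaded HPD-RX interrupt work: re-detects the sink and, under dc_lock,
 * runs DP automated-test or link-loss handling for the affected link.
 */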
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}

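/*
 * Allocate one single-threaded HPD-RX offload workqueue per link; returns
 * NULL if any allocation fails.
 */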
static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	/* Unwind: without this, earlier workqueues and the array would leak. */
	for (i = 0; i < max_caps; i++) {
		if (hpd_rx_offload_wq[i].wq)
			destroy_workqueue(hpd_rx_offload_wq[i].wq);
	}
	kfree(hpd_rx_offload_wq);
	return NULL;
}

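/*
 * Systems on which stutter must be disabled, keyed by PCI vendor/device/
 * subsystem IDs and revision; the list is terminated by a zeroed entry.
 */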
struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

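/*
 * Top-level DM bring-up: gathers ASIC info into dc_init_data, creates the
 * DC instance, initializes DMUB, IRQ, freesync and HDCP support, and
 * registers the DRM display structures.
 */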
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 1, 0):
			init_data.flags.gpu_vm_support = true;
			switch (adev->dm.dmcub_fw_version) {
			case 0: /* development */
			case 0x1: /* linux-firmware.git hash 6d9f399 */
			case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
				init_data.flags.disable_dmcu = false;
				break;
			default:
				init_data.flags.disable_dmcu = true;
			}
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			init_data.flags.gpu_vm_support = true;
			break;
		case IP_VERSION(2, 0, 3):
			init_data.flags.disable_dmcu = true;
			break;
		default:
			break;
		}
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	init_data.flags.power_down_display_on_boot = true;

	if (check_seamless_boot_capability(adev)) {
		init_data.flags.power_down_display_on_boot = false;
		init_data.flags.allow_seamless_boot_optimization = true;
		DRM_INFO("Seamless boot condition check passed\n");
	}

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
		adev->dm.dc->debug.disable_dsc = true;
		adev->dm.dc->debug.disable_dsc_edp = true;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

8e794421
WL
1504 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1505 if (!adev->dm.hpd_rx_offload_wq) {
1506 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1507 goto error;
1508 }
1509
0b08c54b 1510#if defined(CONFIG_DRM_AMD_DC_DCN)
3ca001af 1511 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
e6cd859d
AD
1512 struct dc_phy_addr_space_config pa_config;
1513
0b08c54b 1514 mmhub_read_system_context(adev, &pa_config);
c0fb85ae 1515
0b08c54b
YZ
1516 // Call the DC init_memory func
1517 dc_setup_system_context(adev->dm.dc, &pa_config);
1518 }
1519#endif
c0fb85ae 1520
4562236b
HW
1521 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1522 if (!adev->dm.freesync_module) {
1523 DRM_ERROR(
1524 "amdgpu: failed to initialize freesync_module.\n");
1525 } else
f1ad2f5e 1526 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
4562236b
HW
1527 adev->dm.freesync_module);
1528
e277adc5
LSL
1529 amdgpu_dm_init_color_mod();
1530
ea3b4242
QZ
1531#if defined(CONFIG_DRM_AMD_DC_DCN)
1532 if (adev->dm.dc->caps.max_links > 0) {
09a5df6c
NK
1533 adev->dm.vblank_control_workqueue =
1534 create_singlethread_workqueue("dm_vblank_control_workqueue");
1535 if (!adev->dm.vblank_control_workqueue)
ea3b4242 1536 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
ea3b4242
QZ
1537 }
1538#endif
1539
52704fca 1540#ifdef CONFIG_DRM_AMD_DC_HDCP
c08182f2 1541 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
e50dc171 1542 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
52704fca 1543
96a3b32e
BL
1544 if (!adev->dm.hdcp_workqueue)
1545 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1546 else
1547 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
52704fca 1548
96a3b32e
BL
1549 dc_init_callbacks(adev->dm.dc, &init_params);
1550 }
9a65df19
WL
1551#endif
1552#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1553 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
52704fca 1554#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: failed to register dmub aux callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
			goto error;
		}
#endif /* CONFIG_DRM_AMD_DC_DCN */
	}

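	/*
	 * From this point, on ASICs where dc_enable_dmub_notifications() is
	 * true (DCN3.1-class parts at the time of this code), AUX replies and
	 * HPD events can arrive as DMUB outbox notifications instead of
	 * classic interrupts; the callbacks registered above feed them back
	 * into the same HPD/AUX handling paths.
	 */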
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	/* Disable vblank IRQs aggressively for power-saving */
	adev_to_drm(adev)->vblank_disable_immediate = true;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;
	}
#endif

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
		destroy_workqueue(adev->dm.delayed_hpd_wq);
		adev->dm.delayed_hpd_wq = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	if (adev->dm.hpd_rx_offload_wq) {
		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
			if (adev->dm.hpd_rx_offload_wq[i].wq) {
				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
			}
		}

		kfree(adev->dm.hpd_rx_offload_wq);
		adev->dm.hpd_rx_offload_wq = NULL;
	}

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			return 0;
		default:
			break;
		}
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

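/*
 * Note on the two ucode entries above: the DMCU image is handed to PSP as
 * two pieces, the ERAM code (ucode_size_bytes minus the vector table) and
 * the interrupt vector table (intv_size_bytes), both backed by the same
 * firmware blob.
 */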
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case IP_VERSION(3, 0, 0):
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		} else {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		}
		break;
	case IP_VERSION(3, 0, 1):
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case IP_VERSION(3, 0, 2):
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case IP_VERSION(3, 0, 3):
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

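	/*
	 * Illustrative layout only (the real ordering and alignment are
	 * decided inside dmub_srv_calc_region_info()):
	 *
	 *   dmub_bo: | inst_const | bss/data | vbios copy | stack/scratch |
	 *
	 * region_info reports each region's offset, which is rebased onto
	 * the VRAM allocation below.
	 */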
	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_override = false;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on dmcub,
	 * the dmcu object will be NULL.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu) {
		if (!dmcu_load_iram(dmcu, params))
			return -EINVAL;
	} else if (adev->dm.dc->ctx->dmub_srv) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		int edp_num;

		get_edp_links(adev->dm.dc, edp_links, &edp_num);
		for (i = 0; i < edp_num; i++) {
			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
				return -EINVAL;
		}
	}

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

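/*
 * Suspend just quiesces MST sideband traffic; on resume the topology
 * manager re-probes the branch device, and if the topology cannot be
 * restored MST is torn down and a hotplug event is faked so userspace
 * re-detects the displays from scratch.
 */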
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls the pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 0, 2):
	case IP_VERSION(2, 0, 0):
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

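/*
 * Committing a copy of the current state with every stream and plane
 * removed is how DM quiesces the display hardware around GPU reset: DC
 * tears everything down in one transaction, and dm_resume() later replays
 * the state cached in dm->cached_dc_state.
 */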
static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
{
	int i;

	if (dm->hpd_rx_offload_wq) {
		for (i = 0; i < dm->dc->caps.max_links; i++)
			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
	}
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);

#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(adev->dm.dc, false);
#endif

		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		hpd_rx_irq_work_suspend(dm);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	hpd_rx_irq_work_suspend(dm);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink)
		dc_sink_release(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}

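/*
 * "Emulated" detection serves forced connectors (e.g. video= on the kernel
 * command line) with nothing physically attached: a virtual sink is
 * fabricated from the connector type so the rest of the pipeline can treat
 * the link as connected.
 */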
static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}

static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	acrtc_state->force_dpms_off = true;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		/*
		 * The dc->current_state is backed up into dm->cached_dc_state
		 * before we commit 0 streams.
		 *
		 * DC will clear link encoder assignments on the real state
		 * but the changes won't propagate over to the copy we made
		 * before the 0 streams commit.
		 *
		 * DC expects that link encoder assignments are *not* valid
		 * when committing a state, so as a workaround it needs to be
		 * cleared here.
		 */
		link_enc_cfg_init(dm->dc, dc_state);

		amdgpu_dm_outbox_init(adev);

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
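	/*
	 * Note: in the reset path above, dm->dc_lock was taken by dm_suspend()
	 * and is only dropped here, keeping other commits out of DC for the
	 * whole GPU-reset window.
	 */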
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.early_fini = amdgpu_dm_early_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	int i;

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	for (i = 0; i < dm->num_of_edps; i++) {
		if (link == dm->backlight_link[i])
			break;
	}
	if (i >= dm->num_of_edps)
		return;
	caps = &dm->backlight_caps[i];
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 /*||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
		caps->aux_support = true;

	if (amdgpu_backlight == 0)
		caps->aux_support = false;
	else if (amdgpu_backlight == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression would normally need floating-point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * we know that CV can be written as CV = 32*q + r. Substituting CV in
	 * the Luminance expression gives 50*(2**q)*(2**(r/32)), so we only
	 * need to pre-compute the values of 2**(r/32). For pre-computing them
	 * we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	/* min luminance: maxLum * (CV/255)^2 / 100 */
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}

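/*
 * Worked example of the max-luminance math above: for max_cll = 70,
 * q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which matches
 * round(50 * 2**(70/32)) = 228.
 */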
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * An EDID-managed connector gets its first update only in the
	 * mode_valid hook; after that the connector sink is set to either a
	 * fake or a physical sink, depending on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				 aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
			 aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			dc_sink_release(aconnector->dc_sink);
		}

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
	struct dm_crtc_state *dm_crtc_state = NULL;

	if (adev->dm.disable_hpd_irq)
		return;

	if (dm_con_state->base.state && dm_con_state->base.crtc)
		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
					dm_con_state->base.state,
					dm_con_state->base.crtc));
	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		if (new_connection_type == dc_connection_none &&
		    aconnector->dc_link->type == dc_connection_none &&
		    dm_crtc_state)
			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;

	handle_hpd_irq_helper(aconnector);
}

static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

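/*
 * The loop above services MST sideband traffic signalled through the DPCD
 * Event Status Indicator bytes: read the ESI block, let the MST manager
 * decode it, ACK the serviced bits back at dpcd_addr + 1, and re-read
 * until no new IRQ is flagged (bounded at 30 iterations).
 */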
static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
					 union hpd_irq_data hpd_irq_data)
{
	struct hpd_rx_irq_offload_work *offload_work =
		kzalloc(sizeof(*offload_work), GFP_KERNEL);

	if (!offload_work) {
		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
		return;
	}

	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
	offload_work->data = hpd_irq_data;
	offload_work->offload_wq = offload_wq;

	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
}

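/*
 * Rationale: hpd_rx servicing such as link-loss recovery and automated-test
 * handling can take long enough that it should not run in the low-level IRQ
 * context handle_hpd_rx_irq() executes in, so it is bounced to the per-link
 * offload workqueue created at init time.
 */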
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	bool result = false;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union hpd_irq_data hpd_irq_data;
	bool link_loss = false;
	bool has_left_work = false;
	int idx = aconnector->base.index;
	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * TODO: this mutex temporarily guards the hpd interrupt path against
	 * a GPIO conflict; once an i2c helper is implemented, it should be
	 * retired.
	 */
	mutex_lock(&aconnector->hpd_lock);

	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
					   &link_loss, true, &has_left_work);

	if (!has_left_work)
		goto out;

	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
		goto out;
	}

	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
		    hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			dm_handle_mst_sideband_msg(aconnector);
			goto out;
		}

		if (link_loss) {
			bool skip = false;

			spin_lock(&offload_wq->offload_lock);
			skip = offload_wq->is_handling_link_loss;

			if (!skip)
				offload_wq->is_handling_link_loss = true;

			spin_unlock(&offload_wq->offload_lock);

			if (!skip)
				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);

			goto out;
		}
	}

out:
	if (result && !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif

	if (dc_link->type != dc_connection_mst_branch)
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);

	mutex_unlock(&aconnector->hpd_lock);
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);

			if (adev->dm.hpd_rx_offload_wq)
				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
					aconnector;
		}
	}
}

55e56389
MR
3306#if defined(CONFIG_DRM_AMD_DC_SI)
3307/* Register IRQ sources and initialize IRQ callbacks */
3308static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3309{
3310 struct dc *dc = adev->dm.dc;
3311 struct common_irq_params *c_irq_params;
3312 struct dc_interrupt_params int_params = {0};
3313 int r;
3314 int i;
3315 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3316
3317 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3318 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3319
3320 /*
3321 * Actions of amdgpu_irq_add_id():
3322 * 1. Register a set() function with base driver.
3323 * Base driver will call set() function to enable/disable an
3324 * interrupt in DC hardware.
3325 * 2. Register amdgpu_dm_irq_handler().
3326 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3327 * coming from DC hardware.
3328 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3329 * for acknowledging and handling. */
3330
3331 /* Use VBLANK interrupt */
3332 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3334 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3334 if (r) {
3335 DRM_ERROR("Failed to add crtc irq id!\n");
3336 return r;
3337 }
3338
3339 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3340 int_params.irq_source =
3341 dc_interrupt_to_irq_source(dc, i + 1, 0);
3342
3343 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3344
3345 c_irq_params->adev = adev;
3346 c_irq_params->irq_src = int_params.irq_source;
3347
3348 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3349 dm_crtc_high_irq, c_irq_params);
3350 }
3351
3352 /* Use GRPH_PFLIP interrupt */
3353 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3354 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3355 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3356 if (r) {
3357 DRM_ERROR("Failed to add page flip irq id!\n");
3358 return r;
3359 }
3360
3361 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3362 int_params.irq_source =
3363 dc_interrupt_to_irq_source(dc, i, 0);
3364
3365 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3366
3367 c_irq_params->adev = adev;
3368 c_irq_params->irq_src = int_params.irq_source;
3369
3370 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3371 dm_pflip_high_irq, c_irq_params);
3372
3373 }
3374
3375 /* HPD */
3376 r = amdgpu_irq_add_id(adev, client_id,
3377 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3378 if (r) {
3379 DRM_ERROR("Failed to add hpd irq id!\n");
3380 return r;
3381 }
3382
3383 register_hpd_handlers(adev);
3384
3385 return 0;
3386}
3387#endif
3388
4562236b
HW
3389/* Register IRQ sources and initialize IRQ callbacks */
3390static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3391{
3392 struct dc *dc = adev->dm.dc;
3393 struct common_irq_params *c_irq_params;
3394 struct dc_interrupt_params int_params = {0};
3395 int r;
3396 int i;
1ffdeca6 3397 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 3398
c08182f2 3399 if (adev->family >= AMDGPU_FAMILY_AI)
3760f76c 3400 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
3401
3402 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3403 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3404
1f6010a9
DF
3405 /*
3406 * Actions of amdgpu_irq_add_id():
4562236b
HW
3407 * 1. Register a set() function with base driver.
3408 * Base driver will call set() function to enable/disable an
3409 * interrupt in DC hardware.
3410 * 2. Register amdgpu_dm_irq_handler().
3411 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3412 * coming from DC hardware.
3413 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3414 * for acknowledging and handling. */
3415
b57de80a 3416 /* Use VBLANK interrupt */
e9029155 3417 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 3418 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
3419 if (r) {
3420 DRM_ERROR("Failed to add crtc irq id!\n");
3421 return r;
3422 }
3423
3424 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3425 int_params.irq_source =
3d761e79 3426 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 3427
b57de80a 3428 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
3429
3430 c_irq_params->adev = adev;
3431 c_irq_params->irq_src = int_params.irq_source;
3432
3433 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3434 dm_crtc_high_irq, c_irq_params);
3435 }
3436
d2574c33
MK
3437 /* Use VUPDATE interrupt */
3438 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3439 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3440 if (r) {
3441 DRM_ERROR("Failed to add vupdate irq id!\n");
3442 return r;
3443 }
3444
3445 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3446 int_params.irq_source =
3447 dc_interrupt_to_irq_source(dc, i, 0);
3448
3449 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3450
3451 c_irq_params->adev = adev;
3452 c_irq_params->irq_src = int_params.irq_source;
3453
3454 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3455 dm_vupdate_high_irq, c_irq_params);
3456 }
3457
3d761e79 3458 /* Use GRPH_PFLIP interrupt */
4562236b
HW
3459 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3460 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 3461 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
3462 if (r) {
3463 DRM_ERROR("Failed to add page flip irq id!\n");
3464 return r;
3465 }
3466
3467 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3468 int_params.irq_source =
3469 dc_interrupt_to_irq_source(dc, i, 0);
3470
3471 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3472
3473 c_irq_params->adev = adev;
3474 c_irq_params->irq_src = int_params.irq_source;
3475
3476 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3477 dm_pflip_high_irq, c_irq_params);
3478
3479 }
3480
3481 /* HPD */
2c8ad2d5
AD
3482 r = amdgpu_irq_add_id(adev, client_id,
3483 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
3484 if (r) {
3485 DRM_ERROR("Failed to add hpd irq id!\n");
3486 return r;
3487 }
3488
3489 register_hpd_handlers(adev);
3490
3491 return 0;
3492}
3493
b86a1aa3 3494#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3495/* Register IRQ sources and initialize IRQ callbacks */
3496static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3497{
3498 struct dc *dc = adev->dm.dc;
3499 struct common_irq_params *c_irq_params;
3500 struct dc_interrupt_params int_params = {0};
3501 int r;
3502 int i;
660d5406
WL
3503#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3504 static const unsigned int vrtl_int_srcid[] = {
3505 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3506 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3507 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3508 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3509 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3510 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3511 };
3512#endif
ff5ef992
AD
3513
3514 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3515 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3516
1f6010a9
DF
3517 /*
3518 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
3519 * 1. Register a set() function with base driver.
3520 * Base driver will call set() function to enable/disable an
3521 * interrupt in DC hardware.
3522 * 2. Register amdgpu_dm_irq_handler().
3523 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3524 * coming from DC hardware.
3525 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3526 * for acknowledging and handling.
1f6010a9 3527 */
ff5ef992
AD
3528
3529 /* Use VSTARTUP interrupt */
3530 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3531 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3532 i++) {
3760f76c 3533 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
3534
3535 if (r) {
3536 DRM_ERROR("Failed to add crtc irq id!\n");
3537 return r;
3538 }
3539
3540 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3541 int_params.irq_source =
3542 dc_interrupt_to_irq_source(dc, i, 0);
3543
3544 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3545
3546 c_irq_params->adev = adev;
3547 c_irq_params->irq_src = int_params.irq_source;
3548
2346ef47
NK
3549 amdgpu_dm_irq_register_interrupt(
3550 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3551 }
3552
86bc2219
WL
3553 /* Use otg vertical line interrupt */
3554#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
660d5406
WL
3555 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3556 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3557 vrtl_int_srcid[i], &adev->vline0_irq);
86bc2219
WL
3558
3559 if (r) {
3560 DRM_ERROR("Failed to add vline0 irq id!\n");
3561 return r;
3562 }
3563
3564 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3565 int_params.irq_source =
660d5406
WL
3566 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3567
3568 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3569 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3570 break;
3571 }
86bc2219
WL
3572
3573 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3574 - DC_IRQ_SOURCE_DC1_VLINE0];
3575
3576 c_irq_params->adev = adev;
3577 c_irq_params->irq_src = int_params.irq_source;
3578
3579 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3580 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3581 }
3582#endif
3583
2346ef47
NK
3584 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3585 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3586 * to trigger at end of each vblank, regardless of state of the lock,
3587 * matching DCE behaviour.
3588 */
3589 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3590 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3591 i++) {
3592 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3593
3594 if (r) {
3595 DRM_ERROR("Failed to add vupdate irq id!\n");
3596 return r;
3597 }
3598
3599 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3600 int_params.irq_source =
3601 dc_interrupt_to_irq_source(dc, i, 0);
3602
3603 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3604
3605 c_irq_params->adev = adev;
3606 c_irq_params->irq_src = int_params.irq_source;
3607
ff5ef992 3608 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 3609 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
3610 }
3611
ff5ef992
AD
3612 /* Use GRPH_PFLIP interrupt */
3613 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3614 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3615 i++) {
3760f76c 3616 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
3617 if (r) {
3618 DRM_ERROR("Failed to add page flip irq id!\n");
3619 return r;
3620 }
3621
3622 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3623 int_params.irq_source =
3624 dc_interrupt_to_irq_source(dc, i, 0);
3625
3626 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3627
3628 c_irq_params->adev = adev;
3629 c_irq_params->irq_src = int_params.irq_source;
3630
3631 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3632 dm_pflip_high_irq, c_irq_params);
3633
3634 }
3635
81927e28
JS
3636 /* HPD */
3637 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3638 &adev->hpd_irq);
3639 if (r) {
3640 DRM_ERROR("Failed to add hpd irq id!\n");
3641 return r;
3642 }
a08f16cf 3643
81927e28 3644 register_hpd_handlers(adev);
a08f16cf 3645
81927e28
JS
3646 return 0;
3647}
3648/* Register Outbox IRQ sources and initialize IRQ callbacks */
3649static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3650{
3651 struct dc *dc = adev->dm.dc;
3652 struct common_irq_params *c_irq_params;
3653 struct dc_interrupt_params int_params = {0};
3654 int r, i;
3655
3656 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3657 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3658
3659 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3660 &adev->dmub_outbox_irq);
3661 if (r) {
3662 DRM_ERROR("Failed to add outbox irq id!\n");
3663 return r;
3664 }
3665
3666 if (dc->ctx->dmub_srv) {
3667 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3668 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
a08f16cf 3669 int_params.irq_source =
81927e28 3670 dc_interrupt_to_irq_source(dc, i, 0);
a08f16cf 3671
81927e28 3672 c_irq_params = &adev->dm.dmub_outbox_params[0];
a08f16cf
LHM
3673
3674 c_irq_params->adev = adev;
3675 c_irq_params->irq_src = int_params.irq_source;
3676
3677 amdgpu_dm_irq_register_interrupt(adev, &int_params,
81927e28 3678 dm_dmub_outbox1_low_irq, c_irq_params);
ff5ef992
AD
3679 }
3680
ff5ef992
AD
3681 return 0;
3682}
3683#endif
3684
eb3dc897
NK
3685/*
3686 * Acquires the lock for the atomic state object and returns
3687 * the new atomic state.
3688 *
3689 * This should only be called during atomic check.
3690 */
3691static int dm_atomic_get_state(struct drm_atomic_state *state,
3692 struct dm_atomic_state **dm_state)
3693{
3694 struct drm_device *dev = state->dev;
1348969a 3695 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3696 struct amdgpu_display_manager *dm = &adev->dm;
3697 struct drm_private_state *priv_state;
eb3dc897
NK
3698
3699 if (*dm_state)
3700 return 0;
3701
eb3dc897
NK
3702 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3703 if (IS_ERR(priv_state))
3704 return PTR_ERR(priv_state);
3705
3706 *dm_state = to_dm_atomic_state(priv_state);
3707
3708 return 0;
3709}
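/*
 * A minimal usage sketch (hypothetical caller, not part of the driver):
 * during atomic check, the first call fills and caches *dm_state, and
 * later calls in the same pass return the cached pointer.
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	... dm_state->context now points at the private DC state ...
 */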
3710
dfd84d90 3711static struct dm_atomic_state *
eb3dc897
NK
3712dm_atomic_get_new_state(struct drm_atomic_state *state)
3713{
3714 struct drm_device *dev = state->dev;
1348969a 3715 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3716 struct amdgpu_display_manager *dm = &adev->dm;
3717 struct drm_private_obj *obj;
3718 struct drm_private_state *new_obj_state;
3719 int i;
3720
3721 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3722 if (obj->funcs == dm->atomic_obj.funcs)
3723 return to_dm_atomic_state(new_obj_state);
3724 }
3725
3726 return NULL;
3727}
3728
eb3dc897
NK
3729static struct drm_private_state *
3730dm_atomic_duplicate_state(struct drm_private_obj *obj)
3731{
3732 struct dm_atomic_state *old_state, *new_state;
3733
3734 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3735 if (!new_state)
3736 return NULL;
3737
3738 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3739
813d20dc
AW
3740 old_state = to_dm_atomic_state(obj->state);
3741
3742 if (old_state && old_state->context)
3743 new_state->context = dc_copy_state(old_state->context);
3744
eb3dc897
NK
3745 if (!new_state->context) {
3746 kfree(new_state);
3747 return NULL;
3748 }
3749
eb3dc897
NK
3750 return &new_state->base;
3751}
3752
3753static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3754 struct drm_private_state *state)
3755{
3756 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3757
3758 if (dm_state && dm_state->context)
3759 dc_release_state(dm_state->context);
3760
3761 kfree(dm_state);
3762}
3763
3764static struct drm_private_state_funcs dm_atomic_state_funcs = {
3765 .atomic_duplicate_state = dm_atomic_duplicate_state,
3766 .atomic_destroy_state = dm_atomic_destroy_state,
3767};
3768
4562236b
HW
3769static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3770{
eb3dc897 3771 struct dm_atomic_state *state;
4562236b
HW
3772 int r;
3773
3774 adev->mode_info.mode_config_initialized = true;
3775
4a580877
LT
3776 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3777 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3778
4a580877
LT
3779 adev_to_drm(adev)->mode_config.max_width = 16384;
3780 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3781
4a580877
LT
3782 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3783 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3784 /* indicates support for immediate flip */
4a580877 3785 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3786
4a580877 3787 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3788
eb3dc897
NK
3789 state = kzalloc(sizeof(*state), GFP_KERNEL);
3790 if (!state)
3791 return -ENOMEM;
3792
813d20dc 3793 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3794 if (!state->context) {
3795 kfree(state);
3796 return -ENOMEM;
3797 }
3798
3799 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3800
4a580877 3801 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3802 &adev->dm.atomic_obj,
eb3dc897
NK
3803 &state->base,
3804 &dm_atomic_state_funcs);
3805
3dc9b1ce 3806 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3807 if (r) {
3808 dc_release_state(state->context);
3809 kfree(state);
4562236b 3810 return r;
b67a468a 3811 }
4562236b 3812
6ce8f316 3813 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3814 if (r) {
3815 dc_release_state(state->context);
3816 kfree(state);
6ce8f316 3817 return r;
b67a468a 3818 }
6ce8f316 3819
4562236b
HW
3820 return 0;
3821}
3822
206bbafe
DF
3823#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3824#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3825#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3826
4562236b
HW
3827#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3828 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3829
7fd13bae
AD
3830static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3831 int bl_idx)
206bbafe
DF
3832{
3833#if defined(CONFIG_ACPI)
3834 struct amdgpu_dm_backlight_caps caps;
3835
58965855
FS
3836 memset(&caps, 0, sizeof(caps));
3837
7fd13bae 3838 if (dm->backlight_caps[bl_idx].caps_valid)
206bbafe
DF
3839 return;
3840
f9b7f370 3841 amdgpu_acpi_get_backlight_caps(&caps);
206bbafe 3842 if (caps.caps_valid) {
7fd13bae 3843 dm->backlight_caps[bl_idx].caps_valid = true;
94562810
RS
3844 if (caps.aux_support)
3845 return;
7fd13bae
AD
3846 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3847 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
206bbafe 3848 } else {
7fd13bae 3849 dm->backlight_caps[bl_idx].min_input_signal =
206bbafe 3850 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
7fd13bae 3851 dm->backlight_caps[bl_idx].max_input_signal =
206bbafe
DF
3852 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3853 }
3854#else
7fd13bae 3855 if (dm->backlight_caps[bl_idx].aux_support)
94562810
RS
3856 return;
3857
7fd13bae
AD
3858 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3859 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3860#endif
3861}
3862
69d9f427
AM
3863static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3864 unsigned *min, unsigned *max)
94562810 3865{
94562810 3866 if (!caps)
69d9f427 3867 return 0;
94562810 3868
69d9f427
AM
3869 if (caps->aux_support) {
3870 // Firmware limits are in nits, DC API wants millinits.
3871 *max = 1000 * caps->aux_max_input_signal;
3872 *min = 1000 * caps->aux_min_input_signal;
94562810 3873 } else {
69d9f427
AM
3874 // Firmware limits are 8-bit, PWM control is 16-bit.
3875 *max = 0x101 * caps->max_input_signal;
3876 *min = 0x101 * caps->min_input_signal;
94562810 3877 }
69d9f427
AM
3878 return 1;
3879}
94562810 3880
69d9f427
AM
3881static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3882 uint32_t brightness)
3883{
3884 unsigned min, max;
94562810 3885
69d9f427
AM
3886 if (!get_brightness_range(caps, &min, &max))
3887 return brightness;
3888
3889 // Rescale 0..255 to min..max
3890 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3891 AMDGPU_MAX_BL_LEVEL);
3892}
3893
3894static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3895 uint32_t brightness)
3896{
3897 unsigned min, max;
3898
3899 if (!get_brightness_range(caps, &min, &max))
3900 return brightness;
3901
3902 if (brightness < min)
3903 return 0;
3904 // Rescale min..max to 0..255
3905 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3906 max - min);
94562810
RS
3907}
3908
3d6c9164 3909static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
7fd13bae 3910 int bl_idx,
3d6c9164 3911 u32 user_brightness)
4562236b 3912{
206bbafe 3913 struct amdgpu_dm_backlight_caps caps;
7fd13bae
AD
3914 struct dc_link *link;
3915 u32 brightness;
94562810 3916 bool rc;
4562236b 3917
7fd13bae
AD
3918 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3919 caps = dm->backlight_caps[bl_idx];
94562810 3920
7fd13bae 3921 dm->brightness[bl_idx] = user_brightness;
1f579254
AD
3922 /* update scratch register */
3923 if (bl_idx == 0)
3924 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
7fd13bae
AD
3925 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3926 link = (struct dc_link *)dm->backlight_link[bl_idx];
94562810 3927
3d6c9164 3928 /* Change brightness based on AUX property */
118b4627 3929 if (caps.aux_support) {
7fd13bae
AD
3930 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3931 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3932 if (!rc)
3933 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
118b4627 3934 } else {
7fd13bae
AD
3935 rc = dc_link_set_backlight_level(link, brightness, 0);
3936 if (!rc)
3937 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
118b4627 3938 }
94562810
RS
3939
3940 return rc ? 0 : 1;
4562236b
HW
3941}
3942
3d6c9164 3943static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4562236b 3944{
620a0d27 3945 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 3946 int i;
3d6c9164 3947
7fd13bae
AD
3948 for (i = 0; i < dm->num_of_edps; i++) {
3949 if (bd == dm->backlight_dev[i])
3950 break;
3951 }
3952 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3953 i = 0;
3954 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3d6c9164
AD
3955
3956 return 0;
3957}
3958
7fd13bae
AD
3959static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3960 int bl_idx)
3d6c9164 3961{
0ad3e64e 3962 struct amdgpu_dm_backlight_caps caps;
7fd13bae 3963 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
0ad3e64e 3964
7fd13bae
AD
3965 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3966 caps = dm->backlight_caps[bl_idx];
620a0d27 3967
0ad3e64e 3968 if (caps.aux_support) {
0ad3e64e
AD
3969 u32 avg, peak;
3970 bool rc;
3971
3972 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3973 if (!rc)
7fd13bae 3974 return dm->brightness[bl_idx];
0ad3e64e
AD
3975 return convert_brightness_to_user(&caps, avg);
3976 } else {
7fd13bae 3977 int ret = dc_link_get_backlight_level(link);
0ad3e64e
AD
3978
3979 if (ret == DC_ERROR_UNEXPECTED)
7fd13bae 3980 return dm->brightness[bl_idx];
0ad3e64e
AD
3981 return convert_brightness_to_user(&caps, ret);
3982 }
4562236b
HW
3983}
3984
3d6c9164
AD
3985static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3986{
3987 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 3988 int i;
3d6c9164 3989
7fd13bae
AD
3990 for (i = 0; i < dm->num_of_edps; i++) {
3991 if (bd == dm->backlight_dev[i])
3992 break;
3993 }
3994 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3995 i = 0;
3996 return amdgpu_dm_backlight_get_level(dm, i);
3d6c9164
AD
3997}
3998
4562236b 3999static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 4000 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
4001 .get_brightness = amdgpu_dm_backlight_get_brightness,
4002 .update_status = amdgpu_dm_backlight_update_status,
4003};
4004
7578ecda
AD
4005static void
4006amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
4007{
4008 char bl_name[16];
4009 struct backlight_properties props = { 0 };
4010
7fd13bae
AD
4011 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4012 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
206bbafe 4013
4562236b 4014 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 4015 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
4016 props.type = BACKLIGHT_RAW;
4017
4018 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
7fd13bae 4019 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4562236b 4020
7fd13bae
AD
4021 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4022 adev_to_drm(dm->adev)->dev,
4023 dm,
4024 &amdgpu_dm_backlight_ops,
4025 &props);
4562236b 4026
7fd13bae 4027 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4562236b
HW
4028 DRM_ERROR("DM: Backlight registration failed!\n");
4029 else
f1ad2f5e 4030 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b 4031}
4562236b
HW
4032#endif
4033
df534fff 4034static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 4035 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
4036 enum drm_plane_type plane_type,
4037 const struct dc_plane_cap *plane_cap)
df534fff 4038{
f180b4bc 4039 struct drm_plane *plane;
df534fff
S
4040 unsigned long possible_crtcs;
4041 int ret = 0;
4042
f180b4bc 4043 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
4044 if (!plane) {
4045 DRM_ERROR("KMS: Failed to allocate plane\n");
4046 return -ENOMEM;
4047 }
b2fddb13 4048 plane->type = plane_type;
df534fff
S
4049
4050 /*
b2fddb13
NK
4051 * HACK: IGT tests expect that the primary plane for a CRTC
4052 * can only have one possible CRTC. Only expose support for
4053 * any CRTC if they're not going to be used as a primary plane
4054 * for a CRTC - like overlay or underlay planes.
df534fff
S
4055 */
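 /*
 * For example, plane_id 2 yields possible_crtcs == 0x4 (CRTC 2 only),
 * while planes at or beyond max_streams get 0xff (any CRTC).
 */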
4056 possible_crtcs = 1 << plane_id;
4057 if (plane_id >= dm->dc->caps.max_streams)
4058 possible_crtcs = 0xff;
4059
cc1fec57 4060 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
4061
4062 if (ret) {
4063 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 4064 kfree(plane);
df534fff
S
4065 return ret;
4066 }
4067
54087768
NK
4068 if (mode_info)
4069 mode_info->planes[plane_id] = plane;
4070
df534fff
S
4071 return ret;
4072}
4073
89fc8d4e
HW
4074
4075static void register_backlight_device(struct amdgpu_display_manager *dm,
4076 struct dc_link *link)
4077{
4078#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4079 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4080
4081 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4082 link->type != dc_connection_none) {
1f6010a9
DF
4083 /*
4084 * Even if registration fails, we should continue with
89fc8d4e
HW
4085 * DM initialization because not having a backlight control
4086 * is better than a black screen.
4087 */
7fd13bae 4088 if (!dm->backlight_dev[dm->num_of_edps])
118b4627 4089 amdgpu_dm_register_backlight_device(dm);
89fc8d4e 4090
7fd13bae 4091 if (dm->backlight_dev[dm->num_of_edps]) {
118b4627
ML
4092 dm->backlight_link[dm->num_of_edps] = link;
4093 dm->num_of_edps++;
4094 }
89fc8d4e
HW
4095 }
4096#endif
4097}
4098
4099
1f6010a9
DF
4100/*
4101 * In this architecture, the association
4562236b
HW
4102 * connector -> encoder -> crtc
4103 * is not really required. The crtc and connector will hold the
4104 * display_index as an abstraction to use with DAL component
4105 *
4106 * Returns 0 on success
4107 */
7578ecda 4108static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
4109{
4110 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 4111 int32_t i;
c84dec2f 4112 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 4113 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 4114 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 4115 uint32_t link_cnt;
cc1fec57 4116 int32_t primary_planes;
fbbdadf2 4117 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 4118 const struct dc_plane_cap *plane;
9470620e 4119 bool psr_feature_enabled = false;
4562236b 4120
d58159de
AD
4121 dm->display_indexes_num = dm->dc->caps.max_streams;
4122 /* Update the actual used number of crtc */
4123 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4124
4562236b 4125 link_cnt = dm->dc->caps.max_links;
4562236b
HW
4126 if (amdgpu_dm_mode_config_init(dm->adev)) {
4127 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 4128 return -EINVAL;
4562236b
HW
4129 }
4130
b2fddb13
NK
4131 /* There is one primary plane per CRTC */
4132 primary_planes = dm->dc->caps.max_streams;
54087768 4133 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 4134
b2fddb13
NK
4135 /*
4136 * Initialize primary planes, implicit planes for legacy IOCTLS.
4137 * Order is reversed to match iteration order in atomic check.
4138 */
4139 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
4140 plane = &dm->dc->caps.planes[i];
4141
b2fddb13 4142 if (initialize_plane(dm, mode_info, i,
cc1fec57 4143 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 4144 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 4145 goto fail;
d4e13b0d 4146 }
df534fff 4147 }
92f3ac40 4148
0d579c7e
NK
4149 /*
4150 * Initialize overlay planes, index starting after primary planes.
4151 * These planes have a higher DRM index than the primary planes since
4152 * they should be considered as having a higher z-order.
4153 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
4154 *
4155 * Only support DCN for now, and only expose one so we don't encourage
4156 * userspace to use up all the pipes.
0d579c7e 4157 */
cc1fec57
NK
4158 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4159 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4160
4161 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4162 continue;
4163
4164 if (!plane->blends_with_above || !plane->blends_with_below)
4165 continue;
4166
ea36ad34 4167 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
4168 continue;
4169
54087768 4170 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 4171 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 4172 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 4173 goto fail;
d4e13b0d 4174 }
cc1fec57
NK
4175
4176 /* Only create one overlay plane. */
4177 break;
d4e13b0d 4178 }
4562236b 4179
d4e13b0d 4180 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 4181 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 4182 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 4183 goto fail;
4562236b 4184 }
4562236b 4185
50610b74 4186#if defined(CONFIG_DRM_AMD_DC_DCN)
81927e28 4187 /* Use Outbox interrupt */
1d789535 4188 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4189 case IP_VERSION(3, 0, 0):
4190 case IP_VERSION(3, 1, 2):
4191 case IP_VERSION(3, 1, 3):
4192 case IP_VERSION(2, 1, 0):
81927e28
JS
4193 if (register_outbox_irq_handlers(dm->adev)) {
4194 DRM_ERROR("DM: Failed to initialize IRQ\n");
4195 goto fail;
4196 }
4197 break;
4198 default:
c08182f2 4199 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
1d789535 4200 adev->ip_versions[DCE_HWIP][0]);
81927e28 4201 }
9470620e
NK
4202
4203 /* Determine whether to enable PSR support by default. */
4204 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4205 switch (adev->ip_versions[DCE_HWIP][0]) {
4206 case IP_VERSION(3, 1, 2):
4207 case IP_VERSION(3, 1, 3):
4208 psr_feature_enabled = true;
4209 break;
4210 default:
4211 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4212 break;
4213 }
4214 }
50610b74 4215#endif
81927e28 4216
4562236b
HW
4217 /* loops over all connectors on the board */
4218 for (i = 0; i < link_cnt; i++) {
89fc8d4e 4219 struct dc_link *link = NULL;
4562236b
HW
4220
4221 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4222 DRM_ERROR(
4223 "KMS: Cannot support more than %d display indexes\n",
4224 AMDGPU_DM_MAX_DISPLAY_INDEX);
4225 continue;
4226 }
4227
4228 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4229 if (!aconnector)
cd8a2ae8 4230 goto fail;
4562236b
HW
4231
4232 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 4233 if (!aencoder)
cd8a2ae8 4234 goto fail;
4562236b
HW
4235
4236 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4237 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 4238 goto fail;
4562236b
HW
4239 }
4240
4241 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4242 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 4243 goto fail;
4562236b
HW
4244 }
4245
89fc8d4e
HW
4246 link = dc_get_link_at_index(dm->dc, i);
4247
fbbdadf2
BL
4248 if (!dc_link_detect_sink(link, &new_connection_type))
4249 DRM_ERROR("KMS: Failed to detect connector\n");
4250
4251 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4252 emulated_link_detect(link);
4253 amdgpu_dm_update_connector_after_detect(aconnector);
4254
4255 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 4256 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 4257 register_backlight_device(dm, link);
b295ce39
RL
4258 if (dm->num_of_edps)
4259 update_connector_ext_caps(aconnector);
9470620e 4260 if (psr_feature_enabled)
397a9bc5 4261 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
4262 }
4263
4264
4562236b
HW
4265 }
4266
4267 /* Software is initialized. Now we can register interrupt handlers. */
4268 switch (adev->asic_type) {
55e56389
MR
4269#if defined(CONFIG_DRM_AMD_DC_SI)
4270 case CHIP_TAHITI:
4271 case CHIP_PITCAIRN:
4272 case CHIP_VERDE:
4273 case CHIP_OLAND:
4274 if (dce60_register_irq_handlers(dm->adev)) {
4275 DRM_ERROR("DM: Failed to initialize IRQ\n");
4276 goto fail;
4277 }
4278 break;
4279#endif
4562236b
HW
4280 case CHIP_BONAIRE:
4281 case CHIP_HAWAII:
cd4b356f
AD
4282 case CHIP_KAVERI:
4283 case CHIP_KABINI:
4284 case CHIP_MULLINS:
4562236b
HW
4285 case CHIP_TONGA:
4286 case CHIP_FIJI:
4287 case CHIP_CARRIZO:
4288 case CHIP_STONEY:
4289 case CHIP_POLARIS11:
4290 case CHIP_POLARIS10:
b264d345 4291 case CHIP_POLARIS12:
7737de91 4292 case CHIP_VEGAM:
2c8ad2d5 4293 case CHIP_VEGA10:
2325ff30 4294 case CHIP_VEGA12:
1fe6bf2f 4295 case CHIP_VEGA20:
4562236b
HW
4296 if (dce110_register_irq_handlers(dm->adev)) {
4297 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 4298 goto fail;
4562236b
HW
4299 }
4300 break;
4301 default:
c08182f2 4302#if defined(CONFIG_DRM_AMD_DC_DCN)
1d789535 4303 switch (adev->ip_versions[DCE_HWIP][0]) {
559f591d
AD
4304 case IP_VERSION(1, 0, 0):
4305 case IP_VERSION(1, 0, 1):
c08182f2
AD
4306 case IP_VERSION(2, 0, 2):
4307 case IP_VERSION(2, 0, 3):
4308 case IP_VERSION(2, 0, 0):
4309 case IP_VERSION(2, 1, 0):
4310 case IP_VERSION(3, 0, 0):
4311 case IP_VERSION(3, 0, 2):
4312 case IP_VERSION(3, 0, 3):
4313 case IP_VERSION(3, 0, 1):
4314 case IP_VERSION(3, 1, 2):
4315 case IP_VERSION(3, 1, 3):
4316 if (dcn10_register_irq_handlers(dm->adev)) {
4317 DRM_ERROR("DM: Failed to initialize IRQ\n");
4318 goto fail;
4319 }
4320 break;
4321 default:
2cbc6f42 4322 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
1d789535 4323 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4324 goto fail;
c08182f2
AD
4325 }
4326#endif
2cbc6f42 4327 break;
4562236b
HW
4328 }
4329
4562236b 4330 return 0;
cd8a2ae8 4331fail:
4562236b 4332 kfree(aencoder);
4562236b 4333 kfree(aconnector);
54087768 4334
59d0f396 4335 return -EINVAL;
4562236b
HW
4336}
4337
7578ecda 4338static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b 4339{
eb3dc897 4340 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
4341 return;
4342}
4343
4344/******************************************************************************
4345 * amdgpu_display_funcs functions
4346 *****************************************************************************/
4347
1f6010a9 4348/*
4562236b
HW
4349 * dm_bandwidth_update - program display watermarks
4350 *
4351 * @adev: amdgpu_device pointer
4352 *
4353 * Calculate and program the display watermarks and line buffer allocation.
4354 */
4355static void dm_bandwidth_update(struct amdgpu_device *adev)
4356{
49c07a99 4357 /* TODO: implement later */
4562236b
HW
4358}
4359
39cc5be2 4360static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
4361 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4362 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
4363 .backlight_set_level = NULL, /* never called for DC */
4364 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
4365 .hpd_sense = NULL,/* called unconditionally */
4366 .hpd_set_polarity = NULL, /* called unconditionally */
4367 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4368 .page_flip_get_scanoutpos =
4369 dm_crtc_get_scanoutpos,/* called unconditionally */
4370 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4371 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4372};
4373
4374#if defined(CONFIG_DEBUG_KERNEL_DC)
4375
3ee6b26b
AD
4376static ssize_t s3_debug_store(struct device *device,
4377 struct device_attribute *attr,
4378 const char *buf,
4379 size_t count)
4562236b
HW
4380{
4381 int ret;
4382 int s3_state;
ef1de361 4383 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 4384 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
4385
4386 ret = kstrtoint(buf, 0, &s3_state);
4387
4388 if (ret == 0) {
4389 if (s3_state) {
4390 dm_resume(adev);
4a580877 4391 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
4392 } else
4393 dm_suspend(adev);
4394 }
4395
4396 return ret == 0 ? count : 0;
4397}
4398
4399DEVICE_ATTR_WO(s3_debug);
4400
4401#endif
4402
4403static int dm_early_init(void *handle)
4404{
4405 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4406
4562236b 4407 switch (adev->asic_type) {
55e56389
MR
4408#if defined(CONFIG_DRM_AMD_DC_SI)
4409 case CHIP_TAHITI:
4410 case CHIP_PITCAIRN:
4411 case CHIP_VERDE:
4412 adev->mode_info.num_crtc = 6;
4413 adev->mode_info.num_hpd = 6;
4414 adev->mode_info.num_dig = 6;
4415 break;
4416 case CHIP_OLAND:
4417 adev->mode_info.num_crtc = 2;
4418 adev->mode_info.num_hpd = 2;
4419 adev->mode_info.num_dig = 2;
4420 break;
4421#endif
4562236b
HW
4422 case CHIP_BONAIRE:
4423 case CHIP_HAWAII:
4424 adev->mode_info.num_crtc = 6;
4425 adev->mode_info.num_hpd = 6;
4426 adev->mode_info.num_dig = 6;
4562236b 4427 break;
cd4b356f
AD
4428 case CHIP_KAVERI:
4429 adev->mode_info.num_crtc = 4;
4430 adev->mode_info.num_hpd = 6;
4431 adev->mode_info.num_dig = 7;
cd4b356f
AD
4432 break;
4433 case CHIP_KABINI:
4434 case CHIP_MULLINS:
4435 adev->mode_info.num_crtc = 2;
4436 adev->mode_info.num_hpd = 6;
4437 adev->mode_info.num_dig = 6;
cd4b356f 4438 break;
4562236b
HW
4439 case CHIP_FIJI:
4440 case CHIP_TONGA:
4441 adev->mode_info.num_crtc = 6;
4442 adev->mode_info.num_hpd = 6;
4443 adev->mode_info.num_dig = 7;
4562236b
HW
4444 break;
4445 case CHIP_CARRIZO:
4446 adev->mode_info.num_crtc = 3;
4447 adev->mode_info.num_hpd = 6;
4448 adev->mode_info.num_dig = 9;
4562236b
HW
4449 break;
4450 case CHIP_STONEY:
4451 adev->mode_info.num_crtc = 2;
4452 adev->mode_info.num_hpd = 6;
4453 adev->mode_info.num_dig = 9;
4562236b
HW
4454 break;
4455 case CHIP_POLARIS11:
b264d345 4456 case CHIP_POLARIS12:
4562236b
HW
4457 adev->mode_info.num_crtc = 5;
4458 adev->mode_info.num_hpd = 5;
4459 adev->mode_info.num_dig = 5;
4562236b
HW
4460 break;
4461 case CHIP_POLARIS10:
7737de91 4462 case CHIP_VEGAM:
4562236b
HW
4463 adev->mode_info.num_crtc = 6;
4464 adev->mode_info.num_hpd = 6;
4465 adev->mode_info.num_dig = 6;
4562236b 4466 break;
2c8ad2d5 4467 case CHIP_VEGA10:
2325ff30 4468 case CHIP_VEGA12:
1fe6bf2f 4469 case CHIP_VEGA20:
2c8ad2d5
AD
4470 adev->mode_info.num_crtc = 6;
4471 adev->mode_info.num_hpd = 6;
4472 adev->mode_info.num_dig = 6;
4473 break;
4562236b 4474 default:
c08182f2 4475#if defined(CONFIG_DRM_AMD_DC_DCN)
1d789535 4476 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4477 case IP_VERSION(2, 0, 2):
4478 case IP_VERSION(3, 0, 0):
4479 adev->mode_info.num_crtc = 6;
4480 adev->mode_info.num_hpd = 6;
4481 adev->mode_info.num_dig = 6;
4482 break;
4483 case IP_VERSION(2, 0, 0):
4484 case IP_VERSION(3, 0, 2):
4485 adev->mode_info.num_crtc = 5;
4486 adev->mode_info.num_hpd = 5;
4487 adev->mode_info.num_dig = 5;
4488 break;
4489 case IP_VERSION(2, 0, 3):
4490 case IP_VERSION(3, 0, 3):
4491 adev->mode_info.num_crtc = 2;
4492 adev->mode_info.num_hpd = 2;
4493 adev->mode_info.num_dig = 2;
4494 break;
559f591d
AD
4495 case IP_VERSION(1, 0, 0):
4496 case IP_VERSION(1, 0, 1):
c08182f2
AD
4497 case IP_VERSION(3, 0, 1):
4498 case IP_VERSION(2, 1, 0):
4499 case IP_VERSION(3, 1, 2):
4500 case IP_VERSION(3, 1, 3):
4501 adev->mode_info.num_crtc = 4;
4502 adev->mode_info.num_hpd = 4;
4503 adev->mode_info.num_dig = 4;
4504 break;
4505 default:
2cbc6f42 4506 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
1d789535 4507 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4508 return -EINVAL;
c08182f2
AD
4509 }
4510#endif
2cbc6f42 4511 break;
4562236b
HW
4512 }
4513
c8dd5715
MD
4514 amdgpu_dm_set_irq_funcs(adev);
4515
39cc5be2
AD
4516 if (adev->mode_info.funcs == NULL)
4517 adev->mode_info.funcs = &dm_display_funcs;
4518
1f6010a9
DF
4519 /*
4520 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 4521 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
4522 * amdgpu_device_init()
4523 */
4562236b
HW
4524#if defined(CONFIG_DEBUG_KERNEL_DC)
4525 device_create_file(
4a580877 4526 adev_to_drm(adev)->dev,
4562236b
HW
4527 &dev_attr_s3_debug);
4528#endif
4529
4530 return 0;
4531}
4532
9b690ef3 4533static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
4534 struct dc_stream_state *new_stream,
4535 struct dc_stream_state *old_stream)
9b690ef3 4536{
2afda735 4537 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4538}
4539
4540static bool modereset_required(struct drm_crtc_state *crtc_state)
4541{
2afda735 4542 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4543}
4544
7578ecda 4545static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
4546{
4547 drm_encoder_cleanup(encoder);
4548 kfree(encoder);
4549}
4550
4551static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4552 .destroy = amdgpu_dm_encoder_destroy,
4553};
4554
e7b07cee 4555
6300b3bd
MK
4556static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4557 struct drm_framebuffer *fb,
4558 int *min_downscale, int *max_upscale)
4559{
4560 struct amdgpu_device *adev = drm_to_adev(dev);
4561 struct dc *dc = adev->dm.dc;
4562 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4563 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4564
4565 switch (fb->format->format) {
4566 case DRM_FORMAT_P010:
4567 case DRM_FORMAT_NV12:
4568 case DRM_FORMAT_NV21:
4569 *max_upscale = plane_cap->max_upscale_factor.nv12;
4570 *min_downscale = plane_cap->max_downscale_factor.nv12;
4571 break;
4572
4573 case DRM_FORMAT_XRGB16161616F:
4574 case DRM_FORMAT_ARGB16161616F:
4575 case DRM_FORMAT_XBGR16161616F:
4576 case DRM_FORMAT_ABGR16161616F:
4577 *max_upscale = plane_cap->max_upscale_factor.fp16;
4578 *min_downscale = plane_cap->max_downscale_factor.fp16;
4579 break;
4580
4581 default:
4582 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4583 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4584 break;
4585 }
4586
4587 /*
4588 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4589 * scaling factor of 1.0 == 1000 units.
4590 */
4591 if (*max_upscale == 1)
4592 *max_upscale = 1000;
4593
4594 if (*min_downscale == 1)
4595 *min_downscale = 1000;
4596}
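/*
 * A worked example of these units (a sketch, using the fallback limits
 * from fill_dc_scaling_info() below): with min_downscale = 250 (0.25x)
 * and max_upscale = 16000 (16x), scaling a 1920-wide source to 960
 * gives 960 * 1000 / 1920 = 500 and is accepted, while scaling to 240
 * gives 125 < 250 and is rejected with -EINVAL.
 */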
4597
4598
4375d625
S
4599static int fill_dc_scaling_info(struct amdgpu_device *adev,
4600 const struct drm_plane_state *state,
695af5f9 4601 struct dc_scaling_info *scaling_info)
e7b07cee 4602{
6300b3bd 4603 int scale_w, scale_h, min_downscale, max_upscale;
e7b07cee 4604
695af5f9 4605 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 4606
695af5f9
NK
4607 /* Source is fixed 16.16 but we ignore mantissa for now... */
4608 scaling_info->src_rect.x = state->src_x >> 16;
4609 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 4610
d89f6048
HW
4611 /*
4612 * For reasons we don't (yet) fully understand a non-zero
4613 * src_y coordinate into an NV12 buffer can cause a
4375d625
S
4614 * system hang on DCN1x.
4615 * To avoid hangs (and maybe be overly cautious)
d89f6048
HW
4616 * let's reject both non-zero src_x and src_y.
4617 *
4618 * We currently know of only one use-case to reproduce a
4619 * scenario with non-zero src_x and src_y for NV12, which
4620 * is to gesture the YouTube Android app into full screen
4621 * on ChromeOS.
4622 */
4375d625
S
4623 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4624 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4625 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4626 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
d89f6048
HW
4627 return -EINVAL;
4628
695af5f9
NK
4629 scaling_info->src_rect.width = state->src_w >> 16;
4630 if (scaling_info->src_rect.width == 0)
4631 return -EINVAL;
4632
4633 scaling_info->src_rect.height = state->src_h >> 16;
4634 if (scaling_info->src_rect.height == 0)
4635 return -EINVAL;
4636
4637 scaling_info->dst_rect.x = state->crtc_x;
4638 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
4639
4640 if (state->crtc_w == 0)
695af5f9 4641 return -EINVAL;
e7b07cee 4642
695af5f9 4643 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
4644
4645 if (state->crtc_h == 0)
695af5f9 4646 return -EINVAL;
e7b07cee 4647
695af5f9 4648 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 4649
695af5f9
NK
4650 /* DRM doesn't specify clipping on destination output. */
4651 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 4652
6300b3bd
MK
4653 /* Validate scaling per-format with DC plane caps */
4654 if (state->plane && state->plane->dev && state->fb) {
4655 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4656 &min_downscale, &max_upscale);
4657 } else {
4658 min_downscale = 250;
4659 max_upscale = 16000;
4660 }
4661
6491f0c0
NK
4662 scale_w = scaling_info->dst_rect.width * 1000 /
4663 scaling_info->src_rect.width;
e7b07cee 4664
6300b3bd 4665 if (scale_w < min_downscale || scale_w > max_upscale)
6491f0c0
NK
4666 return -EINVAL;
4667
4668 scale_h = scaling_info->dst_rect.height * 1000 /
4669 scaling_info->src_rect.height;
4670
6300b3bd 4671 if (scale_h < min_downscale || scale_h > max_upscale)
6491f0c0
NK
4672 return -EINVAL;
4673
695af5f9
NK
4674 /*
4675 * The "scaling_quality" can be ignored for now; with quality = 0, DC
4676 * assumes reasonable defaults based on the format.
4677 */
e7b07cee 4678
695af5f9 4679 return 0;
4562236b 4680}
695af5f9 4681
a3241991
BN
4682static void
4683fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4684 uint64_t tiling_flags)
e7b07cee 4685{
a3241991
BN
4686 /* Fill GFX8 params */
4687 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4688 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 4689
a3241991
BN
4690 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4691 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4692 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4693 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4694 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 4695
a3241991
BN
4696 /* XXX fix me for VI */
4697 tiling_info->gfx8.num_banks = num_banks;
4698 tiling_info->gfx8.array_mode =
4699 DC_ARRAY_2D_TILED_THIN1;
4700 tiling_info->gfx8.tile_split = tile_split;
4701 tiling_info->gfx8.bank_width = bankw;
4702 tiling_info->gfx8.bank_height = bankh;
4703 tiling_info->gfx8.tile_aspect = mtaspect;
4704 tiling_info->gfx8.tile_mode =
4705 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4706 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4707 == DC_ARRAY_1D_TILED_THIN1) {
4708 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
4709 }
4710
a3241991
BN
4711 tiling_info->gfx8.pipe_config =
4712 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
4713}
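/*
 * A small sketch (hypothetical helper, not part of the driver) of the
 * uapi accessors consumed above: AMDGPU_TILING_SET()/AMDGPU_TILING_GET()
 * pack and unpack the same bitfields this function decodes.
 */
static void __maybe_unused tiling_flags_sketch(void)
{
	uint64_t flags = AMDGPU_TILING_SET(ARRAY_MODE, DC_ARRAY_2D_TILED_THIN1);

	WARN_ON(AMDGPU_TILING_GET(flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1);
}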
4714
a3241991
BN
4715static void
4716fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4717 union dc_tiling_info *tiling_info)
4718{
4719 tiling_info->gfx9.num_pipes =
4720 adev->gfx.config.gb_addr_config_fields.num_pipes;
4721 tiling_info->gfx9.num_banks =
4722 adev->gfx.config.gb_addr_config_fields.num_banks;
4723 tiling_info->gfx9.pipe_interleave =
4724 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4725 tiling_info->gfx9.num_shader_engines =
4726 adev->gfx.config.gb_addr_config_fields.num_se;
4727 tiling_info->gfx9.max_compressed_frags =
4728 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4729 tiling_info->gfx9.num_rb_per_se =
4730 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4731 tiling_info->gfx9.shaderEnable = 1;
1d789535 4732 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
a3241991 4733 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
7df7e505
NK
4734}
4735
695af5f9 4736static int
a3241991
BN
4737validate_dcc(struct amdgpu_device *adev,
4738 const enum surface_pixel_format format,
4739 const enum dc_rotation_angle rotation,
4740 const union dc_tiling_info *tiling_info,
4741 const struct dc_plane_dcc_param *dcc,
4742 const struct dc_plane_address *address,
4743 const struct plane_size *plane_size)
7df7e505
NK
4744{
4745 struct dc *dc = adev->dm.dc;
8daa1218
NC
4746 struct dc_dcc_surface_param input;
4747 struct dc_surface_dcc_cap output;
7df7e505 4748
8daa1218
NC
4749 memset(&input, 0, sizeof(input));
4750 memset(&output, 0, sizeof(output));
4751
a3241991 4752 if (!dcc->enable)
87b7ebc2
RS
4753 return 0;
4754
a3241991
BN
4755 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4756 !dc->cap_funcs.get_dcc_compression_cap)
09e5665a 4757 return -EINVAL;
7df7e505 4758
695af5f9 4759 input.format = format;
12e2b2d4
DL
4760 input.surface_size.width = plane_size->surface_size.width;
4761 input.surface_size.height = plane_size->surface_size.height;
695af5f9 4762 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 4763
695af5f9 4764 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 4765 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 4766 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
4767 input.scan = SCAN_DIRECTION_VERTICAL;
4768
4769 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 4770 return -EINVAL;
7df7e505
NK
4771
4772 if (!output.capable)
09e5665a 4773 return -EINVAL;
7df7e505 4774
a3241991
BN
4775 if (dcc->independent_64b_blks == 0 &&
4776 output.grph.rgb.independent_64b_blks != 0)
09e5665a 4777 return -EINVAL;
7df7e505 4778
a3241991
BN
4779 return 0;
4780}
4781
37384b3f
BN
4782static bool
4783modifier_has_dcc(uint64_t modifier)
4784{
4785 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4786}
4787
4788static unsigned
4789modifier_gfx9_swizzle_mode(uint64_t modifier)
4790{
4791 if (modifier == DRM_FORMAT_MOD_LINEAR)
4792 return 0;
4793
4794 return AMD_FMT_MOD_GET(TILE, modifier);
4795}
4796
dfbbfe3c
BN
4797static const struct drm_format_info *
4798amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4799{
816853f9 4800 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
4801}
4802
37384b3f
BN
4803static void
4804fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4805 union dc_tiling_info *tiling_info,
4806 uint64_t modifier)
4807{
4808 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4809 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4810 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4811 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4812
4813 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4814
4815 if (!IS_AMD_FMT_MOD(modifier))
4816 return;
4817
4818 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4819 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4820
4821 if (adev->family >= AMDGPU_FAMILY_NV) {
4822 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4823 } else {
4824 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4825
4826 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4827 }
4828}
4829
faa37f54
BN
4830enum dm_micro_swizzle {
4831 MICRO_SWIZZLE_Z = 0,
4832 MICRO_SWIZZLE_S = 1,
4833 MICRO_SWIZZLE_D = 2,
4834 MICRO_SWIZZLE_R = 3
4835};
4836
4837static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4838 uint32_t format,
4839 uint64_t modifier)
4840{
4841 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4842 const struct drm_format_info *info = drm_format_info(format);
fe180178 4843 int i;
faa37f54
BN
4844
4845 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4846
4847 if (!info)
4848 return false;
4849
4850 /*
fe180178
QZ
4851 * We always have to allow these modifiers:
4852 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4853 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
faa37f54 4854 */
fe180178
QZ
4855 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4856 modifier == DRM_FORMAT_MOD_INVALID) {
faa37f54 4857 return true;
fe180178 4858 }
faa37f54 4859
fe180178
QZ
4860 /* Check that the modifier is on the list of the plane's supported modifiers. */
4861 for (i = 0; i < plane->modifier_count; i++) {
4862 if (modifier == plane->modifiers[i])
4863 break;
4864 }
4865 if (i == plane->modifier_count)
faa37f54
BN
4866 return false;
4867
4868 /*
4869 * For D swizzle the canonical modifier depends on the bpp, so check
4870 * it here.
4871 */
4872 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4873 adev->family >= AMDGPU_FAMILY_NV) {
4874 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4875 return false;
4876 }
4877
4878 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4879 info->cpp[0] < 8)
4880 return false;
4881
4882 if (modifier_has_dcc(modifier)) {
4883 /* Per radeonsi comments 16/64 bpp are more complicated. */
4884 if (info->cpp[0] != 4)
4885 return false;
951796f2
SS
4886 /* We support multi-planar formats, but not when combined with
4887 * additional DCC metadata planes. */
4888 if (info->num_planes > 1)
4889 return false;
faa37f54
BN
4890 }
4891
4892 return true;
4893}
4894
4895static void
4896add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4897{
4898 if (!*mods)
4899 return;
4900
4901 if (*cap - *size < 1) {
4902 uint64_t new_cap = *cap * 2;
4903 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4904
4905 if (!new_mods) {
4906 kfree(*mods);
4907 *mods = NULL;
4908 return;
4909 }
4910
4911 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4912 kfree(*mods);
4913 *mods = new_mods;
4914 *cap = new_cap;
4915 }
4916
4917 (*mods)[*size] = mod;
4918 *size += 1;
4919}
4920
4921static void
4922add_gfx9_modifiers(const struct amdgpu_device *adev,
4923 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4924{
4925 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4926 int pipe_xor_bits = min(8, pipes +
4927 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4928 int bank_xor_bits = min(8 - pipe_xor_bits,
4929 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4930 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4931 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4932
4933
4934 if (adev->family == AMDGPU_FAMILY_RV) {
4935 /* Raven2 and later */
4936 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4937
4938 /*
4939 * No _D DCC swizzles yet because we only allow 32bpp, which
4940 * doesn't support _D on DCN
4941 */
4942
4943 if (has_constant_encode) {
4944 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4945 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4946 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4947 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4948 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4949 AMD_FMT_MOD_SET(DCC, 1) |
4950 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4951 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4952 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4953 }
4954
4955 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4956 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4957 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4958 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4959 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4960 AMD_FMT_MOD_SET(DCC, 1) |
4961 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4962 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4963 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4964
4965 if (has_constant_encode) {
4966 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4967 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4968 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4969 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4970 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4971 AMD_FMT_MOD_SET(DCC, 1) |
4972 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4973 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4974 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4975
4976 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4977 AMD_FMT_MOD_SET(RB, rb) |
4978 AMD_FMT_MOD_SET(PIPE, pipes));
4979 }
4980
4981 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4982 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4983 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4984 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4985 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4986 AMD_FMT_MOD_SET(DCC, 1) |
4987 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4988 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4989 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4990 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4991 AMD_FMT_MOD_SET(RB, rb) |
4992 AMD_FMT_MOD_SET(PIPE, pipes));
4993 }
4994
4995 /*
4996 * Only supported for 64bpp on Raven, will be filtered on format in
4997 * dm_plane_format_mod_supported.
4998 */
4999 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5000 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5001 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5002 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5003 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5004
5005 if (adev->family == AMDGPU_FAMILY_RV) {
5006 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5007 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5008 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5009 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5010 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5011 }
5012
5013 /*
5014 * Only supported for 64bpp on Raven, will be filtered on format in
5015 * dm_plane_format_mod_supported.
5016 */
5017 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5018 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5019 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5020
5021 if (adev->family == AMDGPU_FAMILY_RV) {
5022 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5023 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5024 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5025 }
5026}
5027
5028static void
5029add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5030 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5031{
5032 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5033
5034 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5035 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5036 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5037 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5038 AMD_FMT_MOD_SET(DCC, 1) |
5039 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5040 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5041 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5042
5043 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5044 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5045 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5046 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5047 AMD_FMT_MOD_SET(DCC, 1) |
5048 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5049 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5050 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5051 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5052
5053 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5054 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5055 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5056 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5057
5058 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5059 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5060 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5061 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5062
5063
5064 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5065 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5066 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5067 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5068
5069 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5070 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5071 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5072}
5073
5074static void
5075add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5076 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5077{
5078 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5079 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5080
5081 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5082 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5083 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5084 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5085 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5086 AMD_FMT_MOD_SET(DCC, 1) |
5087 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5088 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5089 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5090	AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5091
5092 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5093 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5094 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5095 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5096 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5097 AMD_FMT_MOD_SET(DCC, 1) |
5098 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5099 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5100 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5101
5102 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5103 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5104 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5105 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5106 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5107 AMD_FMT_MOD_SET(DCC, 1) |
5108 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5109 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5110 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5111 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5112	AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5113
5114 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5115 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5116 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5117 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5118 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5119 AMD_FMT_MOD_SET(DCC, 1) |
5120 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5121 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5122 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5123 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5124
5125 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5126 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5127 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5128 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5129 AMD_FMT_MOD_SET(PACKERS, pkrs));
5130
5131 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5132 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5133 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5134 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5135 AMD_FMT_MOD_SET(PACKERS, pkrs));
5136
5137 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5138 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5139 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5140 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5141
5142 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5143 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5144 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5145}
5146
5147static int
5148get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5149{
5150 uint64_t size = 0, capacity = 128;
5151 *mods = NULL;
5152
5153 /* We have not hooked up any pre-GFX9 modifiers. */
5154 if (adev->family < AMDGPU_FAMILY_AI)
5155 return 0;
5156
5157 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5158
5159 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5160 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5161 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5162 return *mods ? 0 : -ENOMEM;
5163 }
5164
5165 switch (adev->family) {
5166 case AMDGPU_FAMILY_AI:
5167 case AMDGPU_FAMILY_RV:
5168 add_gfx9_modifiers(adev, mods, &size, &capacity);
5169 break;
5170 case AMDGPU_FAMILY_NV:
5171 case AMDGPU_FAMILY_VGH:
5172	case AMDGPU_FAMILY_YC:
5173	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5174 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5175 else
5176 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5177 break;
5178 }
5179
5180 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5181
5182 /* INVALID marks the end of the list. */
5183 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5184
5185 if (!*mods)
5186 return -ENOMEM;
5187
5188 return 0;
5189}
5190
5191static int
5192fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5193 const struct amdgpu_framebuffer *afb,
5194 const enum surface_pixel_format format,
5195 const enum dc_rotation_angle rotation,
5196 const struct plane_size *plane_size,
5197 union dc_tiling_info *tiling_info,
5198 struct dc_plane_dcc_param *dcc,
5199 struct dc_plane_address *address,
5200 const bool force_disable_dcc)
5201{
5202 const uint64_t modifier = afb->base.modifier;
5203	int ret = 0;
5204
5205 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5206 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5207
5208 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5209 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5210	bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5211	bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5212
5213 dcc->enable = 1;
5214 dcc->meta_pitch = afb->base.pitches[1];
5215	dcc->independent_64b_blks = independent_64b_blks;
5216 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5217 if (independent_64b_blks && independent_128b_blks)
5218	dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5219 else if (independent_128b_blks)
5220 dcc->dcc_ind_blk = hubp_ind_block_128b;
5221 else if (independent_64b_blks && !independent_128b_blks)
5222	dcc->dcc_ind_blk = hubp_ind_block_64b;
5223 else
5224 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5225 } else {
5226 if (independent_64b_blks)
5227 dcc->dcc_ind_blk = hubp_ind_block_64b;
5228 else
5229 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5230 }
5231
5232 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5233 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5234 }
5235
5236 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5237 if (ret)
5238	drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5239
5240	return ret;
5241}
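/*
 * Quick-reference summary of the DCC independent-block selection above for
 * GFX10_RBPLUS tile versions (64B/128B flags -> dcc_ind_blk):
 *
 *   64B  128B  ->  dcc_ind_blk
 *    1    1        hubp_ind_block_64b_no_128bcl
 *    0    1        hubp_ind_block_128b
 *    1    0        hubp_ind_block_64b
 *    0    0        hubp_ind_block_unconstrained
 *
 * Pre-RBPLUS tile versions only distinguish 64B (set) from unconstrained.
 */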
5242
5243static int
5244	fill_plane_buffer_attributes(struct amdgpu_device *adev,
5245	const struct amdgpu_framebuffer *afb,
5246 const enum surface_pixel_format format,
5247 const enum dc_rotation_angle rotation,
5248 const uint64_t tiling_flags,
5249	union dc_tiling_info *tiling_info,
5250	struct plane_size *plane_size,
5251	struct dc_plane_dcc_param *dcc,
5252	struct dc_plane_address *address,
5253	bool tmz_surface,
5254	bool force_disable_dcc)
5255	{
5256	const struct drm_framebuffer *fb = &afb->base;
5257 int ret;
5258
5259 memset(tiling_info, 0, sizeof(*tiling_info));
5260	memset(plane_size, 0, sizeof(*plane_size));
5261	memset(dcc, 0, sizeof(*dcc));
5262 memset(address, 0, sizeof(*address));
5263
5264 address->tmz_surface = tmz_surface;
5265
5266	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5267 uint64_t addr = afb->address + fb->offsets[0];
5268
5269 plane_size->surface_size.x = 0;
5270 plane_size->surface_size.y = 0;
5271 plane_size->surface_size.width = fb->width;
5272 plane_size->surface_size.height = fb->height;
5273 plane_size->surface_pitch =
5274 fb->pitches[0] / fb->format->cpp[0];
5275
5276	address->type = PLN_ADDR_TYPE_GRAPHICS;
5277 address->grph.addr.low_part = lower_32_bits(addr);
5278 address->grph.addr.high_part = upper_32_bits(addr);
5279	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5280	uint64_t luma_addr = afb->address + fb->offsets[0];
5281	uint64_t chroma_addr = afb->address + fb->offsets[1];
5282
5283 plane_size->surface_size.x = 0;
5284 plane_size->surface_size.y = 0;
5285 plane_size->surface_size.width = fb->width;
5286 plane_size->surface_size.height = fb->height;
5287 plane_size->surface_pitch =
5288 fb->pitches[0] / fb->format->cpp[0];
5289
5290 plane_size->chroma_size.x = 0;
5291 plane_size->chroma_size.y = 0;
5292	/* TODO: set these based on surface format */
5293 plane_size->chroma_size.width = fb->width / 2;
5294 plane_size->chroma_size.height = fb->height / 2;
5295
5296	plane_size->chroma_pitch =
5297 fb->pitches[1] / fb->format->cpp[1];
5298
5299 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5300 address->video_progressive.luma_addr.low_part =
5301	lower_32_bits(luma_addr);
5302	address->video_progressive.luma_addr.high_part =
5303	upper_32_bits(luma_addr);
5304 address->video_progressive.chroma_addr.low_part =
5305 lower_32_bits(chroma_addr);
5306 address->video_progressive.chroma_addr.high_part =
5307 upper_32_bits(chroma_addr);
5308 }
5309
5310	if (adev->family >= AMDGPU_FAMILY_AI) {
5311 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5312 rotation, plane_size,
5313 tiling_info, dcc,
5314 address,
5315 force_disable_dcc);
5316 if (ret)
5317 return ret;
5318 } else {
5319 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5320 }
5321
5322 return 0;
5323}
5324
5325	static void
5326	fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5327 bool *per_pixel_alpha, bool *global_alpha,
5328 int *global_alpha_value)
5329{
5330 *per_pixel_alpha = false;
5331 *global_alpha = false;
5332 *global_alpha_value = 0xff;
5333
5334 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5335 return;
5336
5337 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5338 static const uint32_t alpha_formats[] = {
5339 DRM_FORMAT_ARGB8888,
5340 DRM_FORMAT_RGBA8888,
5341 DRM_FORMAT_ABGR8888,
5342 };
5343 uint32_t format = plane_state->fb->format->format;
5344 unsigned int i;
5345
5346 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5347 if (format == alpha_formats[i]) {
5348 *per_pixel_alpha = true;
5349 break;
5350 }
5351 }
5352 }
5353
5354 if (plane_state->alpha < 0xffff) {
5355 *global_alpha = true;
5356 *global_alpha_value = plane_state->alpha >> 8;
5357 }
5358}
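/*
 * Worked example for the helper above: DRM plane alpha is 16 bits wide
 * (0x0000..0xffff) while DC takes an 8-bit global alpha. A ~50% alpha of
 * 0x8000 therefore sets *global_alpha = true and *global_alpha_value =
 * 0x8000 >> 8 = 0x80; only a fully opaque 0xffff leaves global alpha off.
 */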
5359
5360static int
5361fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5362	const enum surface_pixel_format format,
5363 enum dc_color_space *color_space)
5364{
5365 bool full_range;
5366
5367 *color_space = COLOR_SPACE_SRGB;
5368
5369 /* DRM color properties only affect non-RGB formats. */
5370	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5371 return 0;
5372
5373 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5374
5375 switch (plane_state->color_encoding) {
5376 case DRM_COLOR_YCBCR_BT601:
5377 if (full_range)
5378 *color_space = COLOR_SPACE_YCBCR601;
5379 else
5380 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5381 break;
5382
5383 case DRM_COLOR_YCBCR_BT709:
5384 if (full_range)
5385 *color_space = COLOR_SPACE_YCBCR709;
5386 else
5387 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5388 break;
5389
5390 case DRM_COLOR_YCBCR_BT2020:
5391 if (full_range)
5392 *color_space = COLOR_SPACE_2020_YCBCR;
5393 else
5394 return -EINVAL;
5395 break;
5396
5397 default:
5398 return -EINVAL;
5399 }
5400
5401 return 0;
5402}
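/*
 * Example of the mapping above: an NV12 plane with color_encoding =
 * DRM_COLOR_YCBCR_BT709 and color_range = DRM_COLOR_YCBCR_LIMITED_RANGE
 * resolves to COLOR_SPACE_YCBCR709_LIMITED, while BT.2020 limited range is
 * currently rejected with -EINVAL.
 */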
5403
5404static int
5405fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5406 const struct drm_plane_state *plane_state,
5407 const uint64_t tiling_flags,
5408 struct dc_plane_info *plane_info,
5409	struct dc_plane_address *address,
5410	bool tmz_surface,
5411	bool force_disable_dcc)
5412{
5413 const struct drm_framebuffer *fb = plane_state->fb;
5414 const struct amdgpu_framebuffer *afb =
5415 to_amdgpu_framebuffer(plane_state->fb);
5416 int ret;
5417
5418 memset(plane_info, 0, sizeof(*plane_info));
5419
5420 switch (fb->format->format) {
5421 case DRM_FORMAT_C8:
5422 plane_info->format =
5423 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5424 break;
5425 case DRM_FORMAT_RGB565:
5426 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5427 break;
5428 case DRM_FORMAT_XRGB8888:
5429 case DRM_FORMAT_ARGB8888:
5430 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5431 break;
5432 case DRM_FORMAT_XRGB2101010:
5433 case DRM_FORMAT_ARGB2101010:
5434 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5435 break;
5436 case DRM_FORMAT_XBGR2101010:
5437 case DRM_FORMAT_ABGR2101010:
5438 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5439 break;
5440 case DRM_FORMAT_XBGR8888:
5441 case DRM_FORMAT_ABGR8888:
5442 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5443 break;
5444 case DRM_FORMAT_NV21:
5445 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5446 break;
5447 case DRM_FORMAT_NV12:
5448 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5449 break;
5450 case DRM_FORMAT_P010:
5451 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5452 break;
5453 case DRM_FORMAT_XRGB16161616F:
5454 case DRM_FORMAT_ARGB16161616F:
5455 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5456 break;
5457 case DRM_FORMAT_XBGR16161616F:
5458 case DRM_FORMAT_ABGR16161616F:
5459 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5460 break;
5461 case DRM_FORMAT_XRGB16161616:
5462 case DRM_FORMAT_ARGB16161616:
5463 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5464 break;
5465 case DRM_FORMAT_XBGR16161616:
5466 case DRM_FORMAT_ABGR16161616:
5467 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5468 break;
5469 default:
5470 DRM_ERROR(
5471 "Unsupported screen format %p4cc\n",
5472 &fb->format->format);
5473 return -EINVAL;
5474 }
5475
5476 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5477 case DRM_MODE_ROTATE_0:
5478 plane_info->rotation = ROTATION_ANGLE_0;
5479 break;
5480 case DRM_MODE_ROTATE_90:
5481 plane_info->rotation = ROTATION_ANGLE_90;
5482 break;
5483 case DRM_MODE_ROTATE_180:
5484 plane_info->rotation = ROTATION_ANGLE_180;
5485 break;
5486 case DRM_MODE_ROTATE_270:
5487 plane_info->rotation = ROTATION_ANGLE_270;
5488 break;
5489 default:
5490 plane_info->rotation = ROTATION_ANGLE_0;
5491 break;
5492 }
5493
5494 plane_info->visible = true;
5495 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5496
5497 plane_info->layer_index = 0;
5498
5499 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5500 &plane_info->color_space);
5501 if (ret)
5502 return ret;
5503
5504 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5505 plane_info->rotation, tiling_flags,
5506 &plane_info->tiling_info,
5507 &plane_info->plane_size,
5508	&plane_info->dcc, address, tmz_surface,
5509	force_disable_dcc);
5510 if (ret)
5511 return ret;
5512
5513 fill_blending_from_plane_state(
5514 plane_state, &plane_info->per_pixel_alpha,
5515 &plane_info->global_alpha, &plane_info->global_alpha_value);
5516
5517 return 0;
5518}
5519
5520static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5521 struct dc_plane_state *dc_plane_state,
5522 struct drm_plane_state *plane_state,
5523 struct drm_crtc_state *crtc_state)
5524{
5525	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5526	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5527 struct dc_scaling_info scaling_info;
5528 struct dc_plane_info plane_info;
5529	int ret;
5530	bool force_disable_dcc = false;
5531
5532	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5533 if (ret)
5534 return ret;
5535
5536 dc_plane_state->src_rect = scaling_info.src_rect;
5537 dc_plane_state->dst_rect = scaling_info.dst_rect;
5538 dc_plane_state->clip_rect = scaling_info.clip_rect;
5539 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5540
5541	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5542	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5543	afb->tiling_flags,
5544	&plane_info,
5545	&dc_plane_state->address,
5546	afb->tmz_surface,
5547	force_disable_dcc);
5548 if (ret)
5549 return ret;
5550
5551	dc_plane_state->format = plane_info.format;
5552	dc_plane_state->color_space = plane_info.color_space;
5554 dc_plane_state->plane_size = plane_info.plane_size;
5555 dc_plane_state->rotation = plane_info.rotation;
5556 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5557 dc_plane_state->stereo_format = plane_info.stereo_format;
5558 dc_plane_state->tiling_info = plane_info.tiling_info;
5559 dc_plane_state->visible = plane_info.visible;
5560 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5561 dc_plane_state->global_alpha = plane_info.global_alpha;
5562 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5563 dc_plane_state->dcc = plane_info.dcc;
5564	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5565	dc_plane_state->flip_int_enabled = true;
5566
5567 /*
5568 * Always set input transfer function, since plane state is refreshed
5569 * every time.
5570 */
5571 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5572 if (ret)
5573 return ret;
5574
5575	return 0;
5576}
5577
5578static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5579 const struct dm_connector_state *dm_state,
5580 struct dc_stream_state *stream)
5581{
5582 enum amdgpu_rmx_type rmx_type;
5583
5584 struct rect src = { 0 }; /* viewport in composition space*/
5585 struct rect dst = { 0 }; /* stream addressable area */
5586
5587 /* no mode. nothing to be done */
5588 if (!mode)
5589 return;
5590
5591 /* Full screen scaling by default */
5592 src.width = mode->hdisplay;
5593 src.height = mode->vdisplay;
5594 dst.width = stream->timing.h_addressable;
5595 dst.height = stream->timing.v_addressable;
5596
5597 if (dm_state) {
5598 rmx_type = dm_state->scaling;
5599 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5600 if (src.width * dst.height <
5601 src.height * dst.width) {
5602 /* height needs less upscaling/more downscaling */
5603 dst.width = src.width *
5604 dst.height / src.height;
5605 } else {
5606 /* width needs less upscaling/more downscaling */
5607 dst.height = src.height *
5608 dst.width / src.width;
5609 }
5610 } else if (rmx_type == RMX_CENTER) {
5611 dst = src;
5612	}
5613
5614 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5615 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5616
5617 if (dm_state->underscan_enable) {
5618 dst.x += dm_state->underscan_hborder / 2;
5619 dst.y += dm_state->underscan_vborder / 2;
5620 dst.width -= dm_state->underscan_hborder;
5621 dst.height -= dm_state->underscan_vborder;
5622 }
5623 }
5624
5625 stream->src = src;
5626 stream->dst = dst;
5627
5628 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5629 dst.x, dst.y, dst.width, dst.height);
5630
5631}
5632
5633	static enum dc_color_depth
5634	convert_color_depth_from_display_info(const struct drm_connector *connector,
5635	bool is_y420, int requested_bpc)
5636	{
5637	uint8_t bpc;
5638
5639 if (is_y420) {
5640 bpc = 8;
5641
5642 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5643 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5644 bpc = 16;
5645 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5646 bpc = 12;
5647 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5648 bpc = 10;
5649 } else {
5650 bpc = (uint8_t)connector->display_info.bpc;
5651 /* Assume 8 bpc by default if no bpc is specified. */
5652 bpc = bpc ? bpc : 8;
5653 }
5654
5655	if (requested_bpc > 0) {
5656 /*
5657 * Cap display bpc based on the user requested value.
5658 *
5659 * The value for state->max_bpc may not correctly updated
5660 * depending on when the connector gets added to the state
5661 * or if this was called outside of atomic check, so it
5662 * can't be used directly.
5663 */
5664	bpc = min_t(u8, bpc, requested_bpc);
5665
5666 /* Round down to the nearest even number. */
5667 bpc = bpc - (bpc & 1);
5668 }
5669
5670 switch (bpc) {
5671 case 0:
5672 /*
5673 * Temporary Work around, DRM doesn't parse color depth for
5674 * EDID revision before 1.4
5675 * TODO: Fix edid parsing
5676 */
5677 return COLOR_DEPTH_888;
5678 case 6:
5679 return COLOR_DEPTH_666;
5680 case 8:
5681 return COLOR_DEPTH_888;
5682 case 10:
5683 return COLOR_DEPTH_101010;
5684 case 12:
5685 return COLOR_DEPTH_121212;
5686 case 14:
5687 return COLOR_DEPTH_141414;
5688 case 16:
5689 return COLOR_DEPTH_161616;
5690 default:
5691 return COLOR_DEPTH_UNDEFINED;
5692 }
5693}
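/*
 * Worked example for the capping logic above: if the EDID reports 12 bpc and
 * userspace requests max_bpc = 11, bpc becomes min(12, 11) = 11 and is then
 * rounded down to the even value 10, so the function returns
 * COLOR_DEPTH_101010.
 */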
5694
5695static enum dc_aspect_ratio
5696get_aspect_ratio(const struct drm_display_mode *mode_in)
5697{
5698 /* 1-1 mapping, since both enums follow the HDMI spec. */
5699 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5700}
5701
5702static enum dc_color_space
5703get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5704{
5705 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5706
5707 switch (dc_crtc_timing->pixel_encoding) {
5708 case PIXEL_ENCODING_YCBCR422:
5709 case PIXEL_ENCODING_YCBCR444:
5710 case PIXEL_ENCODING_YCBCR420:
5711 {
5712 /*
5713 * 27030khz is the separation point between HDTV and SDTV
5714 * according to HDMI spec, we use YCbCr709 and YCbCr601
5715 * respectively
5716 */
5717	if (dc_crtc_timing->pix_clk_100hz > 270300) {
5718 if (dc_crtc_timing->flags.Y_ONLY)
5719 color_space =
5720 COLOR_SPACE_YCBCR709_LIMITED;
5721 else
5722 color_space = COLOR_SPACE_YCBCR709;
5723 } else {
5724 if (dc_crtc_timing->flags.Y_ONLY)
5725 color_space =
5726 COLOR_SPACE_YCBCR601_LIMITED;
5727 else
5728 color_space = COLOR_SPACE_YCBCR601;
5729 }
5730
5731 }
5732 break;
5733 case PIXEL_ENCODING_RGB:
5734 color_space = COLOR_SPACE_SRGB;
5735 break;
5736
5737 default:
5738 WARN_ON(1);
5739 break;
5740 }
5741
5742 return color_space;
5743}
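/*
 * Example of the 27.03 MHz split above: 1080p60 YCbCr has a pixel clock of
 * 148.5 MHz (pix_clk_100hz == 1485000 > 270300), so it is treated as HDTV
 * and mapped to YCbCr709; a 13.5 MHz SDTV timing would fall through to
 * YCbCr601.
 */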
5744
5745static bool adjust_colour_depth_from_display_info(
5746 struct dc_crtc_timing *timing_out,
5747 const struct drm_display_info *info)
5748{
5749	enum dc_color_depth depth = timing_out->display_color_depth;
5750	int normalized_clk;
5751	do {
5752	normalized_clk = timing_out->pix_clk_100hz / 10;
5753 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5754 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5755 normalized_clk /= 2;
5756 /* Adjusting pix clock following on HDMI spec based on colour depth */
5757 switch (depth) {
5758 case COLOR_DEPTH_888:
5759 break;
5760 case COLOR_DEPTH_101010:
5761 normalized_clk = (normalized_clk * 30) / 24;
5762 break;
5763 case COLOR_DEPTH_121212:
5764 normalized_clk = (normalized_clk * 36) / 24;
5765 break;
5766 case COLOR_DEPTH_161616:
5767 normalized_clk = (normalized_clk * 48) / 24;
5768 break;
5769 default:
5770 /* The above depths are the only ones valid for HDMI. */
5771 return false;
5772	}
5773 if (normalized_clk <= info->max_tmds_clock) {
5774 timing_out->display_color_depth = depth;
5775 return true;
5776 }
5777 } while (--depth > COLOR_DEPTH_666);
5778 return false;
5779}
5780
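/*
 * Worked example for the loop above, assuming an HDMI 2.0 sink with
 * max_tmds_clock = 600000 kHz and a 4K60 timing (pix_clk_100hz = 5940000,
 * i.e. normalized_clk = 594000 kHz): at 10 bpc the adjusted clock is
 * 594000 * 30 / 24 = 742500 kHz, which exceeds the TMDS limit, so the loop
 * steps down and settles on COLOR_DEPTH_888 (594000 kHz fits).
 */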
5781static void fill_stream_properties_from_drm_display_mode(
5782 struct dc_stream_state *stream,
5783 const struct drm_display_mode *mode_in,
5784 const struct drm_connector *connector,
5785 const struct drm_connector_state *connector_state,
5786 const struct dc_stream_state *old_stream,
5787 int requested_bpc)
5788{
5789 struct dc_crtc_timing *timing_out = &stream->timing;
5790	const struct drm_display_info *info = &connector->display_info;
5791	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5792 struct hdmi_vendor_infoframe hv_frame;
5793 struct hdmi_avi_infoframe avi_frame;
5794
5795 memset(&hv_frame, 0, sizeof(hv_frame));
5796 memset(&avi_frame, 0, sizeof(avi_frame));
5797
5798 timing_out->h_border_left = 0;
5799 timing_out->h_border_right = 0;
5800 timing_out->v_border_top = 0;
5801 timing_out->v_border_bottom = 0;
5802 /* TODO: un-hardcode */
5803	if (drm_mode_is_420_only(info, mode_in)
5804	&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5805	timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5806 else if (drm_mode_is_420_also(info, mode_in)
5807 && aconnector->force_yuv420_output)
5808 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5809	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5810	&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5811 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5812 else
5813 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5814
5815 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5816 timing_out->display_color_depth = convert_color_depth_from_display_info(
5817 connector,
5818 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5819 requested_bpc);
5820 timing_out->scan_type = SCANNING_TYPE_NODATA;
5821 timing_out->hdmi_vic = 0;
5822
5823	if (old_stream) {
5824 timing_out->vic = old_stream->timing.vic;
5825 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5826 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5827 } else {
5828 timing_out->vic = drm_match_cea_mode(mode_in);
5829 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5830 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5831 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5832 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5833 }
5834
5835 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5836 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5837 timing_out->vic = avi_frame.video_code;
5838 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5839 timing_out->hdmi_vic = hv_frame.vic;
5840 }
5841
5842 if (is_freesync_video_mode(mode_in, aconnector)) {
5843 timing_out->h_addressable = mode_in->hdisplay;
5844 timing_out->h_total = mode_in->htotal;
5845 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5846 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5847 timing_out->v_total = mode_in->vtotal;
5848 timing_out->v_addressable = mode_in->vdisplay;
5849 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5850 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5851 timing_out->pix_clk_100hz = mode_in->clock * 10;
5852 } else {
5853 timing_out->h_addressable = mode_in->crtc_hdisplay;
5854 timing_out->h_total = mode_in->crtc_htotal;
5855 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5856 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5857 timing_out->v_total = mode_in->crtc_vtotal;
5858 timing_out->v_addressable = mode_in->crtc_vdisplay;
5859 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5860 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5861 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5862 }
5863
5864	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5865
5866 stream->output_color_space = get_output_color_space(timing_out);
5867
5868 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5869 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5870 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5871 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5872 drm_mode_is_420_also(info, mode_in) &&
5873 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5874 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5875 adjust_colour_depth_from_display_info(timing_out, info);
5876 }
5877 }
5878}
5879
5880static void fill_audio_info(struct audio_info *audio_info,
5881 const struct drm_connector *drm_connector,
5882 const struct dc_sink *dc_sink)
5883{
5884 int i = 0;
5885 int cea_revision = 0;
5886 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5887
5888 audio_info->manufacture_id = edid_caps->manufacturer_id;
5889 audio_info->product_id = edid_caps->product_id;
5890
5891 cea_revision = drm_connector->display_info.cea_rev;
5892
5893	strscpy(audio_info->display_name,
5894	edid_caps->display_name,
5895	AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5896
5897	if (cea_revision >= 3) {
5898 audio_info->mode_count = edid_caps->audio_mode_count;
5899
5900 for (i = 0; i < audio_info->mode_count; ++i) {
5901 audio_info->modes[i].format_code =
5902 (enum audio_format_code)
5903 (edid_caps->audio_modes[i].format_code);
5904 audio_info->modes[i].channel_count =
5905 edid_caps->audio_modes[i].channel_count;
5906 audio_info->modes[i].sample_rates.all =
5907 edid_caps->audio_modes[i].sample_rate;
5908 audio_info->modes[i].sample_size =
5909 edid_caps->audio_modes[i].sample_size;
5910 }
5911 }
5912
5913 audio_info->flags.all = edid_caps->speaker_flags;
5914
5915 /* TODO: We only check for the progressive mode, check for interlace mode too */
5916	if (drm_connector->latency_present[0]) {
5917 audio_info->video_latency = drm_connector->video_latency[0];
5918 audio_info->audio_latency = drm_connector->audio_latency[0];
5919 }
5920
5921 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5922
5923}
5924
5925static void
5926copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5927 struct drm_display_mode *dst_mode)
5928{
5929 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5930 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5931 dst_mode->crtc_clock = src_mode->crtc_clock;
5932 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5933 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5934	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5935 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5936 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5937 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5938 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5939 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5940 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5941 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5942 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5943}
5944
5945static void
5946decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5947 const struct drm_display_mode *native_mode,
5948 bool scale_enabled)
5949{
5950 if (scale_enabled) {
5951 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5952 } else if (native_mode->clock == drm_mode->clock &&
5953 native_mode->htotal == drm_mode->htotal &&
5954 native_mode->vtotal == drm_mode->vtotal) {
5955 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5956 } else {
5957 /* no scaling nor amdgpu inserted, no need to patch */
5958 }
5959}
5960
5961static struct dc_sink *
5962create_fake_sink(struct amdgpu_dm_connector *aconnector)
5963{
5964	struct dc_sink_init_data sink_init_data = { 0 };
5965	struct dc_sink *sink = NULL;
5966 sink_init_data.link = aconnector->dc_link;
5967 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5968
5969 sink = dc_sink_create(&sink_init_data);
5970	if (!sink) {
5971	DRM_ERROR("Failed to create sink!\n");
5972	return NULL;
5973	}
5974	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5975
5976	return sink;
5977}
5978
5979static void set_multisync_trigger_params(
5980 struct dc_stream_state *stream)
5981{
5982 struct dc_stream_state *master = NULL;
5983
5984	if (stream->triggered_crtc_reset.enabled) {
5985 master = stream->triggered_crtc_reset.event_source;
5986 stream->triggered_crtc_reset.event =
5987 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5988 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5989 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5990 }
5991}
5992
5993static void set_master_stream(struct dc_stream_state *stream_set[],
5994 int stream_count)
5995{
5996 int j, highest_rfr = 0, master_stream = 0;
5997
5998 for (j = 0; j < stream_count; j++) {
5999 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6000 int refresh_rate = 0;
6001
6002	refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6003 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6004 if (refresh_rate > highest_rfr) {
6005 highest_rfr = refresh_rate;
6006 master_stream = j;
6007 }
6008 }
6009 }
6010 for (j = 0; j < stream_count; j++) {
6011	if (stream_set[j])
6012 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6013 }
6014}
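/*
 * The refresh-rate expression above works in 100Hz pixel-clock units:
 * e.g. a 1080p60 stream with pix_clk_100hz = 1485000, h_total = 2200 and
 * v_total = 1125 gives (1485000 * 100) / (2200 * 1125) = 60, so that stream
 * would win the master election against slower streams.
 */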
6015
6016static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6017{
6018 int i = 0;
6019	struct dc_stream_state *stream;
6020
6021 if (context->stream_count < 2)
6022 return;
6023 for (i = 0; i < context->stream_count ; i++) {
6024 if (!context->streams[i])
6025 continue;
6026 /*
6027 * TODO: add a function to read AMD VSDB bits and set
6028	* crtc_sync_master.multi_sync_enabled flag
6029	* For now it's set to false
6030	*/
6031	}
6032
6033	set_master_stream(context->streams, context->stream_count);
6034
6035 for (i = 0; i < context->stream_count ; i++) {
6036 stream = context->streams[i];
6037
6038 if (!stream)
6039 continue;
6040
6041 set_multisync_trigger_params(stream);
6042 }
6043}
6044
6045#if defined(CONFIG_DRM_AMD_DC_DCN)
6046static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6047 struct dc_sink *sink, struct dc_stream_state *stream,
6048 struct dsc_dec_dpcd_caps *dsc_caps)
6049{
6050 stream->timing.flags.DSC = 0;
6051
6052 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6053 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6054 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6055 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6056 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6057 dsc_caps);
6058 }
6059}
6060
6061static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6062 struct dc_sink *sink, struct dc_stream_state *stream,
6063 struct dsc_dec_dpcd_caps *dsc_caps,
6064 uint32_t max_dsc_target_bpp_limit_override)
6065{
6066 const struct dc_link_settings *verified_link_cap = NULL;
6067 uint32_t link_bw_in_kbps;
6068 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6069 struct dc *dc = sink->ctx->dc;
6070 struct dc_dsc_bw_range bw_range = {0};
6071 struct dc_dsc_config dsc_cfg = {0};
6072
6073 verified_link_cap = dc_link_get_link_cap(stream->link);
6074 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6075 edp_min_bpp_x16 = 8 * 16;
6076 edp_max_bpp_x16 = 8 * 16;
6077
6078 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6079 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6080
6081 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6082 edp_min_bpp_x16 = edp_max_bpp_x16;
6083
6084 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6085 dc->debug.dsc_min_slice_height_override,
6086 edp_min_bpp_x16, edp_max_bpp_x16,
6087 dsc_caps,
6088 &stream->timing,
6089 &bw_range)) {
6090
6091 if (bw_range.max_kbps < link_bw_in_kbps) {
6092 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6093 dsc_caps,
6094 dc->debug.dsc_min_slice_height_override,
6095 max_dsc_target_bpp_limit_override,
6096 0,
6097 &stream->timing,
6098 &dsc_cfg)) {
6099 stream->timing.dsc_cfg = dsc_cfg;
6100 stream->timing.flags.DSC = 1;
6101 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6102 }
6103 return;
6104 }
6105 }
6106
6107 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6108 dsc_caps,
6109 dc->debug.dsc_min_slice_height_override,
6110 max_dsc_target_bpp_limit_override,
6111 link_bw_in_kbps,
6112 &stream->timing,
6113 &dsc_cfg)) {
6114 stream->timing.dsc_cfg = dsc_cfg;
6115 stream->timing.flags.DSC = 1;
6116 }
6117}
6118
6119static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6120 struct dc_sink *sink, struct dc_stream_state *stream,
6121 struct dsc_dec_dpcd_caps *dsc_caps)
6122{
6123 struct drm_connector *drm_connector = &aconnector->base;
6124 uint32_t link_bandwidth_kbps;
6125	uint32_t max_dsc_target_bpp_limit_override = 0;
6126	struct dc *dc = sink->ctx->dc;
6127
6128 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6129 dc_link_get_link_cap(aconnector->dc_link));
6130
6131 if (stream->link && stream->link->local_sink)
6132 max_dsc_target_bpp_limit_override =
6133 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6134
6135 /* Set DSC policy according to dsc_clock_en */
6136 dc_dsc_policy_set_enable_dsc_when_not_needed(
6137 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6138
6139 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6140 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6141
6142 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6143
6144 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6145
6146 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6147 dsc_caps,
6148 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6149	max_dsc_target_bpp_limit_override,
6150 link_bandwidth_kbps,
6151 &stream->timing,
6152 &stream->timing.dsc_cfg)) {
6153 stream->timing.flags.DSC = 1;
6154 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6155 }
6156 }
6157
6158 /* Overwrite the stream flag if DSC is enabled through debugfs */
6159 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6160 stream->timing.flags.DSC = 1;
6161
6162 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6163 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6164
6165 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6166 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6167
6168 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6169 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6170}
6171#endif /* CONFIG_DRM_AMD_DC_DCN */
6172
6173/**
6174 * DOC: FreeSync Video
6175 *
6176 * When a userspace application wants to play a video, the content follows a
6177 * standard format definition that usually specifies the FPS for that format.
6178 * The below list illustrates some video format and the expected FPS,
6179 * respectively:
6180 *
6181 * - TV/NTSC (23.976 FPS)
6182 * - Cinema (24 FPS)
6183 * - TV/PAL (25 FPS)
6184 * - TV/NTSC (29.97 FPS)
6185 * - TV/NTSC (30 FPS)
6186 * - Cinema HFR (48 FPS)
6187 * - TV/PAL (50 FPS)
6188 * - Commonly used (60 FPS)
6189 * - Multiples of 24 (48,72,96,120 FPS)
6190 *
6191 * The list of standard video formats is not huge, so these modes can be added
6192 * to the connector's modeset list beforehand. With that, userspace can leverage
6193 * FreeSync to extend the front porch in order to attain the target refresh
6194 * rate. Such a switch will happen seamlessly, without screen blanking or
6195 * reprogramming of the output in any other way. If the userspace requests a
6196 * modesetting change compatible with FreeSync modes that only differ in the
6197 * refresh rate, DC will skip the full update and avoid blink during the
6198 * transition. For example, the video player can change the modesetting from
6199 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6200 * causing any display blink. This same concept can be applied to a mode
6201 * setting change.
6202 */
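/*
 * A numeric sketch of the front-porch stretch (illustrative values, not
 * taken from the driver): a 1080p60 base mode with a 148.5 MHz pixel clock,
 * htotal = 2200 and vtotal = 1125 refreshes at
 * 148500000 / (2200 * 1125) = 60 Hz. Keeping clock and htotal fixed and
 * extending the vertical front porch so vtotal becomes 1350 yields
 * 148500000 / (2200 * 1350) = 50 Hz, which is how the seamless 60 -> 50 Hz
 * switch described above is realized.
 */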
6203static struct drm_display_mode *
6204get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6205 bool use_probed_modes)
6206{
6207 struct drm_display_mode *m, *m_pref = NULL;
6208 u16 current_refresh, highest_refresh;
6209 struct list_head *list_head = use_probed_modes ?
6210 &aconnector->base.probed_modes :
6211 &aconnector->base.modes;
6212
6213 if (aconnector->freesync_vid_base.clock != 0)
6214 return &aconnector->freesync_vid_base;
6215
6216 /* Find the preferred mode */
6217 list_for_each_entry (m, list_head, head) {
6218 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6219 m_pref = m;
6220 break;
6221 }
6222 }
6223
6224 if (!m_pref) {
6225 /* Probably an EDID with no preferred mode. Fallback to first entry */
6226 m_pref = list_first_entry_or_null(
6227 &aconnector->base.modes, struct drm_display_mode, head);
6228 if (!m_pref) {
6229 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6230 return NULL;
6231 }
6232 }
6233
6234 highest_refresh = drm_mode_vrefresh(m_pref);
6235
6236 /*
6237 * Find the mode with highest refresh rate with same resolution.
6238 * For some monitors, preferred mode is not the mode with highest
6239 * supported refresh rate.
6240 */
6241 list_for_each_entry (m, list_head, head) {
6242 current_refresh = drm_mode_vrefresh(m);
6243
6244 if (m->hdisplay == m_pref->hdisplay &&
6245 m->vdisplay == m_pref->vdisplay &&
6246 highest_refresh < current_refresh) {
6247 highest_refresh = current_refresh;
6248 m_pref = m;
6249 }
6250 }
6251
6252 aconnector->freesync_vid_base = *m_pref;
6253 return m_pref;
6254}
6255
6256static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6257 struct amdgpu_dm_connector *aconnector)
6258{
6259 struct drm_display_mode *high_mode;
6260 int timing_diff;
6261
6262 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6263 if (!high_mode || !mode)
6264 return false;
6265
6266 timing_diff = high_mode->vtotal - mode->vtotal;
6267
6268 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6269 high_mode->hdisplay != mode->hdisplay ||
6270 high_mode->vdisplay != mode->vdisplay ||
6271 high_mode->hsync_start != mode->hsync_start ||
6272 high_mode->hsync_end != mode->hsync_end ||
6273 high_mode->htotal != mode->htotal ||
6274 high_mode->hskew != mode->hskew ||
6275 high_mode->vscan != mode->vscan ||
6276 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6277 high_mode->vsync_end - mode->vsync_end != timing_diff)
6278 return false;
6279 else
6280 return true;
6281}
6282
6283static struct dc_stream_state *
6284create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6285 const struct drm_display_mode *drm_mode,
6286	const struct dm_connector_state *dm_state,
6287 const struct dc_stream_state *old_stream,
6288 int requested_bpc)
6289{
6290 struct drm_display_mode *preferred_mode = NULL;
6291	struct drm_connector *drm_connector;
6292 const struct drm_connector_state *con_state =
6293 dm_state ? &dm_state->base : NULL;
6294	struct dc_stream_state *stream = NULL;
6295	struct drm_display_mode mode = *drm_mode;
6296 struct drm_display_mode saved_mode;
6297 struct drm_display_mode *freesync_mode = NULL;
6298	bool native_mode_found = false;
6299 bool recalculate_timing = false;
6300 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6301	int mode_refresh;
6302	int preferred_refresh = 0;
6303#if defined(CONFIG_DRM_AMD_DC_DCN)
6304	struct dsc_dec_dpcd_caps dsc_caps;
6305#endif
6306	struct dc_sink *sink = NULL;
6307
6308 memset(&saved_mode, 0, sizeof(saved_mode));
6309
6310	if (aconnector == NULL) {
6311	DRM_ERROR("aconnector is NULL!\n");
6312	return stream;
6313 }
6314
6315	drm_connector = &aconnector->base;
6316
6317	if (!aconnector->dc_sink) {
6318 sink = create_fake_sink(aconnector);
6319 if (!sink)
6320 return stream;
6321 } else {
6322 sink = aconnector->dc_sink;
6323	dc_sink_retain(sink);
6324	}
6325
6326	stream = dc_create_stream_for_sink(sink);
6327
6328	if (stream == NULL) {
6329	DRM_ERROR("Failed to create stream for sink!\n");
6330	goto finish;
6331 }
6332
6333 stream->dm_stream_context = aconnector;
6334
6335 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6336 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6337
6338 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6339 /* Search for preferred mode */
6340 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6341 native_mode_found = true;
6342 break;
6343 }
6344 }
6345 if (!native_mode_found)
6346 preferred_mode = list_first_entry_or_null(
6347 &aconnector->base.modes,
6348 struct drm_display_mode,
6349 head);
6350
6351 mode_refresh = drm_mode_vrefresh(&mode);
6352
6353	if (preferred_mode == NULL) {
6354 /*
6355 * This may not be an error, the use case is when we have no
6356 * usermode calls to reset and set mode upon hotplug. In this
6357 * case, we call set mode ourselves to restore the previous mode
6358 * and the modelist may not be filled in in time.
6359 */
6360	DRM_DEBUG_DRIVER("No preferred mode found\n");
6361	} else {
6362	recalculate_timing = amdgpu_freesync_vid_mode &&
6363 is_freesync_video_mode(&mode, aconnector);
6364 if (recalculate_timing) {
6365 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6366 saved_mode = mode;
6367 mode = *freesync_mode;
6368 } else {
6369 decide_crtc_timing_for_drm_display_mode(
6370	&mode, preferred_mode, scale);
6371
6372 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6373 }
6374 }
6375
6376 if (recalculate_timing)
6377 drm_mode_set_crtcinfo(&saved_mode, 0);
6378	else if (!dm_state)
6379 drm_mode_set_crtcinfo(&mode, 0);
6380
6381	/*
6382 * If scaling is enabled and refresh rate didn't change
6383 * we copy the vic and polarities of the old timings
6384 */
6385	if (!scale || mode_refresh != preferred_refresh)
6386 fill_stream_properties_from_drm_display_mode(
6387 stream, &mode, &aconnector->base, con_state, NULL,
6388 requested_bpc);
6389	else
6390 fill_stream_properties_from_drm_display_mode(
6391 stream, &mode, &aconnector->base, con_state, old_stream,
6392 requested_bpc);
6393
6394#if defined(CONFIG_DRM_AMD_DC_DCN)
6395 /* SST DSC determination policy */
6396 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6397 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6398 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6399#endif
6400
6401 update_stream_scaling_settings(&mode, dm_state, stream);
6402
6403 fill_audio_info(
6404 &stream->audio_info,
6405 drm_connector,
6406	sink);
6407
6408	update_stream_signal(stream, sink);
6409
6410	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6411 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6412
6413 if (stream->link->psr_settings.psr_feature_enabled) {
6414 //
6415 // should decide stream support vsc sdp colorimetry capability
6416 // before building vsc info packet
6417 //
6418 stream->use_vsc_sdp_for_colorimetry = false;
6419 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6420 stream->use_vsc_sdp_for_colorimetry =
6421 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6422 } else {
6423 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6424 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 6425 }
8a488f5d 6426 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
1a365683
RL
6427 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6428
8c322309 6429 }
aed15309 6430finish:
dcd5fb82 6431 dc_sink_release(sink);
9e3efe3e 6432
e7b07cee
HW
6433 return stream;
6434}
6435
7578ecda 6436static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
6437{
6438 drm_crtc_cleanup(crtc);
6439 kfree(crtc);
6440}
6441
6442static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 6443 struct drm_crtc_state *state)
e7b07cee
HW
6444{
6445 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6446
6447	/* TODO Destroy dc_stream objects once the stream object is flattened */
6448 if (cur->stream)
6449 dc_stream_release(cur->stream);
6450
6451
6452 __drm_atomic_helper_crtc_destroy_state(state);
6453
6454
6455 kfree(state);
6456}
6457
6458static void dm_crtc_reset_state(struct drm_crtc *crtc)
6459{
6460 struct dm_crtc_state *state;
6461
6462 if (crtc->state)
6463 dm_crtc_destroy_state(crtc, crtc->state);
6464
6465 state = kzalloc(sizeof(*state), GFP_KERNEL);
6466 if (WARN_ON(!state))
6467 return;
6468
1f8a52ec 6469 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
6470}
6471
6472static struct drm_crtc_state *
6473dm_crtc_duplicate_state(struct drm_crtc *crtc)
6474{
6475 struct dm_crtc_state *state, *cur;
6476
6477 cur = to_dm_crtc_state(crtc->state);
6478
6479 if (WARN_ON(!crtc->state))
6480 return NULL;
6481
2004f45e 6482 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
6483 if (!state)
6484 return NULL;
e7b07cee
HW
6485
6486 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6487
6488 if (cur->stream) {
6489 state->stream = cur->stream;
6490 dc_stream_retain(state->stream);
6491 }
6492
d6ef9b41 6493 state->active_planes = cur->active_planes;
98e6436d 6494 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 6495 state->abm_level = cur->abm_level;
bb47de73
NK
6496 state->vrr_supported = cur->vrr_supported;
6497 state->freesync_config = cur->freesync_config;
cf020d49
NK
6498 state->cm_has_degamma = cur->cm_has_degamma;
6499 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
035f5496 6500 state->force_dpms_off = cur->force_dpms_off;
e7b07cee
HW
6501	/* TODO Duplicate dc_stream once the stream object is flattened */
6502
6503 return &state->base;
6504}
6505
86bc2219 6506#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
e69231c4 6507static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
86bc2219
WL
6508{
6509 crtc_debugfs_init(crtc);
6510
6511 return 0;
6512}
6513#endif
6514
d2574c33
MK
6515static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6516{
6517 enum dc_irq_source irq_source;
6518 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6519 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
6520 int rc;
6521
6522 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6523
6524 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6525
4711c033
LT
6526 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6527 acrtc->crtc_id, enable ? "en" : "dis", rc);
d2574c33
MK
6528 return rc;
6529}
589d2739
HW
6530
6531static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6532{
6533 enum dc_irq_source irq_source;
6534 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6535 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 6536 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
ea3b4242 6537#if defined(CONFIG_DRM_AMD_DC_DCN)
71338cb4 6538 struct amdgpu_display_manager *dm = &adev->dm;
09a5df6c 6539 struct vblank_control_work *work;
ea3b4242 6540#endif
d2574c33
MK
6541 int rc = 0;
6542
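	/*
	 * Keep the VUPDATE interrupt paired with vblank: with VRR the
	 * vertical front porch stretches, so per-frame work is timed off
	 * VUPDATE (the point where the new frame's registers latch)
	 * instead of the variable vblank start.
	 */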
6543 if (enable) {
6544 /* vblank irq on -> Only need vupdate irq in vrr mode */
6545 if (amdgpu_dm_vrr_active(acrtc_state))
6546 rc = dm_set_vupdate_irq(crtc, true);
6547 } else {
6548 /* vblank irq off -> vupdate irq off */
6549 rc = dm_set_vupdate_irq(crtc, false);
6550 }
6551
6552 if (rc)
6553 return rc;
589d2739
HW
6554
6555 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
6556
6557 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6558 return -EBUSY;
6559
98ab5f35
BL
6560 if (amdgpu_in_reset(adev))
6561 return 0;
6562
4928b480 6563#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
6564 if (dm->vblank_control_workqueue) {
6565 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6566 if (!work)
6567 return -ENOMEM;
09a5df6c 6568
06dd1888
NK
6569 INIT_WORK(&work->work, vblank_control_worker);
6570 work->dm = dm;
6571 work->acrtc = acrtc;
6572 work->enable = enable;
09a5df6c 6573
06dd1888
NK
6574 if (acrtc_state->stream) {
6575 dc_stream_retain(acrtc_state->stream);
6576 work->stream = acrtc_state->stream;
6577 }
58aa1c50 6578
06dd1888
NK
6579 queue_work(dm->vblank_control_workqueue, &work->work);
6580 }
4928b480 6581#endif
71338cb4 6582
71338cb4 6583 return 0;
589d2739
HW
6584}
6585
6586static int dm_enable_vblank(struct drm_crtc *crtc)
6587{
6588 return dm_set_vblank(crtc, true);
6589}
6590
6591static void dm_disable_vblank(struct drm_crtc *crtc)
6592{
6593 dm_set_vblank(crtc, false);
6594}
6595
e7b07cee
HW
6596/* Only the options currently available to the driver are implemented */
6597static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6598 .reset = dm_crtc_reset_state,
6599 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
6600 .set_config = drm_atomic_helper_set_config,
6601 .page_flip = drm_atomic_helper_page_flip,
6602 .atomic_duplicate_state = dm_crtc_duplicate_state,
6603 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 6604 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 6605 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 6606 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 6607 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
6608 .enable_vblank = dm_enable_vblank,
6609 .disable_vblank = dm_disable_vblank,
e3eff4b5 6610 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
86bc2219
WL
6611#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6612 .late_register = amdgpu_dm_crtc_late_register,
6613#endif
e7b07cee
HW
6614};
6615
6616static enum drm_connector_status
6617amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6618{
6619 bool connected;
c84dec2f 6620 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6621
1f6010a9
DF
6622 /*
6623 * Notes:
e7b07cee
HW
6624	 * 1. This interface is NOT called in the context of the HPD irq.
6625	 * 2. This interface *is called* in the context of a user-mode ioctl, which
1f6010a9
DF
6626	 * makes it a bad place for *any* MST-related activity.
6627 */
e7b07cee 6628
8580d60b
HW
6629 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6630 !aconnector->fake_enable)
e7b07cee
HW
6631 connected = (aconnector->dc_sink != NULL);
6632 else
6633 connected = (aconnector->base.force == DRM_FORCE_ON);
6634
0f877894
OV
6635 update_subconnector_property(aconnector);
6636
e7b07cee
HW
6637 return (connected ? connector_status_connected :
6638 connector_status_disconnected);
6639}
6640
3ee6b26b
AD
6641int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6642 struct drm_connector_state *connector_state,
6643 struct drm_property *property,
6644 uint64_t val)
e7b07cee
HW
6645{
6646 struct drm_device *dev = connector->dev;
1348969a 6647 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6648 struct dm_connector_state *dm_old_state =
6649 to_dm_connector_state(connector->state);
6650 struct dm_connector_state *dm_new_state =
6651 to_dm_connector_state(connector_state);
6652
6653 int ret = -EINVAL;
6654
6655 if (property == dev->mode_config.scaling_mode_property) {
6656 enum amdgpu_rmx_type rmx_type;
6657
6658 switch (val) {
6659 case DRM_MODE_SCALE_CENTER:
6660 rmx_type = RMX_CENTER;
6661 break;
6662 case DRM_MODE_SCALE_ASPECT:
6663 rmx_type = RMX_ASPECT;
6664 break;
6665 case DRM_MODE_SCALE_FULLSCREEN:
6666 rmx_type = RMX_FULL;
6667 break;
6668 case DRM_MODE_SCALE_NONE:
6669 default:
6670 rmx_type = RMX_OFF;
6671 break;
6672 }
6673
6674 if (dm_old_state->scaling == rmx_type)
6675 return 0;
6676
6677 dm_new_state->scaling = rmx_type;
6678 ret = 0;
6679 } else if (property == adev->mode_info.underscan_hborder_property) {
6680 dm_new_state->underscan_hborder = val;
6681 ret = 0;
6682 } else if (property == adev->mode_info.underscan_vborder_property) {
6683 dm_new_state->underscan_vborder = val;
6684 ret = 0;
6685 } else if (property == adev->mode_info.underscan_property) {
6686 dm_new_state->underscan_enable = val;
6687 ret = 0;
c1ee92f9
DF
6688 } else if (property == adev->mode_info.abm_level_property) {
6689 dm_new_state->abm_level = val;
6690 ret = 0;
e7b07cee
HW
6691 }
6692
6693 return ret;
6694}
6695
3ee6b26b
AD
6696int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6697 const struct drm_connector_state *state,
6698 struct drm_property *property,
6699 uint64_t *val)
e7b07cee
HW
6700{
6701 struct drm_device *dev = connector->dev;
1348969a 6702 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6703 struct dm_connector_state *dm_state =
6704 to_dm_connector_state(state);
6705 int ret = -EINVAL;
6706
6707 if (property == dev->mode_config.scaling_mode_property) {
6708 switch (dm_state->scaling) {
6709 case RMX_CENTER:
6710 *val = DRM_MODE_SCALE_CENTER;
6711 break;
6712 case RMX_ASPECT:
6713 *val = DRM_MODE_SCALE_ASPECT;
6714 break;
6715 case RMX_FULL:
6716 *val = DRM_MODE_SCALE_FULLSCREEN;
6717 break;
6718 case RMX_OFF:
6719 default:
6720 *val = DRM_MODE_SCALE_NONE;
6721 break;
6722 }
6723 ret = 0;
6724 } else if (property == adev->mode_info.underscan_hborder_property) {
6725 *val = dm_state->underscan_hborder;
6726 ret = 0;
6727 } else if (property == adev->mode_info.underscan_vborder_property) {
6728 *val = dm_state->underscan_vborder;
6729 ret = 0;
6730 } else if (property == adev->mode_info.underscan_property) {
6731 *val = dm_state->underscan_enable;
6732 ret = 0;
c1ee92f9
DF
6733 } else if (property == adev->mode_info.abm_level_property) {
6734 *val = dm_state->abm_level;
6735 ret = 0;
e7b07cee 6736 }
c1ee92f9 6737
e7b07cee
HW
6738 return ret;
6739}
6740
526c654a
ED
6741static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6742{
6743 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6744
6745 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6746}
6747
7578ecda 6748static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 6749{
c84dec2f 6750 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6751 const struct dc_link *link = aconnector->dc_link;
1348969a 6752 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 6753 struct amdgpu_display_manager *dm = &adev->dm;
7fd13bae 6754 int i;
ada8ce15 6755
5dff80bd
AG
6756 /*
6757	 * Call only if mst_mgr was initialized before, since it's not done
6758 * for all connector types.
6759 */
6760 if (aconnector->mst_mgr.dev)
6761 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6762
e7b07cee
HW
6763#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6764 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
7fd13bae
AD
6765 for (i = 0; i < dm->num_of_edps; i++) {
6766 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6767 backlight_device_unregister(dm->backlight_dev[i]);
6768 dm->backlight_dev[i] = NULL;
6769 }
e7b07cee
HW
6770 }
6771#endif
dcd5fb82
MF
6772
6773 if (aconnector->dc_em_sink)
6774 dc_sink_release(aconnector->dc_em_sink);
6775 aconnector->dc_em_sink = NULL;
6776 if (aconnector->dc_sink)
6777 dc_sink_release(aconnector->dc_sink);
6778 aconnector->dc_sink = NULL;
6779
e86e8947 6780 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
6781 drm_connector_unregister(connector);
6782 drm_connector_cleanup(connector);
526c654a
ED
6783 if (aconnector->i2c) {
6784 i2c_del_adapter(&aconnector->i2c->base);
6785 kfree(aconnector->i2c);
6786 }
7daec99f 6787 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 6788
e7b07cee
HW
6789 kfree(connector);
6790}
6791
6792void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6793{
6794 struct dm_connector_state *state =
6795 to_dm_connector_state(connector->state);
6796
df099b9b
LSL
6797 if (connector->state)
6798 __drm_atomic_helper_connector_destroy_state(connector->state);
6799
e7b07cee
HW
6800 kfree(state);
6801
6802 state = kzalloc(sizeof(*state), GFP_KERNEL);
6803
6804 if (state) {
6805 state->scaling = RMX_OFF;
6806 state->underscan_enable = false;
6807 state->underscan_hborder = 0;
6808 state->underscan_vborder = 0;
01933ba4 6809 state->base.max_requested_bpc = 8;
3261e013
ML
6810 state->vcpi_slots = 0;
6811 state->pbn = 0;
c3e50f89
NK
6812 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6813 state->abm_level = amdgpu_dm_abm_level;
6814
df099b9b 6815 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
6816 }
6817}
6818
3ee6b26b
AD
6819struct drm_connector_state *
6820amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
6821{
6822 struct dm_connector_state *state =
6823 to_dm_connector_state(connector->state);
6824
6825 struct dm_connector_state *new_state =
6826 kmemdup(state, sizeof(*state), GFP_KERNEL);
6827
98e6436d
AK
6828 if (!new_state)
6829 return NULL;
e7b07cee 6830
98e6436d
AK
6831 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6832
6833 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6834 new_state->abm_level = state->abm_level;
922454c2
NK
6835 new_state->scaling = state->scaling;
6836 new_state->underscan_enable = state->underscan_enable;
6837 new_state->underscan_hborder = state->underscan_hborder;
6838 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
6839 new_state->vcpi_slots = state->vcpi_slots;
6840 new_state->pbn = state->pbn;
98e6436d 6841 return &new_state->base;
e7b07cee
HW
6842}
6843
14f04fa4
AD
6844static int
6845amdgpu_dm_connector_late_register(struct drm_connector *connector)
6846{
6847 struct amdgpu_dm_connector *amdgpu_dm_connector =
6848 to_amdgpu_dm_connector(connector);
00a8037e 6849 int r;
14f04fa4 6850
00a8037e
AD
6851 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6852 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6853 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6854 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6855 if (r)
6856 return r;
6857 }
6858
6859#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
6860 connector_debugfs_init(amdgpu_dm_connector);
6861#endif
6862
6863 return 0;
6864}
6865
e7b07cee
HW
6866static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6867 .reset = amdgpu_dm_connector_funcs_reset,
6868 .detect = amdgpu_dm_connector_detect,
6869 .fill_modes = drm_helper_probe_single_connector_modes,
6870 .destroy = amdgpu_dm_connector_destroy,
6871 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6872 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6873 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6874 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6875 .late_register = amdgpu_dm_connector_late_register,
526c654a 6876 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
6877};
6878
e7b07cee
HW
6879static int get_modes(struct drm_connector *connector)
6880{
6881 return amdgpu_dm_connector_get_modes(connector);
6882}
6883
c84dec2f 6884static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6885{
6886 struct dc_sink_init_data init_params = {
6887 .link = aconnector->dc_link,
6888 .sink_signal = SIGNAL_TYPE_VIRTUAL
6889 };
70e8ffc5 6890 struct edid *edid;
e7b07cee 6891
a89ff457 6892 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
6893		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6894 aconnector->base.name);
6895
6896 aconnector->base.force = DRM_FORCE_OFF;
6897 aconnector->base.override_edid = false;
6898 return;
6899 }
6900
70e8ffc5
HW
6901 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6902
e7b07cee
HW
6903 aconnector->edid = edid;
6904
6905 aconnector->dc_em_sink = dc_link_add_remote_sink(
6906 aconnector->dc_link,
6907 (uint8_t *)edid,
6908 (edid->extensions + 1) * EDID_LENGTH,
6909 &init_params);
6910
dcd5fb82 6911 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
6912 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6913 aconnector->dc_link->local_sink :
6914 aconnector->dc_em_sink;
dcd5fb82
MF
6915 dc_sink_retain(aconnector->dc_sink);
6916 }
e7b07cee
HW
6917}
6918
c84dec2f 6919static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6920{
6921 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6922
1f6010a9
DF
6923 /*
6924	 * In case of a headless boot with force on for a DP managed connector,
e7b07cee
HW
6925	 * those settings have to be != 0 to get the initial modeset.
6926 */
6927 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6928 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6929 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6930 }
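	/* LINK_RATE_HIGH2 is HBR2, 5.4 Gbps per lane, so this assumes a 4-lane HBR2 link. */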
6931
6932
6933 aconnector->base.override_edid = true;
6934 create_eml_sink(aconnector);
6935}
6936
cbd14ae7
SW
6937static struct dc_stream_state *
6938create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6939 const struct drm_display_mode *drm_mode,
6940 const struct dm_connector_state *dm_state,
6941 const struct dc_stream_state *old_stream)
6942{
6943 struct drm_connector *connector = &aconnector->base;
1348969a 6944 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 6945 struct dc_stream_state *stream;
4b7da34b
SW
6946 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6947 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
6948 enum dc_status dc_result = DC_OK;
6949
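	/*
	 * Retry stream creation at progressively lower color depth when DC
	 * validation fails: e.g. 10 -> 8 -> 6 bpc, stopping at the first
	 * depth that validates.
	 */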
6950 do {
6951 stream = create_stream_for_sink(aconnector, drm_mode,
6952 dm_state, old_stream,
6953 requested_bpc);
6954 if (stream == NULL) {
6955 DRM_ERROR("Failed to create stream for sink!\n");
6956 break;
6957 }
6958
6959 dc_result = dc_validate_stream(adev->dm.dc, stream);
6960
6961 if (dc_result != DC_OK) {
74a16675 6962 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
6963 drm_mode->hdisplay,
6964 drm_mode->vdisplay,
6965 drm_mode->clock,
74a16675
RS
6966 dc_result,
6967 dc_status_to_str(dc_result));
cbd14ae7
SW
6968
6969 dc_stream_release(stream);
6970 stream = NULL;
6971 requested_bpc -= 2; /* lower bpc to retry validation */
6972 }
6973
6974 } while (stream == NULL && requested_bpc >= 6);
6975
68eb3ae3
WS
6976 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6977 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6978
6979 aconnector->force_yuv420_output = true;
6980 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6981 dm_state, old_stream);
6982 aconnector->force_yuv420_output = false;
6983 }
6984
cbd14ae7
SW
6985 return stream;
6986}
6987
ba9ca088 6988enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 6989 struct drm_display_mode *mode)
e7b07cee
HW
6990{
6991 int result = MODE_ERROR;
6992 struct dc_sink *dc_sink;
e7b07cee 6993 /* TODO: Unhardcode stream count */
0971c40e 6994 struct dc_stream_state *stream;
c84dec2f 6995 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
6996
6997 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6998 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6999 return result;
7000
1f6010a9
DF
7001 /*
7002	 * Only run this the first time mode_valid is called to initialize
e7b07cee
HW
7003 * EDID mgmt
7004 */
7005 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7006 !aconnector->dc_em_sink)
7007 handle_edid_mgmt(aconnector);
7008
c84dec2f 7009 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 7010
ad975f44
VL
7011 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7012 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
7013 DRM_ERROR("dc_sink is NULL!\n");
7014 goto fail;
7015 }
7016
cbd14ae7
SW
7017 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7018 if (stream) {
7019 dc_stream_release(stream);
e7b07cee 7020 result = MODE_OK;
cbd14ae7 7021 }
e7b07cee
HW
7022
7023fail:
7024 /* TODO: error handling*/
7025 return result;
7026}
7027
88694af9
NK
7028static int fill_hdr_info_packet(const struct drm_connector_state *state,
7029 struct dc_info_packet *out)
7030{
7031 struct hdmi_drm_infoframe frame;
7032 unsigned char buf[30]; /* 26 + 4 */
7033 ssize_t len;
7034 int ret, i;
7035
7036 memset(out, 0, sizeof(*out));
7037
7038 if (!state->hdr_output_metadata)
7039 return 0;
7040
7041 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7042 if (ret)
7043 return ret;
7044
7045 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7046 if (len < 0)
7047 return (int)len;
7048
7049 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7050 if (len != 30)
7051 return -EINVAL;
7052
7053 /* Prepare the infopacket for DC. */
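	/*
	 * buf[] holds the packed infoframe: a 4-byte header (type, version,
	 * length, checksum) followed by the 26-byte HDR static metadata
	 * payload. HDMI carries the checksum in sb[0]; the DP SDP header
	 * has no checksum byte, so the payload starts at sb[2] after the
	 * version and length bytes.
	 */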
7054 switch (state->connector->connector_type) {
7055 case DRM_MODE_CONNECTOR_HDMIA:
7056 out->hb0 = 0x87; /* type */
7057 out->hb1 = 0x01; /* version */
7058 out->hb2 = 0x1A; /* length */
7059 out->sb[0] = buf[3]; /* checksum */
7060 i = 1;
7061 break;
7062
7063 case DRM_MODE_CONNECTOR_DisplayPort:
7064 case DRM_MODE_CONNECTOR_eDP:
7065 out->hb0 = 0x00; /* sdp id, zero */
7066 out->hb1 = 0x87; /* type */
7067 out->hb2 = 0x1D; /* payload len - 1 */
7068 out->hb3 = (0x13 << 2); /* sdp version */
7069 out->sb[0] = 0x01; /* version */
7070 out->sb[1] = 0x1A; /* length */
7071 i = 2;
7072 break;
7073
7074 default:
7075 return -EINVAL;
7076 }
7077
7078 memcpy(&out->sb[i], &buf[4], 26);
7079 out->valid = true;
7080
7081 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7082 sizeof(out->sb), false);
7083
7084 return 0;
7085}
7086
88694af9
NK
7087static int
7088amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 7089 struct drm_atomic_state *state)
88694af9 7090{
51e857af
SP
7091 struct drm_connector_state *new_con_state =
7092 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
7093 struct drm_connector_state *old_con_state =
7094 drm_atomic_get_old_connector_state(state, conn);
7095 struct drm_crtc *crtc = new_con_state->crtc;
7096 struct drm_crtc_state *new_crtc_state;
7097 int ret;
7098
e8a98235
RS
7099 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7100
88694af9
NK
7101 if (!crtc)
7102 return 0;
7103
72921cdf 7104 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
7105 struct dc_info_packet hdr_infopacket;
7106
7107 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7108 if (ret)
7109 return ret;
7110
7111 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7112 if (IS_ERR(new_crtc_state))
7113 return PTR_ERR(new_crtc_state);
7114
7115 /*
7116 * DC considers the stream backends changed if the
7117 * static metadata changes. Forcing the modeset also
7118 * gives a simple way for userspace to switch from
b232d4ed
NK
7119 * 8bpc to 10bpc when setting the metadata to enter
7120 * or exit HDR.
7121 *
7122 * Changing the static metadata after it's been
7123 * set is permissible, however. So only force a
7124 * modeset if we're entering or exiting HDR.
88694af9 7125 */
b232d4ed
NK
7126 new_crtc_state->mode_changed =
7127 !old_con_state->hdr_output_metadata ||
7128 !new_con_state->hdr_output_metadata;
88694af9
NK
7129 }
7130
7131 return 0;
7132}
7133
e7b07cee
HW
7134static const struct drm_connector_helper_funcs
7135amdgpu_dm_connector_helper_funcs = {
7136 /*
1f6010a9 7137	 * If hotplugging a second, bigger display in FB console mode, bigger-resolution
b830ebc9 7138	 * modes will be filtered out by drm_mode_validate_size(), and those modes
1f6010a9 7139	 * are missing after the user starts lightdm. So we need to renew the modes
b830ebc9
HW
7140	 * list in the get_modes callback, not just return the modes count.
7141 */
e7b07cee
HW
7142 .get_modes = get_modes,
7143 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 7144 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
7145};
7146
7147static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7148{
7149}
7150
d6ef9b41 7151static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
7152{
7153 struct drm_atomic_state *state = new_crtc_state->state;
7154 struct drm_plane *plane;
7155 int num_active = 0;
7156
7157 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7158 struct drm_plane_state *new_plane_state;
7159
7160 /* Cursor planes are "fake". */
7161 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7162 continue;
7163
7164 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7165
7166 if (!new_plane_state) {
7167 /*
7168			 * The plane is enabled on the CRTC and hasn't changed
7169 * state. This means that it previously passed
7170 * validation and is therefore enabled.
7171 */
7172 num_active += 1;
7173 continue;
7174 }
7175
7176 /* We need a framebuffer to be considered enabled. */
7177 num_active += (new_plane_state->fb != NULL);
7178 }
7179
d6ef9b41
NK
7180 return num_active;
7181}
7182
8fe684e9
NK
7183static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7184 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
7185{
7186 struct dm_crtc_state *dm_new_crtc_state =
7187 to_dm_crtc_state(new_crtc_state);
7188
7189 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
7190
7191 if (!dm_new_crtc_state->stream)
7192 return;
7193
7194 dm_new_crtc_state->active_planes =
7195 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
7196}
7197
3ee6b26b 7198static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 7199 struct drm_atomic_state *state)
e7b07cee 7200{
29b77ad7
MR
7201 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7202 crtc);
1348969a 7203 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 7204 struct dc *dc = adev->dm.dc;
29b77ad7 7205 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
7206 int ret = -EINVAL;
7207
5b8c5969 7208 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 7209
29b77ad7 7210 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 7211
bcd74374
ND
7212 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7213 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
e7b07cee
HW
7214 return ret;
7215 }
7216
bc92c065 7217 /*
b836a274
MD
7218 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7219 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7220 * planes are disabled, which is not supported by the hardware. And there is legacy
7221 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 7222 */
29b77ad7 7223 if (crtc_state->enable &&
ea9522f5
SS
7224 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7225 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 7226 return -EINVAL;
ea9522f5 7227 }
c14a005c 7228
b836a274
MD
7229 /* In some use cases, like reset, no stream is attached */
7230 if (!dm_crtc_state->stream)
7231 return 0;
7232
62c933f9 7233 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
7234 return 0;
7235
ea9522f5 7236 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
7237 return ret;
7238}
7239
3ee6b26b
AD
7240static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7241 const struct drm_display_mode *mode,
7242 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
7243{
7244 return true;
7245}
7246
7247static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7248 .disable = dm_crtc_helper_disable,
7249 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
7250 .mode_fixup = dm_crtc_helper_mode_fixup,
7251 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
7252};
7253
7254static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7255{
7256
7257}
7258
3261e013
ML
7259static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
7260{
7261 switch (display_color_depth) {
7262 case COLOR_DEPTH_666:
7263 return 6;
7264 case COLOR_DEPTH_888:
7265 return 8;
7266 case COLOR_DEPTH_101010:
7267 return 10;
7268 case COLOR_DEPTH_121212:
7269 return 12;
7270 case COLOR_DEPTH_141414:
7271 return 14;
7272 case COLOR_DEPTH_161616:
7273 return 16;
7274 default:
7275 break;
7276 }
7277 return 0;
7278}
7279
3ee6b26b
AD
7280static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7281 struct drm_crtc_state *crtc_state,
7282 struct drm_connector_state *conn_state)
e7b07cee 7283{
3261e013
ML
7284 struct drm_atomic_state *state = crtc_state->state;
7285 struct drm_connector *connector = conn_state->connector;
7286 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7287 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7288 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7289 struct drm_dp_mst_topology_mgr *mst_mgr;
7290 struct drm_dp_mst_port *mst_port;
7291 enum dc_color_depth color_depth;
7292 int clock, bpp = 0;
1bc22f20 7293 bool is_y420 = false;
3261e013
ML
7294
7295 if (!aconnector->port || !aconnector->dc_sink)
7296 return 0;
7297
7298 mst_port = aconnector->port;
7299 mst_mgr = &aconnector->mst_port->mst_mgr;
7300
7301 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7302 return 0;
7303
7304 if (!state->duplicated) {
cbd14ae7 7305 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
7306 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7307 aconnector->force_yuv420_output;
cbd14ae7
SW
7308 color_depth = convert_color_depth_from_display_info(connector,
7309 is_y420,
7310 max_bpc);
3261e013
ML
7311 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7312 clock = adjusted_mode->clock;
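		/*
		 * PBN is the MST bandwidth unit (1 PBN = 54/64 MBytes/s);
		 * the stream's requirement follows from the pixel clock and
		 * the total bits per pixel (3 components x bpc).
		 */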
dc48529f 7313 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
7314 }
7315 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7316 mst_mgr,
7317 mst_port,
1c6c1cb5 7318 dm_new_connector_state->pbn,
03ca9600 7319 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
7320 if (dm_new_connector_state->vcpi_slots < 0) {
7321 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7322 return dm_new_connector_state->vcpi_slots;
7323 }
e7b07cee
HW
7324 return 0;
7325}
7326
7327const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7328 .disable = dm_encoder_helper_disable,
7329 .atomic_check = dm_encoder_helper_atomic_check
7330};
7331
d9fe1a4c 7332#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74 7333static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6513104b
HW
7334 struct dc_state *dc_state,
7335 struct dsc_mst_fairness_vars *vars)
29b9ba74
ML
7336{
7337 struct dc_stream_state *stream = NULL;
7338 struct drm_connector *connector;
5760dcb9 7339 struct drm_connector_state *new_con_state;
29b9ba74
ML
7340 struct amdgpu_dm_connector *aconnector;
7341 struct dm_connector_state *dm_conn_state;
a550bb16
HW
7342 int i, j;
7343 int vcpi, pbn_div, pbn, slot_num = 0;
29b9ba74 7344
5760dcb9 7345 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
7346
7347 aconnector = to_amdgpu_dm_connector(connector);
7348
7349 if (!aconnector->port)
7350 continue;
7351
7352 if (!new_con_state || !new_con_state->crtc)
7353 continue;
7354
7355 dm_conn_state = to_dm_connector_state(new_con_state);
7356
7357 for (j = 0; j < dc_state->stream_count; j++) {
7358 stream = dc_state->streams[j];
7359 if (!stream)
7360 continue;
7361
7362 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7363 break;
7364
7365 stream = NULL;
7366 }
7367
7368 if (!stream)
7369 continue;
7370
29b9ba74 7371 pbn_div = dm_mst_get_pbn_divider(stream->link);
6513104b
HW
7372		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7373 for (j = 0; j < dc_state->stream_count; j++) {
7374 if (vars[j].aconnector == aconnector) {
7375 pbn = vars[j].pbn;
7376 break;
7377 }
7378 }
7379
a550bb16
HW
7380 if (j == dc_state->stream_count)
7381 continue;
7382
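		/*
		 * pbn_div is the link's PBN capacity per MTP time slot;
		 * rounding up pbn / pbn_div gives how many of the link's 64
		 * time slots this stream needs.
		 */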
7383 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7384
7385 if (stream->timing.flags.DSC != 1) {
7386 dm_conn_state->pbn = pbn;
7387 dm_conn_state->vcpi_slots = slot_num;
7388
7389 drm_dp_mst_atomic_enable_dsc(state,
7390 aconnector->port,
7391 dm_conn_state->pbn,
7392 0,
7393 false);
7394 continue;
7395 }
7396
29b9ba74
ML
7397 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7398 aconnector->port,
7399 pbn, pbn_div,
7400 true);
7401 if (vcpi < 0)
7402 return vcpi;
7403
7404 dm_conn_state->pbn = pbn;
7405 dm_conn_state->vcpi_slots = vcpi;
7406 }
7407 return 0;
7408}
d9fe1a4c 7409#endif
29b9ba74 7410
e7b07cee
HW
7411static void dm_drm_plane_reset(struct drm_plane *plane)
7412{
7413 struct dm_plane_state *amdgpu_state = NULL;
7414
7415 if (plane->state)
7416 plane->funcs->atomic_destroy_state(plane, plane->state);
7417
7418 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 7419 WARN_ON(amdgpu_state == NULL);
1f6010a9 7420
7ddaef96
NK
7421 if (amdgpu_state)
7422 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
7423}
7424
7425static struct drm_plane_state *
7426dm_drm_plane_duplicate_state(struct drm_plane *plane)
7427{
7428 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7429
7430 old_dm_plane_state = to_dm_plane_state(plane->state);
7431 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7432 if (!dm_plane_state)
7433 return NULL;
7434
7435 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7436
3be5262e
HW
7437 if (old_dm_plane_state->dc_state) {
7438 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7439 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
7440 }
7441
7442 return &dm_plane_state->base;
7443}
7444
dfd84d90 7445static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 7446 struct drm_plane_state *state)
e7b07cee
HW
7447{
7448 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7449
3be5262e
HW
7450 if (dm_plane_state->dc_state)
7451 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 7452
0627bbd3 7453 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
7454}
7455
7456static const struct drm_plane_funcs dm_plane_funcs = {
7457 .update_plane = drm_atomic_helper_update_plane,
7458 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 7459 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
7460 .reset = dm_drm_plane_reset,
7461 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7462 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 7463 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
7464};
7465
3ee6b26b
AD
7466static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7467 struct drm_plane_state *new_state)
e7b07cee
HW
7468{
7469 struct amdgpu_framebuffer *afb;
7470 struct drm_gem_object *obj;
5d43be0c 7471 struct amdgpu_device *adev;
e7b07cee 7472 struct amdgpu_bo *rbo;
e7b07cee 7473 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
7474 struct list_head list;
7475 struct ttm_validate_buffer tv;
7476 struct ww_acquire_ctx ticket;
5d43be0c
CK
7477 uint32_t domain;
7478 int r;
e7b07cee
HW
7479
7480 if (!new_state->fb) {
4711c033 7481 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
7482 return 0;
7483 }
7484
7485 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 7486 obj = new_state->fb->obj[0];
e7b07cee 7487 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 7488 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
7489 INIT_LIST_HEAD(&list);
7490
7491 tv.bo = &rbo->tbo;
7492 tv.num_shared = 1;
7493 list_add(&tv.head, &list);
7494
9165fb87 7495 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
7496 if (r) {
7497 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 7498 return r;
0f257b09 7499 }
e7b07cee 7500
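	/*
	 * Cursor buffers must live in VRAM; other planes may also scan out
	 * of GTT where the ASIC supports it, as reported by
	 * amdgpu_display_supported_domains().
	 */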
5d43be0c 7501 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 7502 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
7503 else
7504 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 7505
7b7c6c81 7506 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 7507 if (unlikely(r != 0)) {
30b7c614
HW
7508 if (r != -ERESTARTSYS)
7509 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 7510 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
7511 return r;
7512 }
7513
bb812f1e
JZ
7514 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7515 if (unlikely(r != 0)) {
7516 amdgpu_bo_unpin(rbo);
0f257b09 7517 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7518 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
7519 return r;
7520 }
7df7e505 7521
0f257b09 7522 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7523
7b7c6c81 7524 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
7525
7526 amdgpu_bo_ref(rbo);
7527
cf322b49
NK
7528 /**
7529 * We don't do surface updates on planes that have been newly created,
7530 * but we also don't have the afb->address during atomic check.
7531 *
7532 * Fill in buffer attributes depending on the address here, but only on
7533 * newly created planes since they're not being used by DC yet and this
7534 * won't modify global state.
7535 */
7536 dm_plane_state_old = to_dm_plane_state(plane->state);
7537 dm_plane_state_new = to_dm_plane_state(new_state);
7538
3be5262e 7539 if (dm_plane_state_new->dc_state &&
cf322b49
NK
7540 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7541 struct dc_plane_state *plane_state =
7542 dm_plane_state_new->dc_state;
7543 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 7544
320932bf 7545 fill_plane_buffer_attributes(
695af5f9 7546 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 7547 afb->tiling_flags,
cf322b49
NK
7548 &plane_state->tiling_info, &plane_state->plane_size,
7549 &plane_state->dcc, &plane_state->address,
6eed95b0 7550 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
7551 }
7552
e7b07cee
HW
7553 return 0;
7554}
7555
3ee6b26b
AD
7556static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7557 struct drm_plane_state *old_state)
e7b07cee
HW
7558{
7559 struct amdgpu_bo *rbo;
e7b07cee
HW
7560 int r;
7561
7562 if (!old_state->fb)
7563 return;
7564
e68d14dd 7565 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
7566 r = amdgpu_bo_reserve(rbo, false);
7567 if (unlikely(r)) {
7568 DRM_ERROR("failed to reserve rbo before unpin\n");
7569 return;
b830ebc9
HW
7570 }
7571
7572 amdgpu_bo_unpin(rbo);
7573 amdgpu_bo_unreserve(rbo);
7574 amdgpu_bo_unref(&rbo);
e7b07cee
HW
7575}
7576
8c44515b
AP
7577static int dm_plane_helper_check_state(struct drm_plane_state *state,
7578 struct drm_crtc_state *new_crtc_state)
7579{
6300b3bd
MK
7580 struct drm_framebuffer *fb = state->fb;
7581 int min_downscale, max_upscale;
7582 int min_scale = 0;
7583 int max_scale = INT_MAX;
7584
40d916a2 7585 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 7586 if (fb && state->crtc) {
40d916a2
NC
7587 /* Validate viewport to cover the case when only the position changes */
7588 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7589 int viewport_width = state->crtc_w;
7590 int viewport_height = state->crtc_h;
7591
7592 if (state->crtc_x < 0)
7593 viewport_width += state->crtc_x;
7594 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7595 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7596
7597 if (state->crtc_y < 0)
7598 viewport_height += state->crtc_y;
7599 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7600 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7601
4abdb72b
NC
7602 if (viewport_width < 0 || viewport_height < 0) {
7603 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7604 return -EINVAL;
7605 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7606 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 7607 return -EINVAL;
4abdb72b
NC
7608 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7609 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 7610 return -EINVAL;
4abdb72b
NC
7611 }
7612
40d916a2
NC
7613 }
7614
7615 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
7616 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7617 &min_downscale, &max_upscale);
7618 /*
7619 * Convert to drm convention: 16.16 fixed point, instead of dc's
7620 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7621 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7622 */
7623 min_scale = (1000 << 16) / max_upscale;
7624 max_scale = (1000 << 16) / min_downscale;
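		/*
		 * Worked example: a DC max_upscale of 16000 (16x in units of
		 * 1/1000) gives min_scale = (1000 << 16) / 16000 = 4096,
		 * i.e. 1/16 in drm's 16.16 fixed-point src/dst ratio.
		 */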
7625 }
8c44515b 7626
8c44515b 7627 return drm_atomic_helper_check_plane_state(
6300b3bd 7628 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
7629}
7630
7578ecda 7631static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 7632 struct drm_atomic_state *state)
cbd19488 7633{
7c11b99a
MR
7634 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7635 plane);
1348969a 7636 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 7637 struct dc *dc = adev->dm.dc;
78171832 7638 struct dm_plane_state *dm_plane_state;
695af5f9 7639 struct dc_scaling_info scaling_info;
8c44515b 7640 struct drm_crtc_state *new_crtc_state;
695af5f9 7641 int ret;
78171832 7642
ba5c1649 7643 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 7644
ba5c1649 7645 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 7646
3be5262e 7647 if (!dm_plane_state->dc_state)
9a3329b1 7648 return 0;
cbd19488 7649
8c44515b 7650 new_crtc_state =
dec92020 7651 drm_atomic_get_new_crtc_state(state,
ba5c1649 7652 new_plane_state->crtc);
8c44515b
AP
7653 if (!new_crtc_state)
7654 return -EINVAL;
7655
ba5c1649 7656 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
7657 if (ret)
7658 return ret;
7659
4375d625 7660 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
695af5f9
NK
7661 if (ret)
7662 return ret;
a05bcff1 7663
62c933f9 7664 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
7665 return 0;
7666
7667 return -EINVAL;
7668}
7669
674e78ac 7670static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7671 struct drm_atomic_state *state)
674e78ac
NK
7672{
7673 /* Only support async updates on cursor planes. */
7674 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7675 return -EINVAL;
7676
7677 return 0;
7678}
7679
7680static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7681 struct drm_atomic_state *state)
674e78ac 7682{
5ddb0bd4
MR
7683 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7684 plane);
674e78ac 7685 struct drm_plane_state *old_state =
5ddb0bd4 7686 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7687
e8a98235
RS
7688 trace_amdgpu_dm_atomic_update_cursor(new_state);
7689
332af874 7690 swap(plane->state->fb, new_state->fb);
674e78ac
NK
7691
7692 plane->state->src_x = new_state->src_x;
7693 plane->state->src_y = new_state->src_y;
7694 plane->state->src_w = new_state->src_w;
7695 plane->state->src_h = new_state->src_h;
7696 plane->state->crtc_x = new_state->crtc_x;
7697 plane->state->crtc_y = new_state->crtc_y;
7698 plane->state->crtc_w = new_state->crtc_w;
7699 plane->state->crtc_h = new_state->crtc_h;
7700
7701 handle_cursor_update(plane, old_state);
7702}
7703
e7b07cee
HW
7704static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7705 .prepare_fb = dm_plane_helper_prepare_fb,
7706 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7707 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
7708 .atomic_async_check = dm_plane_atomic_async_check,
7709 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
7710};
7711
7712/*
7713 * TODO: these are currently initialized to rgb formats only.
7714 * For future use cases we should either initialize them dynamically based on
7715 * plane capabilities, or initialize this array to all formats, so internal drm
1f6010a9 7716	 * check will succeed, and let DC implement the proper check.
e7b07cee 7717 */
d90371b0 7718static const uint32_t rgb_formats[] = {
e7b07cee
HW
7719 DRM_FORMAT_XRGB8888,
7720 DRM_FORMAT_ARGB8888,
7721 DRM_FORMAT_RGBA8888,
7722 DRM_FORMAT_XRGB2101010,
7723 DRM_FORMAT_XBGR2101010,
7724 DRM_FORMAT_ARGB2101010,
7725 DRM_FORMAT_ABGR2101010,
58020403
MK
7726 DRM_FORMAT_XRGB16161616,
7727 DRM_FORMAT_XBGR16161616,
7728 DRM_FORMAT_ARGB16161616,
7729 DRM_FORMAT_ABGR16161616,
bcd47f60
MR
7730 DRM_FORMAT_XBGR8888,
7731 DRM_FORMAT_ABGR8888,
46dd9ff7 7732 DRM_FORMAT_RGB565,
e7b07cee
HW
7733};
7734
0d579c7e
NK
7735static const uint32_t overlay_formats[] = {
7736 DRM_FORMAT_XRGB8888,
7737 DRM_FORMAT_ARGB8888,
7738 DRM_FORMAT_RGBA8888,
7739 DRM_FORMAT_XBGR8888,
7740 DRM_FORMAT_ABGR8888,
7267a1a9 7741 DRM_FORMAT_RGB565
e7b07cee
HW
7742};
7743
7744static const u32 cursor_formats[] = {
7745 DRM_FORMAT_ARGB8888
7746};
7747
37c6a93b
NK
7748static int get_plane_formats(const struct drm_plane *plane,
7749 const struct dc_plane_cap *plane_cap,
7750 uint32_t *formats, int max_formats)
e7b07cee 7751{
37c6a93b
NK
7752 int i, num_formats = 0;
7753
7754 /*
7755 * TODO: Query support for each group of formats directly from
7756 * DC plane caps. This will require adding more formats to the
7757 * caps list.
7758 */
e7b07cee 7759
f180b4bc 7760 switch (plane->type) {
e7b07cee 7761 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
7762 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7763 if (num_formats >= max_formats)
7764 break;
7765
7766 formats[num_formats++] = rgb_formats[i];
7767 }
7768
ea36ad34 7769 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 7770 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
7771 if (plane_cap && plane_cap->pixel_format_support.p010)
7772 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
7773 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7774 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7775 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
7776 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7777 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 7778 }
e7b07cee 7779 break;
37c6a93b 7780
e7b07cee 7781 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
7782 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7783 if (num_formats >= max_formats)
7784 break;
7785
7786 formats[num_formats++] = overlay_formats[i];
7787 }
e7b07cee 7788 break;
37c6a93b 7789
e7b07cee 7790 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
7791 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7792 if (num_formats >= max_formats)
7793 break;
7794
7795 formats[num_formats++] = cursor_formats[i];
7796 }
e7b07cee
HW
7797 break;
7798 }
7799
37c6a93b
NK
7800 return num_formats;
7801}
7802
7803static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7804 struct drm_plane *plane,
7805 unsigned long possible_crtcs,
7806 const struct dc_plane_cap *plane_cap)
7807{
7808 uint32_t formats[32];
7809 int num_formats;
7810 int res = -EPERM;
ecc874a6 7811 unsigned int supported_rotations;
faa37f54 7812 uint64_t *modifiers = NULL;
37c6a93b
NK
7813
7814 num_formats = get_plane_formats(plane, plane_cap, formats,
7815 ARRAY_SIZE(formats));
7816
faa37f54
BN
7817 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7818 if (res)
7819 return res;
7820
4a580877 7821 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7822 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
7823 modifiers, plane->type, NULL);
7824 kfree(modifiers);
37c6a93b
NK
7825 if (res)
7826 return res;
7827
cc1fec57
NK
7828 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7829 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
7830 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7831 BIT(DRM_MODE_BLEND_PREMULTI);
7832
7833 drm_plane_create_alpha_property(plane);
7834 drm_plane_create_blend_mode_property(plane, blend_caps);
7835 }
7836
fc8e5230 7837 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
7838 plane_cap &&
7839 (plane_cap->pixel_format_support.nv12 ||
7840 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
7841 /* This only affects YUV formats. */
7842 drm_plane_create_color_properties(
7843 plane,
7844 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
7845 BIT(DRM_COLOR_YCBCR_BT709) |
7846 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
7847 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7848 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7849 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7850 }
7851
ecc874a6
PLG
7852 supported_rotations =
7853 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7854 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7855
1347385f
SS
7856 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7857 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
7858 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7859 supported_rotations);
ecc874a6 7860
f180b4bc 7861 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7862
96719c54 7863 /* Create (reset) the plane state */
f180b4bc
HW
7864 if (plane->funcs->reset)
7865 plane->funcs->reset(plane);
96719c54 7866
37c6a93b 7867 return 0;
e7b07cee
HW
7868}
7869
7578ecda
AD
7870static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7871 struct drm_plane *plane,
7872 uint32_t crtc_index)
e7b07cee
HW
7873{
7874 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7875 struct drm_plane *cursor_plane;
e7b07cee
HW
7876
7877 int res = -ENOMEM;
7878
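	/*
	 * Each CRTC gets its own dedicated cursor plane, created here and
	 * handed to drm_crtc_init_with_planes() along with the primary.
	 */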
7879 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7880 if (!cursor_plane)
7881 goto fail;
7882
f180b4bc 7883 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7884 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
7885
7886 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7887 if (!acrtc)
7888 goto fail;
7889
7890 res = drm_crtc_init_with_planes(
7891 dm->ddev,
7892 &acrtc->base,
7893 plane,
f180b4bc 7894 cursor_plane,
e7b07cee
HW
7895 &amdgpu_dm_crtc_funcs, NULL);
7896
7897 if (res)
7898 goto fail;
7899
7900 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7901
96719c54
HW
7902 /* Create (reset) the plane state */
7903 if (acrtc->base.funcs->reset)
7904 acrtc->base.funcs->reset(&acrtc->base);
7905
e7b07cee
HW
7906 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7907 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7908
7909 acrtc->crtc_id = crtc_index;
7910 acrtc->base.enabled = false;
c37e2d29 7911 acrtc->otg_inst = -1;
e7b07cee
HW
7912
7913 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
7914 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7915 true, MAX_COLOR_LUT_ENTRIES);
086247a4 7916 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 7917
e7b07cee
HW
7918 return 0;
7919
7920fail:
b830ebc9
HW
7921 kfree(acrtc);
7922 kfree(cursor_plane);
e7b07cee
HW
7923 return res;
7924}
7925
7926
7927static int to_drm_connector_type(enum signal_type st)
7928{
7929 switch (st) {
7930 case SIGNAL_TYPE_HDMI_TYPE_A:
7931 return DRM_MODE_CONNECTOR_HDMIA;
7932 case SIGNAL_TYPE_EDP:
7933 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
7934 case SIGNAL_TYPE_LVDS:
7935 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
7936 case SIGNAL_TYPE_RGB:
7937 return DRM_MODE_CONNECTOR_VGA;
7938 case SIGNAL_TYPE_DISPLAY_PORT:
7939 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7940 return DRM_MODE_CONNECTOR_DisplayPort;
7941 case SIGNAL_TYPE_DVI_DUAL_LINK:
7942 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7943 return DRM_MODE_CONNECTOR_DVID;
7944 case SIGNAL_TYPE_VIRTUAL:
7945 return DRM_MODE_CONNECTOR_VIRTUAL;
7946
7947 default:
7948 return DRM_MODE_CONNECTOR_Unknown;
7949 }
7950}
7951
2b4c1c05
DV
7952static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7953{
62afb4ad
JRS
7954 struct drm_encoder *encoder;
7955
7956 /* There is only one encoder per connector */
7957 drm_connector_for_each_possible_encoder(connector, encoder)
7958 return encoder;
7959
7960 return NULL;
2b4c1c05
DV
7961}
7962
e7b07cee
HW
7963static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7964{
e7b07cee
HW
7965 struct drm_encoder *encoder;
7966 struct amdgpu_encoder *amdgpu_encoder;
7967
2b4c1c05 7968 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
7969
7970 if (encoder == NULL)
7971 return;
7972
7973 amdgpu_encoder = to_amdgpu_encoder(encoder);
7974
7975 amdgpu_encoder->native_mode.clock = 0;
7976
7977 if (!list_empty(&connector->probed_modes)) {
7978 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 7979
e7b07cee 7980 list_for_each_entry(preferred_mode,
b830ebc9
HW
7981 &connector->probed_modes,
7982 head) {
7983 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7984 amdgpu_encoder->native_mode = *preferred_mode;
7985
e7b07cee
HW
7986 break;
7987 }
7988
7989 }
7990}
7991
3ee6b26b
AD
7992static struct drm_display_mode *
7993amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7994 char *name,
7995 int hdisplay, int vdisplay)
e7b07cee
HW
7996{
7997 struct drm_device *dev = encoder->dev;
7998 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7999 struct drm_display_mode *mode = NULL;
8000 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8001
8002 mode = drm_mode_duplicate(dev, native_mode);
8003
b830ebc9 8004 if (mode == NULL)
e7b07cee
HW
8005 return NULL;
8006
8007 mode->hdisplay = hdisplay;
8008 mode->vdisplay = vdisplay;
8009 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 8010 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
8011
8012 return mode;
8013
8014}
8015
8016static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 8017 struct drm_connector *connector)
e7b07cee
HW
8018{
8019 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8020 struct drm_display_mode *mode = NULL;
8021 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
8022 struct amdgpu_dm_connector *amdgpu_dm_connector =
8023 to_amdgpu_dm_connector(connector);
e7b07cee
HW
8024 int i;
8025 int n;
8026 struct mode_size {
8027 char name[DRM_DISPLAY_MODE_LEN];
8028 int w;
8029 int h;
b830ebc9 8030 } common_modes[] = {
e7b07cee
HW
8031 { "640x480", 640, 480},
8032 { "800x600", 800, 600},
8033 { "1024x768", 1024, 768},
8034 { "1280x720", 1280, 720},
8035 { "1280x800", 1280, 800},
8036 {"1280x1024", 1280, 1024},
8037 { "1440x900", 1440, 900},
8038 {"1680x1050", 1680, 1050},
8039 {"1600x1200", 1600, 1200},
8040 {"1920x1080", 1920, 1080},
8041 {"1920x1200", 1920, 1200}
8042 };
8043
b830ebc9 8044 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
8045
8046 for (i = 0; i < n; i++) {
8047 struct drm_display_mode *curmode = NULL;
8048 bool mode_existed = false;
8049
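		/* Skip modes larger than the native mode, and the native size itself. */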
8050 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
8051 common_modes[i].h > native_mode->vdisplay ||
8052 (common_modes[i].w == native_mode->hdisplay &&
8053 common_modes[i].h == native_mode->vdisplay))
8054 continue;
e7b07cee
HW
8055
8056 list_for_each_entry(curmode, &connector->probed_modes, head) {
8057 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 8058 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
8059 mode_existed = true;
8060 break;
8061 }
8062 }
8063
8064 if (mode_existed)
8065 continue;
8066
8067 mode = amdgpu_dm_create_common_mode(encoder,
8068 common_modes[i].name, common_modes[i].w,
8069 common_modes[i].h);
8070 drm_mode_probed_add(connector, mode);
c84dec2f 8071 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
8072 }
8073}
8074
static void amdgpu_set_panel_orientation(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	const struct drm_display_mode *native_mode;

	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
		return;

	encoder = amdgpu_dm_connector_to_encoder(connector);
	if (!encoder)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	native_mode = &amdgpu_encoder->native_mode;
	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
		return;

	drm_connector_set_panel_orientation_with_quirk(connector,
						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
						       native_mode->hdisplay,
						       native_mode->vdisplay);
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode() since an EDID can have
		 * more than one preferred mode. Modes that are later
		 * in the probed mode list could be of higher and
		 * preferred resolution, e.g. 3840x2160 in the base
		 * EDID preferred timing and 4096x2160 in a DID
		 * extension block.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);

		/* Freesync capabilities are reset by calling
		 * drm_add_edid_modes() and need to be
		 * restored here.
		 */
		amdgpu_dm_update_freesync_caps(connector, edid);

		amdgpu_set_panel_orientation(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
			      struct drm_display_mode *mode)
{
	struct drm_display_mode *m;

	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
		if (drm_mode_equal(m, mode))
			return true;
	}

	return false;
}

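/*
 * Synthesize extra fixed-refresh modes inside the panel's VRR range by
 * stretching the vertical blank of the highest-refresh mode. With the
 * pixel clock and htotal kept fixed, the vtotal needed for a target
 * rate is
 *
 *	vtotal = clock * 1000 * 1000 / (rate_in_mHz * htotal)
 *
 * For example (illustrative numbers only): a 1920x1080 mode with a
 * 148500 kHz clock and htotal 2200 needs vtotal ~1406 for 48 Hz.
 */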
static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
	const struct drm_display_mode *m;
	struct drm_display_mode *new_mode;
	uint i;
	uint32_t new_modes_count = 0;

	/* Standard FPS values
	 *
	 * 23.976       - TV/NTSC
	 * 24           - Cinema
	 * 25           - TV/PAL
	 * 29.97        - TV/NTSC
	 * 30           - TV/NTSC
	 * 48           - Cinema HFR
	 * 50           - TV/PAL
	 * 60           - Commonly used
	 * 48,72,96,120 - Multiples of 24
	 */
	static const uint32_t common_rates[] = {
		23976, 24000, 25000, 29970, 30000,
		48000, 50000, 60000, 72000, 96000, 120000
	};

	/*
	 * Find mode with highest refresh rate with the same resolution
	 * as the preferred mode. Some monitors report a preferred mode
	 * with lower resolution than the highest refresh rate supported.
	 */

	m = get_highest_refresh_rate_mode(aconnector, true);
	if (!m)
		return 0;

	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
		uint64_t target_vtotal, target_vtotal_diff;
		uint64_t num, den;

		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
			continue;

		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
		    common_rates[i] > aconnector->max_vfreq * 1000)
			continue;

		num = (unsigned long long)m->clock * 1000 * 1000;
		den = common_rates[i] * (unsigned long long)m->htotal;
		target_vtotal = div_u64(num, den);
		target_vtotal_diff = target_vtotal - m->vtotal;

		/* Check for illegal modes */
		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
		    m->vtotal + target_vtotal_diff < m->vsync_end)
			continue;

		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
		if (!new_mode)
			goto out;

		new_mode->vtotal += (u16)target_vtotal_diff;
		new_mode->vsync_start += (u16)target_vtotal_diff;
		new_mode->vsync_end += (u16)target_vtotal_diff;
		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
		new_mode->type |= DRM_MODE_TYPE_DRIVER;

		if (!is_duplicate_mode(aconnector, new_mode)) {
			drm_mode_probed_add(&aconnector->base, new_mode);
			new_modes_count += 1;
		} else {
			drm_mode_destroy(aconnector->base.dev, new_mode);
		}
	}
out:
	return new_modes_count;
}

static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
						   struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);

	if (!(amdgpu_freesync_vid_mode && edid))
		return;

	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
		amdgpu_dm_connector->num_modes +=
			add_fs_modes(amdgpu_dm_connector);
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
		amdgpu_dm_connector_add_freesync_modes(connector, edid);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		if (link->is_dig_mapping_flexible &&
		    link->dc->res_pool->funcs->link_encs_assign) {
			link->link_enc =
				link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
			if (!link->link_enc)
				link->link_enc =
					link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
		}

		if (link->link_enc)
			aconnector->base.ycbcr_420_allowed =
				link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}

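/*
 * I2C over the DDC line: translate an array of i2c_msg into a DC
 * i2c_command and submit it on the link's DDC channel. Returns the
 * number of messages transferred on success, -EIO otherwise.
 */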
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	if (i2c->ddc_service->ddc_pin)
		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}

/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}

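/*
 * Build the possible_crtcs bitmask for the encoders: one bit per CRTC,
 * e.g. num_crtc == 4 yields 0xf (the encoder may be routed to any of
 * CRTCs 0-3). Any other count falls back to the 6-bit mask 0x3f.
 */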
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_get(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
	} else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_put(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * Read the current IRQ state and forcibly reapply the setting to
	 * hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

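/*
 * Decide whether a content-protection state change requires HDCP to be
 * (re)started; returning true kicks off hdcp_update_display(). The
 * function also normalizes the requested state for the cases documented
 * inline (S3 resume, re-enable, hotplug).
 */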
#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this
	 *
	 * Handles: ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and
	 * the restored state will be ENABLED
	 *
	 * Handles: UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Stream removed and re-enabled
	 *
	 * Can sometimes overlap with the HPD case,
	 * thus set update_hdcp to false to avoid
	 * setting HDCP multiple times.
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (!(old_state->crtc && old_state->crtc->enabled) &&
	    state->crtc && state->crtc->enabled &&
	    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/* Hot-plug, headless s3, dpms
	 *
	 * Only start HDCP if the display is connected/enabled.
	 * update_hdcp flag will be set to false until the next
	 * HPD comes in.
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles: UNDESIRED -> UNDESIRED
	 *          DESIRED -> DESIRED
	 *          ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles: UNDESIRED -> DESIRED
	 *          DESIRED -> UNDESIRED
	 *          ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles: DESIRED -> ENABLED
	 */
	return false;
}

#endif
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}

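/*
 * Compute the DC cursor position for a cursor plane. When the cursor is
 * partially off the top/left edge, the position is clamped to 0 and the
 * overhang is expressed through the hotspot so DC crops it correctly.
 */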
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position = {0};
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
		      __func__,
		      amdgpu_crtc->crtc_id,
		      plane->state->crtc_w,
		      plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width = plane->state->crtc_w;
	attributes.height = plane->state->crtc_h;
	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

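/*
 * Hand the pending pageflip event over to the pageflip interrupt
 * handler: latch the event on the CRTC and mark the flip as submitted.
 * Must be called with the event_lock held.
 */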
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
		     acrtc->crtc_id);
}

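/*
 * Rebuild the VRR adjust range and infopacket for a flip and stash the
 * result both in the stream and in the CRTC's IRQ params (under the
 * event_lock), so the vblank/pflip handlers see a consistent view.
 */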
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;
	bool pack_sdp_v1_3 = false;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket,
		pack_sdp_v1_3);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		/*
		 * if freesync compatible mode was set, config.state will be set
		 * in atomic check
		 */
		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
		} else {
			config.state = new_crtc_state->base.vrr_enabled ?
						VRR_STATE_ACTIVE_VARIABLE :
						VRR_STATE_INACTIVE;
		}
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}

static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_old_plane_in_state(state, plane, old_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}

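/*
 * Program all updated planes of one CRTC: build a dc_surface_update
 * bundle, wait for the framebuffers' fences, throttle the flip against
 * the target vblank, then commit the bundle to DC under dc_lock and
 * rearm PSR/interrupt state as needed.
 */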
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(dm->adev, new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
					  msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/*
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0 &&
		    !acrtc_state->force_dpms_off) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		/*
		 * If PSR or idle optimizations are enabled then flush out
		 * any pending work before hardware programming.
		 */
		if (dm->vblank_control_workqueue)
			flush_workqueue(dm->vblank_control_workqueue);
#endif

		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		/*
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),
						  acrtc_attach);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);

		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
			struct amdgpu_dm_connector *aconn =
				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;

			if (aconn->psr_skip_count > 0)
				aconn->psr_skip_count--;

			/* Allow PSR when skip count is 0. */
			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
		} else {
			acrtc_attach->dm_irq_params.allow_psr_entry = false;
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}

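/*
 * Walk the connector states and notify the audio component (ELD) about
 * devices that disappeared (CRTC change or modeset) and about the audio
 * instance of every newly enabled stream.
 */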
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;
	bool mode_set_reset_required = false;

	trace_amdgpu_dm_atomic_commit_tail_begin(state);

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, false);
			dc_stream_release(dm_old_crtc_state->stream);
		}
	}

	drm_atomic_helper_calc_timestamping_constants(state);

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_ATOMIC(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Disable cursor if disabling crtc */
		if (old_crtc_state->active && !new_crtc_state->active) {
			struct dc_cursor_position position;

			memset(&position, 0, sizeof(position));
			mutex_lock(&dm->dc_lock);
			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
			mutex_unlock(&dm->dc_lock);
		}

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * this could happen because of issues with
				 * userspace notifications delivery.
				 * In this case userspace tries to set mode on
				 * display which is disconnected in fact.
				 * dc_sink is NULL in this case on aconnector.
				 * We expect reset mode will come soon.
				 *
				 * This can also happen when unplug is done
				 * during resume sequence ended
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
			mode_set_reset_required = true;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			mode_set_reset_required = true;
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
		if (mode_set_reset_required) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (dm->vblank_control_workqueue)
				flush_workqueue(dm->vblank_control_workqueue);
#endif
			amdgpu_dm_psr_disable_all(dm);
		}

		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
#if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
#endif
		mutex_unlock(&dm->dc_lock);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
				dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);

		if (WARN_ON(!status))
			continue;

		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
9737 dc_commit_updates_for_stream(dm->dc,
efc8278e 9738 dummy_updates,
02d6a6fc
DF
9739 status->plane_count,
9740 dm_new_crtc_state->stream,
efc8278e
AJ
9741 &stream_update,
9742 dc_state);
02d6a6fc 9743 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
9744 }
9745
b5e83f6f 9746 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 9747 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 9748 new_crtc_state, i) {
fe2a1965
LP
9749 if (old_crtc_state->active && !new_crtc_state->active)
9750 crtc_disable_count++;
9751
54d76575 9752 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 9753 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 9754
585d450c
AP
9755 /* For freesync config update on crtc state and params for irq */
9756 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 9757
66b0c973
MK
9758 /* Handle vrr on->off / off->on transitions */
9759 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9760 dm_new_crtc_state);
e7b07cee
HW
9761 }
9762
8fe684e9
NK
9763 /**
9764 * Enable interrupts for CRTCs that are newly enabled or went through
9765 * a modeset. It was intentionally deferred until after the front end
9766 * state was modified to wait until the OTG was on and so the IRQ
9767 * handlers didn't access stale or invalid state.
9768 */
9769 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9770 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 9771#ifdef CONFIG_DEBUG_FS
86bc2219 9772 bool configure_crc = false;
8e7b6fee 9773 enum amdgpu_dm_pipe_crc_source cur_crc_src;
d98af272
WL
9774#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9775 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9776#endif
9777 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9778 cur_crc_src = acrtc->dm_irq_params.crc_src;
9779 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 9780#endif
585d450c
AP
9781 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9782
8fe684e9
NK
9783 if (new_crtc_state->active &&
9784 (!old_crtc_state->active ||
9785 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
9786 dc_stream_retain(dm_new_crtc_state->stream);
9787 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 9788 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 9789
24eb9374 9790#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
9791 /**
9792 * Frontend may have changed so reapply the CRC capture
9793 * settings for the stream.
9794 */
9795 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 9796
8e7b6fee 9797 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
9798 configure_crc = true;
9799#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
9800 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9801 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9802 acrtc->dm_irq_params.crc_window.update_win = true;
9803 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9804 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9805 crc_rd_wrk->crtc = crtc;
9806 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9807 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9808 }
86bc2219 9809#endif
e2881d6d 9810 }
c920888c 9811
86bc2219 9812 if (configure_crc)
bbc49fc0
WL
9813 if (amdgpu_dm_crtc_configure_crc_source(
9814 crtc, dm_new_crtc_state, cur_crc_src))
9815 DRM_DEBUG_DRIVER("Failed to configure crc source\n");
24eb9374 9816#endif
8fe684e9
NK
9817 }
9818 }
e7b07cee 9819
420cd472 9820 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 9821 if (new_crtc_state->async_flip)
420cd472
DF
9822 wait_for_vblank = false;
9823
e7b07cee 9824 /* update planes when needed per crtc*/
5cc6dcbd 9825 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 9826 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9827
54d76575 9828 if (dm_new_crtc_state->stream)
eb3dc897 9829 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 9830 dm, crtc, wait_for_vblank);
e7b07cee
HW
9831 }
9832
6ce8f316
NK
9833 /* Update audio instances for each connector. */
9834 amdgpu_dm_commit_audio(dev, state);
9835
7230362c
AD
9836#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9837 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9838 /* restore the backlight level */
7fd13bae
AD
9839 for (i = 0; i < dm->num_of_edps; i++) {
9840 if (dm->backlight_dev[i] &&
9841 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9842 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9843 }
7230362c 9844#endif
e7b07cee
HW
9845 /*
9846 * send vblank event on all events not handled in flip and
9847 * mark consumed event for drm_atomic_helper_commit_hw_done
9848 */
4a580877 9849 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 9850 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9851
0bc9706d
LSL
9852 if (new_crtc_state->event)
9853 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 9854
0bc9706d 9855 new_crtc_state->event = NULL;
e7b07cee 9856 }
4a580877 9857 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 9858
29c8f234
LL
9859 /* Signal HW programming completion */
9860 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
9861
9862 if (wait_for_vblank)
320a1274 9863 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
9864
9865 drm_atomic_helper_cleanup_planes(dev, state);
97028037 9866
5f6fab24
AD
9867 /* return the stolen vga memory back to VRAM */
9868 if (!adev->mman.keep_stolen_vga_memory)
9869 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9870 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9871
1f6010a9
DF
9872 /*
9873 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
9874 * so we can put the GPU into runtime suspend if we're not driving any
9875 * displays anymore
9876 */
fe2a1965
LP
9877 for (i = 0; i < crtc_disable_count; i++)
9878 pm_runtime_put_autosuspend(dev->dev);
97028037 9879 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
9880
9881 if (dc_state_temp)
9882 dc_release_state(dc_state_temp);
e7b07cee
HW
9883}
9884
9885
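/*
 * dm_force_atomic_commit - build and commit a minimal atomic state that
 * re-applies the connector's current display setting.
 *
 * The state pulls in the connector, the CRTC behind its encoder and that
 * CRTC's primary plane, then sets crtc_state->mode_changed to force a full
 * modeset on commit. Used by dm_restore_drm_connector_state() below when
 * usermode cannot be relied upon to light the display back up.
 */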
9886static int dm_force_atomic_commit(struct drm_connector *connector)
9887{
9888 int ret = 0;
9889 struct drm_device *ddev = connector->dev;
9890 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9891 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9892 struct drm_plane *plane = disconnected_acrtc->base.primary;
9893 struct drm_connector_state *conn_state;
9894 struct drm_crtc_state *crtc_state;
9895 struct drm_plane_state *plane_state;
9896
9897 if (!state)
9898 return -ENOMEM;
9899
9900 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9901
9902 /* Construct an atomic state to restore previous display setting */
9903
9904 /*
9905 * Attach connectors to drm_atomic_state
9906 */
9907 conn_state = drm_atomic_get_connector_state(state, connector);
9908
9909 ret = PTR_ERR_OR_ZERO(conn_state);
9910 if (ret)
2dc39051 9911 goto out;
e7b07cee
HW
9912
9913 /* Attach crtc to drm_atomic_state*/
9914 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9915
9916 ret = PTR_ERR_OR_ZERO(crtc_state);
9917 if (ret)
2dc39051 9918 goto out;
e7b07cee
HW
9919
9920 /* force a restore */
9921 crtc_state->mode_changed = true;
9922
9923 /* Attach plane to drm_atomic_state */
9924 plane_state = drm_atomic_get_plane_state(state, plane);
9925
9926 ret = PTR_ERR_OR_ZERO(plane_state);
9927 if (ret)
2dc39051 9928 goto out;
e7b07cee
HW
9929
9930 /* Call commit internally with the state we just constructed */
9931 ret = drm_atomic_commit(state);
e7b07cee 9932
2dc39051 9933out:
e7b07cee 9934 drm_atomic_state_put(state);
2dc39051
VL
9935 if (ret)
9936 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
9937
9938 return ret;
9939}
9940
9941/*
1f6010a9
DF
9942 * This function handles all cases when a mode set does not come upon hotplug.
9943 * This includes when a display is unplugged then plugged back into the
9944 * same port and when running without usermode desktop manager support
e7b07cee 9945 */
3ee6b26b
AD
9946void dm_restore_drm_connector_state(struct drm_device *dev,
9947 struct drm_connector *connector)
e7b07cee 9948{
c84dec2f 9949 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
9950 struct amdgpu_crtc *disconnected_acrtc;
9951 struct dm_crtc_state *acrtc_state;
9952
9953 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9954 return;
9955
9956 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
9957 if (!disconnected_acrtc)
9958 return;
e7b07cee 9959
70e8ffc5
HW
9960 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9961 if (!acrtc_state->stream)
e7b07cee
HW
9962 return;
9963
9964 /*
9965 * If the previous sink is not released and is different from the current
9966 * one, we deduce we are in a state where we cannot rely on a usermode
9967 * call to turn on the display, so we do it here
9968 */
9969 if (acrtc_state->stream->sink != aconnector->dc_sink)
9970 dm_force_atomic_commit(&aconnector->base);
9971}
9972
1f6010a9 9973/*
e7b07cee
HW
9974 * Grabs all modesetting locks to serialize against any blocking commits,
9975 * and waits for completion of all non-blocking commits.
9976 */
3ee6b26b
AD
9977static int do_aquire_global_lock(struct drm_device *dev,
9978 struct drm_atomic_state *state)
e7b07cee
HW
9979{
9980 struct drm_crtc *crtc;
9981 struct drm_crtc_commit *commit;
9982 long ret;
9983
1f6010a9
DF
9984 /*
9985 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
9986 * ensure that when the framework releases it, the
9987 * extra locks we are taking here will get released too
9988 */
9989 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9990 if (ret)
9991 return ret;
9992
9993 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9994 spin_lock(&crtc->commit_lock);
9995 commit = list_first_entry_or_null(&crtc->commit_list,
9996 struct drm_crtc_commit, commit_entry);
9997 if (commit)
9998 drm_crtc_commit_get(commit);
9999 spin_unlock(&crtc->commit_lock);
10000
10001 if (!commit)
10002 continue;
10003
1f6010a9
DF
10004 /*
10005 * Make sure all pending HW programming completed and
e7b07cee
HW
10006 * page flips done
10007 */
10008 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10009
10010 if (ret > 0)
10011 ret = wait_for_completion_interruptible_timeout(
10012 &commit->flip_done, 10*HZ);
10013
10014 if (ret == 0)
10015 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 10016 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
10017
10018 drm_crtc_commit_put(commit);
10019 }
10020
10021 return ret < 0 ? ret : 0;
10022}
10023
bb47de73
NK
10024static void get_freesync_config_for_crtc(
10025 struct dm_crtc_state *new_crtc_state,
10026 struct dm_connector_state *new_con_state)
98e6436d
AK
10027{
10028 struct mod_freesync_config config = {0};
98e6436d
AK
10029 struct amdgpu_dm_connector *aconnector =
10030 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 10031 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 10032 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 10033 bool fs_vid_mode = false;
98e6436d 10034
a057ec46 10035 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
10036 vrefresh >= aconnector->min_vfreq &&
10037 vrefresh <= aconnector->max_vfreq;
bb47de73 10038
a057ec46
IB
10039 if (new_crtc_state->vrr_supported) {
10040 new_crtc_state->stream->ignore_msa_timing_param = true;
a85ba005
NC
10041 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10042
10043 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10044 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 10045 config.vsif_supported = true;
180db303 10046 config.btr = true;
98e6436d 10047
a85ba005
NC
10048 if (fs_vid_mode) {
10049 config.state = VRR_STATE_ACTIVE_FIXED;
10050 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10051 goto out;
10052 } else if (new_crtc_state->base.vrr_enabled) {
10053 config.state = VRR_STATE_ACTIVE_VARIABLE;
10054 } else {
10055 config.state = VRR_STATE_INACTIVE;
10056 }
10057 }
10058out:
bb47de73
NK
10059 new_crtc_state->freesync_config = config;
10060}
98e6436d 10061
bb47de73
NK
10062static void reset_freesync_config_for_crtc(
10063 struct dm_crtc_state *new_crtc_state)
10064{
10065 new_crtc_state->vrr_supported = false;
98e6436d 10066
bb47de73
NK
10067 memset(&new_crtc_state->vrr_infopacket, 0,
10068 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
10069}
10070
a85ba005
NC
10071static bool
10072is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10073 struct drm_crtc_state *new_crtc_state)
10074{
10075 struct drm_display_mode old_mode, new_mode;
10076
10077 if (!old_crtc_state || !new_crtc_state)
10078 return false;
10079
10080 old_mode = old_crtc_state->mode;
10081 new_mode = new_crtc_state->mode;
10082
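 /*
 * Note the intentionally asymmetric comparison below: the horizontal
 * timing and the vsync pulse width (vsync_end - vsync_start) must be
 * identical, while vtotal, vsync_start and vsync_end must all differ.
 * That is the signature of a mode that only moved its vertical front
 * porch, which is how the fixed-refresh freesync video modes are
 * derived from the base mode.
 */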
10083 if (old_mode.clock == new_mode.clock &&
10084 old_mode.hdisplay == new_mode.hdisplay &&
10085 old_mode.vdisplay == new_mode.vdisplay &&
10086 old_mode.htotal == new_mode.htotal &&
10087 old_mode.vtotal != new_mode.vtotal &&
10088 old_mode.hsync_start == new_mode.hsync_start &&
10089 old_mode.vsync_start != new_mode.vsync_start &&
10090 old_mode.hsync_end == new_mode.hsync_end &&
10091 old_mode.vsync_end != new_mode.vsync_end &&
10092 old_mode.hskew == new_mode.hskew &&
10093 old_mode.vscan == new_mode.vscan &&
10094 (old_mode.vsync_end - old_mode.vsync_start) ==
10095 (new_mode.vsync_end - new_mode.vsync_start))
10096 return true;
10097
10098 return false;
10099}
10100
10101 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10102 uint64_t num, den, res;
10103 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10104
10105 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10106
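 /*
 * The fixed refresh rate in uHz is pixel clock / pixels-per-frame:
 * mode.clock is in kHz, so scale by 1000 (-> Hz) and 1000000 (-> uHz).
 * E.g. the CEA 1080p60 timing (clock = 148500, htotal = 2200,
 * vtotal = 1125) gives 148500 * 1000 * 1000000 / (2200 * 1125)
 * = 60000000 uHz, i.e. 60 Hz.
 */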
10107 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10108 den = (unsigned long long)new_crtc_state->mode.htotal *
10109 (unsigned long long)new_crtc_state->mode.vtotal;
10110
10111 res = div_u64(num, den);
10112 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10113}
10114
4b9674e5
LL
10115static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10116 struct drm_atomic_state *state,
10117 struct drm_crtc *crtc,
10118 struct drm_crtc_state *old_crtc_state,
10119 struct drm_crtc_state *new_crtc_state,
10120 bool enable,
10121 bool *lock_and_validation_needed)
e7b07cee 10122{
eb3dc897 10123 struct dm_atomic_state *dm_state = NULL;
54d76575 10124 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 10125 struct dc_stream_state *new_stream;
62f55537 10126 int ret = 0;
d4d4a645 10127
1f6010a9
DF
10128 /*
10129 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10130 * update changed items
10131 */
4b9674e5
LL
10132 struct amdgpu_crtc *acrtc = NULL;
10133 struct amdgpu_dm_connector *aconnector = NULL;
10134 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10135 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 10136
4b9674e5 10137 new_stream = NULL;
9635b754 10138
4b9674e5
LL
10139 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10140 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10141 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 10142 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 10143
4b9674e5
LL
10144 /* TODO This hack should go away */
10145 if (aconnector && enable) {
10146 /* Make sure fake sink is created in plug-in scenario */
10147 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10148 &aconnector->base);
10149 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10150 &aconnector->base);
19f89e23 10151
4b9674e5
LL
10152 if (IS_ERR(drm_new_conn_state)) {
10153 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10154 goto fail;
10155 }
19f89e23 10156
4b9674e5
LL
10157 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10158 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 10159
02d35a67
JFZ
10160 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10161 goto skip_modeset;
10162
cbd14ae7
SW
10163 new_stream = create_validate_stream_for_sink(aconnector,
10164 &new_crtc_state->mode,
10165 dm_new_conn_state,
10166 dm_old_crtc_state->stream);
19f89e23 10167
4b9674e5
LL
10168 /*
10169 * We can have no stream on ACTION_SET if a display
10170 * was disconnected during S3; in this case it is not an
10171 * error: the OS will be updated after detection and
10172 * will do the right thing on the next atomic commit
10173 */
19f89e23 10174
4b9674e5
LL
10175 if (!new_stream) {
10176 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10177 __func__, acrtc->base.base.id);
10178 ret = -ENOMEM;
10179 goto fail;
10180 }
e7b07cee 10181
3d4e52d0
VL
10182 /*
10183 * TODO: Check VSDB bits to decide whether this should
10184 * be enabled or not.
10185 */
10186 new_stream->triggered_crtc_reset.enabled =
10187 dm->force_timing_sync;
10188
4b9674e5 10189 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 10190
88694af9
NK
10191 ret = fill_hdr_info_packet(drm_new_conn_state,
10192 &new_stream->hdr_static_metadata);
10193 if (ret)
10194 goto fail;
10195
7e930949
NK
10196 /*
10197 * If we already removed the old stream from the context
10198 * (and set the new stream to NULL) then we can't reuse
10199 * the old stream even if the stream and scaling are unchanged.
10200 * We'll hit the BUG_ON and black screen.
10201 *
10202 * TODO: Refactor this function to allow this check to work
10203 * in all conditions.
10204 */
a85ba005
NC
10205 if (amdgpu_freesync_vid_mode &&
10206 dm_new_crtc_state->stream &&
10207 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10208 goto skip_modeset;
10209
7e930949
NK
10210 if (dm_new_crtc_state->stream &&
10211 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
10212 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10213 new_crtc_state->mode_changed = false;
10214 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10215 new_crtc_state->mode_changed);
62f55537 10216 }
4b9674e5 10217 }
b830ebc9 10218
02d35a67 10219 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
10220 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10221 goto skip_modeset;
e7b07cee 10222
4711c033 10223 DRM_DEBUG_ATOMIC(
4b9674e5
LL
10224 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10225 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10226 "connectors_changed:%d\n",
10227 acrtc->crtc_id,
10228 new_crtc_state->enable,
10229 new_crtc_state->active,
10230 new_crtc_state->planes_changed,
10231 new_crtc_state->mode_changed,
10232 new_crtc_state->active_changed,
10233 new_crtc_state->connectors_changed);
62f55537 10234
4b9674e5
LL
10235 /* Remove stream for any changed/disabled CRTC */
10236 if (!enable) {
62f55537 10237
4b9674e5
LL
10238 if (!dm_old_crtc_state->stream)
10239 goto skip_modeset;
eb3dc897 10240
a85ba005
NC
10241 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10242 is_timing_unchanged_for_freesync(new_crtc_state,
10243 old_crtc_state)) {
10244 new_crtc_state->mode_changed = false;
10245 DRM_DEBUG_DRIVER(
10246 "Mode change not required for front porch change, "
10247 "setting mode_changed to %d",
10248 new_crtc_state->mode_changed);
10249
10250 set_freesync_fixed_config(dm_new_crtc_state);
10251
10252 goto skip_modeset;
10253 } else if (amdgpu_freesync_vid_mode && aconnector &&
10254 is_freesync_video_mode(&new_crtc_state->mode,
10255 aconnector)) {
e88ebd83
SC
10256 struct drm_display_mode *high_mode;
10257
10258 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10259 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10260 set_freesync_fixed_config(dm_new_crtc_state);
10261 }
a85ba005
NC
10262 }
10263
4b9674e5
LL
10264 ret = dm_atomic_get_state(state, &dm_state);
10265 if (ret)
10266 goto fail;
e7b07cee 10267
4b9674e5
LL
10268 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10269 crtc->base.id);
62f55537 10270
4b9674e5
LL
10271 /* i.e. reset mode */
10272 if (dc_remove_stream_from_ctx(
10273 dm->dc,
10274 dm_state->context,
10275 dm_old_crtc_state->stream) != DC_OK) {
10276 ret = -EINVAL;
10277 goto fail;
10278 }
62f55537 10279
4b9674e5
LL
10280 dc_stream_release(dm_old_crtc_state->stream);
10281 dm_new_crtc_state->stream = NULL;
bb47de73 10282
4b9674e5 10283 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 10284
4b9674e5 10285 *lock_and_validation_needed = true;
62f55537 10286
4b9674e5
LL
10287 } else { /* Add stream for any updated/enabled CRTC */
10288 /*
10289 * Quick fix to prevent a NULL pointer on new_stream when
10290 * added MST connectors are not found in the existing crtc_state in chained mode.
10291 * TODO: need to dig out the root cause of that
10292 */
10293 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10294 goto skip_modeset;
62f55537 10295
4b9674e5
LL
10296 if (modereset_required(new_crtc_state))
10297 goto skip_modeset;
62f55537 10298
4b9674e5
LL
10299 if (modeset_required(new_crtc_state, new_stream,
10300 dm_old_crtc_state->stream)) {
62f55537 10301
4b9674e5 10302 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 10303
4b9674e5
LL
10304 ret = dm_atomic_get_state(state, &dm_state);
10305 if (ret)
10306 goto fail;
27b3f4fc 10307
4b9674e5 10308 dm_new_crtc_state->stream = new_stream;
62f55537 10309
4b9674e5 10310 dc_stream_retain(new_stream);
1dc90497 10311
4711c033
LT
10312 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10313 crtc->base.id);
1dc90497 10314
4b9674e5
LL
10315 if (dc_add_stream_to_ctx(
10316 dm->dc,
10317 dm_state->context,
10318 dm_new_crtc_state->stream) != DC_OK) {
10319 ret = -EINVAL;
10320 goto fail;
9b690ef3
BL
10321 }
10322
4b9674e5
LL
10323 *lock_and_validation_needed = true;
10324 }
10325 }
e277adc5 10326
4b9674e5
LL
10327skip_modeset:
10328 /* Release extra reference */
10329 if (new_stream)
10330 dc_stream_release(new_stream);
e277adc5 10331
4b9674e5
LL
10332 /*
10333 * We want to do dc stream updates that do not require a
10334 * full modeset below.
10335 */
2afda735 10336 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
10337 return 0;
10338 /*
10339 * Given above conditions, the dc state cannot be NULL because:
10340 * 1. We're in the process of enabling CRTCs (just been added
10341 * to the dc context, or it is already on the context)
10342 * 2. Has a valid connector attached, and
10343 * 3. Is currently active and enabled.
10344 * => The dc stream state currently exists.
10345 */
10346 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 10347
4b9674e5 10348 /* Scaling or underscan settings */
c521fc31
RL
10349 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10350 drm_atomic_crtc_needs_modeset(new_crtc_state))
4b9674e5
LL
10351 update_stream_scaling_settings(
10352 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 10353
b05e2c5e
DF
10354 /* ABM settings */
10355 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10356
4b9674e5
LL
10357 /*
10358 * Color management settings. We also update color properties
10359 * when a modeset is needed, to ensure it gets reprogrammed.
10360 */
10361 if (dm_new_crtc_state->base.color_mgmt_changed ||
10362 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 10363 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
10364 if (ret)
10365 goto fail;
62f55537 10366 }
e7b07cee 10367
4b9674e5
LL
10368 /* Update Freesync settings. */
10369 get_freesync_config_for_crtc(dm_new_crtc_state,
10370 dm_new_conn_state);
10371
62f55537 10372 return ret;
9635b754
DS
10373
10374fail:
10375 if (new_stream)
10376 dc_stream_release(new_stream);
10377 return ret;
62f55537 10378}
9b690ef3 10379
f6ff2a08
NK
10380static bool should_reset_plane(struct drm_atomic_state *state,
10381 struct drm_plane *plane,
10382 struct drm_plane_state *old_plane_state,
10383 struct drm_plane_state *new_plane_state)
10384{
10385 struct drm_plane *other;
10386 struct drm_plane_state *old_other_state, *new_other_state;
10387 struct drm_crtc_state *new_crtc_state;
10388 int i;
10389
70a1efac
NK
10390 /*
10391 * TODO: Remove this hack once the checks below are sufficient
10392 * to determine when we need to reset all the planes on
10393 * the stream.
10394 */
10395 if (state->allow_modeset)
10396 return true;
10397
f6ff2a08
NK
10398 /* Exit early if we know that we're adding or removing the plane. */
10399 if (old_plane_state->crtc != new_plane_state->crtc)
10400 return true;
10401
10402 /* old crtc == new_crtc == NULL, plane not in context. */
10403 if (!new_plane_state->crtc)
10404 return false;
10405
10406 new_crtc_state =
10407 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10408
10409 if (!new_crtc_state)
10410 return true;
10411
7316c4ad
NK
10412 /* CRTC Degamma changes currently require us to recreate planes. */
10413 if (new_crtc_state->color_mgmt_changed)
10414 return true;
10415
f6ff2a08
NK
10416 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10417 return true;
10418
10419 /*
10420 * If there are any new primary or overlay planes being added or
10421 * removed then the z-order can potentially change. To ensure
10422 * correct z-order and pipe acquisition the current DC architecture
10423 * requires us to remove and recreate all existing planes.
10424 *
10425 * TODO: Come up with a more elegant solution for this.
10426 */
10427 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 10428 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
10429 if (other->type == DRM_PLANE_TYPE_CURSOR)
10430 continue;
10431
10432 if (old_other_state->crtc != new_plane_state->crtc &&
10433 new_other_state->crtc != new_plane_state->crtc)
10434 continue;
10435
10436 if (old_other_state->crtc != new_other_state->crtc)
10437 return true;
10438
dc4cb30d
NK
10439 /* Src/dst size and scaling updates. */
10440 if (old_other_state->src_w != new_other_state->src_w ||
10441 old_other_state->src_h != new_other_state->src_h ||
10442 old_other_state->crtc_w != new_other_state->crtc_w ||
10443 old_other_state->crtc_h != new_other_state->crtc_h)
10444 return true;
10445
10446 /* Rotation / mirroring updates. */
10447 if (old_other_state->rotation != new_other_state->rotation)
10448 return true;
10449
10450 /* Blending updates. */
10451 if (old_other_state->pixel_blend_mode !=
10452 new_other_state->pixel_blend_mode)
10453 return true;
10454
10455 /* Alpha updates. */
10456 if (old_other_state->alpha != new_other_state->alpha)
10457 return true;
10458
10459 /* Colorspace changes. */
10460 if (old_other_state->color_range != new_other_state->color_range ||
10461 old_other_state->color_encoding != new_other_state->color_encoding)
10462 return true;
10463
9a81cc60
NK
10464 /* Framebuffer checks fall at the end. */
10465 if (!old_other_state->fb || !new_other_state->fb)
10466 continue;
10467
10468 /* Pixel format changes can require bandwidth updates. */
10469 if (old_other_state->fb->format != new_other_state->fb->format)
10470 return true;
10471
6eed95b0
BN
10472 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10473 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
10474
10475 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
10476 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10477 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
10478 return true;
10479 }
10480
10481 return false;
10482}
10483
b0455fda
SS
10484static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10485 struct drm_plane_state *new_plane_state,
10486 struct drm_framebuffer *fb)
10487{
e72868c4
SS
10488 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10489 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 10490 unsigned int pitch;
e72868c4 10491 bool linear;
b0455fda
SS
10492
10493 if (fb->width > new_acrtc->max_cursor_width ||
10494 fb->height > new_acrtc->max_cursor_height) {
10495 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10496 new_plane_state->fb->width,
10497 new_plane_state->fb->height);
10498 return -EINVAL;
10499 }
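 /* src_w/src_h are 16.16 fixed point, so an uncropped 64x64 FB must
 * come in with src_w == 64 << 16 and src_h == 64 << 16.
 */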
10500 if (new_plane_state->src_w != fb->width << 16 ||
10501 new_plane_state->src_h != fb->height << 16) {
10502 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10503 return -EINVAL;
10504 }
10505
10506 /* Pitch in pixels */
10507 pitch = fb->pitches[0] / fb->format->cpp[0];
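 /*
 * For example, a 64x64 ARGB8888 cursor FB has pitches[0] == 256 bytes
 * and cpp[0] == 4, giving a pitch of 64 px, which both matches
 * fb->width and is one of the pitches accepted below.
 */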
10508
10509 if (fb->width != pitch) {
10510 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10511 fb->width, pitch);
10512 return -EINVAL;
10513 }
10514
10515 switch (pitch) {
10516 case 64:
10517 case 128:
10518 case 256:
10519 /* FB pitch is supported by cursor plane */
10520 break;
10521 default:
10522 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10523 return -EINVAL;
10524 }
10525
e72868c4
SS
10526 /* Core DRM takes care of checking FB modifiers, so we only need to
10527 * check tiling flags when the FB doesn't have a modifier. */
10528 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10529 if (adev->family < AMDGPU_FAMILY_AI) {
10530 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10531 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10532 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10533 } else {
10534 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10535 }
10536 if (!linear) {
10537 DRM_DEBUG_ATOMIC("Cursor FB not linear");
10538 return -EINVAL;
10539 }
10540 }
10541
b0455fda
SS
10542 return 0;
10543}
10544
9e869063
LL
10545static int dm_update_plane_state(struct dc *dc,
10546 struct drm_atomic_state *state,
10547 struct drm_plane *plane,
10548 struct drm_plane_state *old_plane_state,
10549 struct drm_plane_state *new_plane_state,
10550 bool enable,
10551 bool *lock_and_validation_needed)
62f55537 10552{
eb3dc897
NK
10553
10554 struct dm_atomic_state *dm_state = NULL;
62f55537 10555 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 10556 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 10557 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 10558 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 10559 struct amdgpu_crtc *new_acrtc;
f6ff2a08 10560 bool needs_reset;
62f55537 10561 int ret = 0;
e7b07cee 10562
9b690ef3 10563
9e869063
LL
10564 new_plane_crtc = new_plane_state->crtc;
10565 old_plane_crtc = old_plane_state->crtc;
10566 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10567 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 10568
626bf90f
SS
10569 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10570 if (!enable || !new_plane_crtc ||
10571 drm_atomic_plane_disabling(plane->state, new_plane_state))
10572 return 0;
10573
10574 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10575
5f581248
SS
10576 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10577 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10578 return -EINVAL;
10579 }
10580
24f99d2b 10581 if (new_plane_state->fb) {
b0455fda
SS
10582 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10583 new_plane_state->fb);
10584 if (ret)
10585 return ret;
24f99d2b
SS
10586 }
10587
9e869063 10588 return 0;
626bf90f 10589 }
9b690ef3 10590
f6ff2a08
NK
10591 needs_reset = should_reset_plane(state, plane, old_plane_state,
10592 new_plane_state);
10593
9e869063
LL
10594 /* Remove any changed/removed planes */
10595 if (!enable) {
f6ff2a08 10596 if (!needs_reset)
9e869063 10597 return 0;
a7b06724 10598
9e869063
LL
10599 if (!old_plane_crtc)
10600 return 0;
62f55537 10601
9e869063
LL
10602 old_crtc_state = drm_atomic_get_old_crtc_state(
10603 state, old_plane_crtc);
10604 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 10605
9e869063
LL
10606 if (!dm_old_crtc_state->stream)
10607 return 0;
62f55537 10608
9e869063
LL
10609 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10610 plane->base.id, old_plane_crtc->base.id);
9b690ef3 10611
9e869063
LL
10612 ret = dm_atomic_get_state(state, &dm_state);
10613 if (ret)
10614 return ret;
eb3dc897 10615
9e869063
LL
10616 if (!dc_remove_plane_from_context(
10617 dc,
10618 dm_old_crtc_state->stream,
10619 dm_old_plane_state->dc_state,
10620 dm_state->context)) {
62f55537 10621
c3537613 10622 return -EINVAL;
9e869063 10623 }
e7b07cee 10624
9b690ef3 10625
9e869063
LL
10626 dc_plane_state_release(dm_old_plane_state->dc_state);
10627 dm_new_plane_state->dc_state = NULL;
1dc90497 10628
9e869063 10629 *lock_and_validation_needed = true;
1dc90497 10630
9e869063
LL
10631 } else { /* Add new planes */
10632 struct dc_plane_state *dc_new_plane_state;
1dc90497 10633
9e869063
LL
10634 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10635 return 0;
e7b07cee 10636
9e869063
LL
10637 if (!new_plane_crtc)
10638 return 0;
e7b07cee 10639
9e869063
LL
10640 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10641 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 10642
9e869063
LL
10643 if (!dm_new_crtc_state->stream)
10644 return 0;
62f55537 10645
f6ff2a08 10646 if (!needs_reset)
9e869063 10647 return 0;
62f55537 10648
8c44515b
AP
10649 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10650 if (ret)
10651 return ret;
10652
9e869063 10653 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 10654
9e869063
LL
10655 dc_new_plane_state = dc_create_plane_state(dc);
10656 if (!dc_new_plane_state)
10657 return -ENOMEM;
62f55537 10658
4711c033
LT
10659 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10660 plane->base.id, new_plane_crtc->base.id);
8c45c5db 10661
695af5f9 10662 ret = fill_dc_plane_attributes(
1348969a 10663 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
10664 dc_new_plane_state,
10665 new_plane_state,
10666 new_crtc_state);
10667 if (ret) {
10668 dc_plane_state_release(dc_new_plane_state);
10669 return ret;
10670 }
62f55537 10671
9e869063
LL
10672 ret = dm_atomic_get_state(state, &dm_state);
10673 if (ret) {
10674 dc_plane_state_release(dc_new_plane_state);
10675 return ret;
10676 }
eb3dc897 10677
9e869063
LL
10678 /*
10679 * Any atomic check errors that occur after this will
10680 * not need a release. The plane state will be attached
10681 * to the stream, and therefore part of the atomic
10682 * state. It'll be released when the atomic state is
10683 * cleaned.
10684 */
10685 if (!dc_add_plane_to_context(
10686 dc,
10687 dm_new_crtc_state->stream,
10688 dc_new_plane_state,
10689 dm_state->context)) {
62f55537 10690
9e869063
LL
10691 dc_plane_state_release(dc_new_plane_state);
10692 return -EINVAL;
10693 }
8c45c5db 10694
9e869063 10695 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 10696
9e869063
LL
10697 /* Tell DC to do a full surface update every time there
10698 * is a plane change. Inefficient, but works for now.
10699 */
10700 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10701
10702 *lock_and_validation_needed = true;
62f55537 10703 }
e7b07cee
HW
10704
10705
62f55537
AG
10706 return ret;
10707}
a87fa993 10708
12f4849a
SS
10709static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10710 struct drm_crtc *crtc,
10711 struct drm_crtc_state *new_crtc_state)
10712{
d1bfbe8a
SS
10713 struct drm_plane *cursor = crtc->cursor, *underlying;
10714 struct drm_plane_state *new_cursor_state, *new_underlying_state;
10715 int i;
10716 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
12f4849a
SS
10717
10718 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10719 * cursor per pipe but it's going to inherit the scaling and
10720 * positioning from the underlying pipe. Check that the cursor plane's
d1bfbe8a 10721 * scaling matches the underlying planes'. */
12f4849a 10722
d1bfbe8a
SS
10723 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10724 if (!new_cursor_state || !new_cursor_state->fb) {
12f4849a
SS
10725 return 0;
10726 }
10727
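 /* Scale factors are in 1/1000 units: e.g. a 64x64 cursor FB
 * (src_w == 64 << 16) shown in a 128x128 CRTC rect yields a
 * cursor_scale of 2000, i.e. 2x in each direction.
 */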
10728 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10729 (new_cursor_state->src_w >> 16);
10730 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10731 (new_cursor_state->src_h >> 16);
10732
d1bfbe8a
SS
10733 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10734 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10735 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10736 continue;
12f4849a 10737
d1bfbe8a
SS
10738 /* Ignore disabled planes */
10739 if (!new_underlying_state->fb)
10740 continue;
10741
10742 underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10743 (new_underlying_state->src_w >> 16);
10744 underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10745 (new_underlying_state->src_h >> 16);
10746
10747 if (cursor_scale_w != underlying_scale_w ||
10748 cursor_scale_h != underlying_scale_h) {
10749 drm_dbg_atomic(crtc->dev,
10750 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10751 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10752 return -EINVAL;
10753 }
10754
10755 /* If this plane covers the whole CRTC, no need to check planes underneath */
10756 if (new_underlying_state->crtc_x <= 0 &&
10757 new_underlying_state->crtc_y <= 0 &&
10758 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10759 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10760 break;
12f4849a
SS
10761 }
10762
10763 return 0;
10764}
10765
e10517b3 10766#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
10767static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10768{
10769 struct drm_connector *connector;
10770 struct drm_connector_state *conn_state;
10771 struct amdgpu_dm_connector *aconnector = NULL;
10772 int i;
10773 for_each_new_connector_in_state(state, connector, conn_state, i) {
10774 if (conn_state->crtc != crtc)
10775 continue;
10776
10777 aconnector = to_amdgpu_dm_connector(connector);
10778 if (!aconnector->port || !aconnector->mst_port)
10779 aconnector = NULL;
10780 else
10781 break;
10782 }
10783
10784 if (!aconnector)
10785 return 0;
10786
10787 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10788}
e10517b3 10789#endif
44be939f 10790
b8592b48
LL
10791/**
10792 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10793 * @dev: The DRM device
10794 * @state: The atomic state to commit
10795 *
10796 * Validate that the given atomic state is programmable by DC into hardware.
10797 * This involves constructing a &struct dc_state reflecting the new hardware
10798 * state we wish to commit, then querying DC to see if it is programmable. It's
10799 * important not to modify the existing DC state. Otherwise, atomic_check
10800 * may unexpectedly commit hardware changes.
10801 *
10802 * When validating the DC state, it's important that the right locks are
10803 * acquired. For the full update case, which removes/adds/updates streams on one
10804 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
10805 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 10806 * flip using DRM's synchronization events.
b8592b48
LL
10807 *
10808 * Note that DM adds the affected connectors for all CRTCs in state, when that
10809 * might not seem necessary. This is because DC stream creation requires the
10810 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10811 * be possible but non-trivial - a possible TODO item.
10812 *
10813 * Return: 0 on success, or a negative error code if validation failed.
10814 */
7578ecda
AD
10815static int amdgpu_dm_atomic_check(struct drm_device *dev,
10816 struct drm_atomic_state *state)
62f55537 10817{
1348969a 10818 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 10819 struct dm_atomic_state *dm_state = NULL;
62f55537 10820 struct dc *dc = adev->dm.dc;
62f55537 10821 struct drm_connector *connector;
c2cea706 10822 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 10823 struct drm_crtc *crtc;
fc9e9920 10824 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
10825 struct drm_plane *plane;
10826 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 10827 enum dc_status status;
1e88ad0a 10828 int ret, i;
62f55537 10829 bool lock_and_validation_needed = false;
886876ec 10830 struct dm_crtc_state *dm_old_crtc_state;
6513104b
HW
10831#if defined(CONFIG_DRM_AMD_DC_DCN)
10832 struct dsc_mst_fairness_vars vars[MAX_PIPES];
41724ea2
BL
10833 struct drm_dp_mst_topology_state *mst_state;
10834 struct drm_dp_mst_topology_mgr *mgr;
6513104b 10835#endif
62f55537 10836
e8a98235 10837 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 10838
62f55537 10839 ret = drm_atomic_helper_check_modeset(dev, state);
68ca1c3e
S
10840 if (ret) {
10841 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
01e28f9c 10842 goto fail;
68ca1c3e 10843 }
62f55537 10844
c5892a10
SW
10845 /* Check connector changes */
10846 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10847 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10848 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10849
10850 /* Skip connectors that are disabled or part of modeset already. */
10851 if (!old_con_state->crtc && !new_con_state->crtc)
10852 continue;
10853
10854 if (!new_con_state->crtc)
10855 continue;
10856
10857 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10858 if (IS_ERR(new_crtc_state)) {
68ca1c3e 10859 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
c5892a10
SW
10860 ret = PTR_ERR(new_crtc_state);
10861 goto fail;
10862 }
10863
10864 if (dm_old_con_state->abm_level !=
10865 dm_new_con_state->abm_level)
10866 new_crtc_state->connectors_changed = true;
10867 }
10868
e10517b3 10869#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 10870 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
10871 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10872 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10873 ret = add_affected_mst_dsc_crtcs(state, crtc);
68ca1c3e
S
10874 if (ret) {
10875 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
44be939f 10876 goto fail;
68ca1c3e 10877 }
44be939f
ML
10878 }
10879 }
10880 }
e10517b3 10881#endif
1e88ad0a 10882 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
10883 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10884
1e88ad0a 10885 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 10886 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
10887 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10888 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 10889 continue;
7bef1af3 10890
03fc4cf4 10891 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
68ca1c3e
S
10892 if (ret) {
10893 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
03fc4cf4 10894 goto fail;
68ca1c3e 10895 }
03fc4cf4 10896
1e88ad0a
S
10897 if (!new_crtc_state->enable)
10898 continue;
fc9e9920 10899
1e88ad0a 10900 ret = drm_atomic_add_affected_connectors(state, crtc);
68ca1c3e
S
10901 if (ret) {
10902 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
706bc8c5 10903 goto fail;
68ca1c3e 10904 }
fc9e9920 10905
1e88ad0a 10906 ret = drm_atomic_add_affected_planes(state, crtc);
68ca1c3e
S
10907 if (ret) {
10908 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
1e88ad0a 10909 goto fail;
68ca1c3e 10910 }
115a385c 10911
cbac53f7 10912 if (dm_old_crtc_state->dsc_force_changed)
115a385c 10913 new_crtc_state->mode_changed = true;
e7b07cee
HW
10914 }
10915
2d9e6431
NK
10916 /*
10917 * Add all primary and overlay planes on the CRTC to the state
10918 * whenever a plane is enabled to maintain correct z-ordering
10919 * and to enable fast surface updates.
10920 */
10921 drm_for_each_crtc(crtc, dev) {
10922 bool modified = false;
10923
10924 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10925 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10926 continue;
10927
10928 if (new_plane_state->crtc == crtc ||
10929 old_plane_state->crtc == crtc) {
10930 modified = true;
10931 break;
10932 }
10933 }
10934
10935 if (!modified)
10936 continue;
10937
10938 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10939 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10940 continue;
10941
10942 new_plane_state =
10943 drm_atomic_get_plane_state(state, plane);
10944
10945 if (IS_ERR(new_plane_state)) {
10946 ret = PTR_ERR(new_plane_state);
68ca1c3e 10947 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
2d9e6431
NK
10948 goto fail;
10949 }
10950 }
10951 }
10952
62f55537 10953 /* Remove exiting planes if they are modified */
9e869063
LL
10954 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10955 ret = dm_update_plane_state(dc, state, plane,
10956 old_plane_state,
10957 new_plane_state,
10958 false,
10959 &lock_and_validation_needed);
68ca1c3e
S
10960 if (ret) {
10961 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 10962 goto fail;
68ca1c3e 10963 }
62f55537
AG
10964 }
10965
10966 /* Disable all crtcs which require disable */
4b9674e5
LL
10967 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10968 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10969 old_crtc_state,
10970 new_crtc_state,
10971 false,
10972 &lock_and_validation_needed);
68ca1c3e
S
10973 if (ret) {
10974 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
4b9674e5 10975 goto fail;
68ca1c3e 10976 }
62f55537
AG
10977 }
10978
10979 /* Enable all crtcs which require enable */
4b9674e5
LL
10980 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10981 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10982 old_crtc_state,
10983 new_crtc_state,
10984 true,
10985 &lock_and_validation_needed);
68ca1c3e
S
10986 if (ret) {
10987 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
4b9674e5 10988 goto fail;
68ca1c3e 10989 }
62f55537
AG
10990 }
10991
10992 /* Add new/modified planes */
9e869063
LL
10993 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10994 ret = dm_update_plane_state(dc, state, plane,
10995 old_plane_state,
10996 new_plane_state,
10997 true,
10998 &lock_and_validation_needed);
68ca1c3e
S
10999 if (ret) {
11000 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 11001 goto fail;
68ca1c3e 11002 }
62f55537
AG
11003 }
11004
b349f76e
ES
11005 /* Run this here since we want to validate the streams we created */
11006 ret = drm_atomic_helper_check_planes(dev, state);
68ca1c3e
S
11007 if (ret) {
11008 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
b349f76e 11009 goto fail;
68ca1c3e 11010 }
62f55537 11011
12f4849a
SS
11012 /* Check cursor planes scaling */
11013 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11014 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
68ca1c3e
S
11015 if (ret) {
11016 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
12f4849a 11017 goto fail;
68ca1c3e 11018 }
12f4849a
SS
11019 }
11020
43d10d30
NK
11021 if (state->legacy_cursor_update) {
11022 /*
11023 * This is a fast cursor update coming from the plane update
11024 * helper, check if it can be done asynchronously for better
11025 * performance.
11026 */
11027 state->async_update =
11028 !drm_atomic_helper_async_check(dev, state);
11029
11030 /*
11031 * Skip the remaining global validation if this is an async
11032 * update. Cursor updates can be done without affecting
11033 * state or bandwidth calcs and this avoids the performance
11034 * penalty of locking the private state object and
11035 * allocating a new dc_state.
11036 */
11037 if (state->async_update)
11038 return 0;
11039 }
11040
ebdd27e1 11041 /* Check scaling and underscan changes */
1f6010a9 11042 /* TODO Removed scaling changes validation due to inability to commit
e7b07cee
HW
11043 * a new stream into the context w/o causing a full reset. Need to
11044 * decide how to handle.
11045 */
c2cea706 11046 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
11047 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11048 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11049 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
11050
11051 /* Skip any modesets/resets */
0bc9706d
LSL
11052 if (!acrtc || drm_atomic_crtc_needs_modeset(
11053 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
11054 continue;
11055
b830ebc9 11056 /* Skip anything that is not a scaling or underscan change */
54d76575 11057 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
11058 continue;
11059
11060 lock_and_validation_needed = true;
11061 }
11062
41724ea2
BL
11063#if defined(CONFIG_DRM_AMD_DC_DCN)
11064 /* set the slot info for each mst_state based on the link encoding format */
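 /*
 * (An 8b/10b MST link carries 63 usable time slots per MTP while
 * 128b/132b carries 64; drm_dp_mst_update_slots() records the right
 * total on the topology state for the payload checks that follow.)
 */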
11065 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11066 struct amdgpu_dm_connector *aconnector;
11067 struct drm_connector *connector;
11068 struct drm_connector_list_iter iter;
11069 u8 link_coding_cap;
11070
11071 if (!mgr->mst_state)
11072 continue;
11073
11074 drm_connector_list_iter_begin(dev, &iter);
11075 drm_for_each_connector_iter(connector, &iter) {
11076 int id = connector->index;
11077
11078 if (id == mst_state->mgr->conn_base_id) {
11079 aconnector = to_amdgpu_dm_connector(connector);
11080 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11081 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11082
11083 break;
11084 }
11085 }
11086 drm_connector_list_iter_end(&iter);
11087
11088 }
11089#endif
f6d7c7fa
NK
11090 /**
11091 * Streams and planes are reset when there are changes that affect
11092 * bandwidth. Anything that affects bandwidth needs to go through
11093 * DC global validation to ensure that the configuration can be applied
11094 * to hardware.
11095 *
11096 * We have to currently stall out here in atomic_check for outstanding
11097 * commits to finish in this case because our IRQ handlers reference
11098 * DRM state directly - we can end up disabling interrupts too early
11099 * if we don't.
11100 *
11101 * TODO: Remove this stall and drop DM state private objects.
a87fa993 11102 */
f6d7c7fa 11103 if (lock_and_validation_needed) {
eb3dc897 11104 ret = dm_atomic_get_state(state, &dm_state);
68ca1c3e
S
11105 if (ret) {
11106 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
eb3dc897 11107 goto fail;
68ca1c3e 11108 }
e7b07cee
HW
11109
11110 ret = do_aquire_global_lock(dev, state);
68ca1c3e
S
11111 if (ret) {
11112 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
e7b07cee 11113 goto fail;
68ca1c3e 11114 }
1dc90497 11115
d9fe1a4c 11116#if defined(CONFIG_DRM_AMD_DC_DCN)
68ca1c3e
S
11117 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11118 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
8c20a1ed 11119 goto fail;
68ca1c3e 11120 }
8c20a1ed 11121
6513104b 11122 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
68ca1c3e
S
11123 if (ret) {
11124 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
29b9ba74 11125 goto fail;
68ca1c3e 11126 }
d9fe1a4c 11127#endif
29b9ba74 11128
ded58c7b
ZL
11129 /*
11130 * Perform validation of MST topology in the state:
11131 * We need to perform MST atomic check before calling
11132 * dc_validate_global_state(), or there is a chance
11133 * to get stuck in an infinite loop and hang eventually.
11134 */
11135 ret = drm_dp_mst_atomic_check(state);
68ca1c3e
S
11136 if (ret) {
11137 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
ded58c7b 11138 goto fail;
68ca1c3e 11139 }
85fb8bb9 11140 status = dc_validate_global_state(dc, dm_state->context, true);
74a16675 11141 if (status != DC_OK) {
68ca1c3e 11142 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
74a16675 11143 dc_status_to_str(status), status);
e7b07cee
HW
11144 ret = -EINVAL;
11145 goto fail;
11146 }
bd200d19 11147 } else {
674e78ac 11148 /*
bd200d19
NK
11149 * The commit is a fast update. Fast updates shouldn't change
11150 * the DC context, affect global validation, and can have their
11151 * commit work done in parallel with other commits not touching
11152 * the same resource. If we have a new DC context as part of
11153 * the DM atomic state from validation we need to free it and
11154 * retain the existing one instead.
fde9f39a
MR
11155 *
11156 * Furthermore, since the DM atomic state only contains the DC
11157 * context and can safely be annulled, we can free the state
11158 * and clear the associated private object now to free
11159 * some memory and avoid a possible use-after-free later.
674e78ac 11160 */
bd200d19 11161
fde9f39a
MR
11162 for (i = 0; i < state->num_private_objs; i++) {
11163 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 11164
fde9f39a
MR
11165 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11166 int j = state->num_private_objs-1;
bd200d19 11167
fde9f39a
MR
11168 dm_atomic_destroy_state(obj,
11169 state->private_objs[i].state);
11170
11171 /* If i is not at the end of the array then the
11172 * last element needs to be moved to where i was
11173 * before the array can safely be truncated.
11174 */
11175 if (i != j)
11176 state->private_objs[i] =
11177 state->private_objs[j];
bd200d19 11178
fde9f39a
MR
11179 state->private_objs[j].ptr = NULL;
11180 state->private_objs[j].state = NULL;
11181 state->private_objs[j].old_state = NULL;
11182 state->private_objs[j].new_state = NULL;
11183
11184 state->num_private_objs = j;
11185 break;
11186 }
bd200d19 11187 }
e7b07cee
HW
11188 }
11189
caff0e66
NK
11190 /* Store the overall update type for use later in atomic check. */
11191 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11192 struct dm_crtc_state *dm_new_crtc_state =
11193 to_dm_crtc_state(new_crtc_state);
11194
f6d7c7fa
NK
11195 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11196 UPDATE_TYPE_FULL :
11197 UPDATE_TYPE_FAST;
e7b07cee
HW
11198 }
11199
11200 /* Must have succeeded at this point */
11201 WARN_ON(ret);
e8a98235
RS
11202
11203 trace_amdgpu_dm_atomic_check_finish(state, ret);
11204
e7b07cee
HW
11205 return ret;
11206
11207fail:
11208 if (ret == -EDEADLK)
01e28f9c 11209 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 11210 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 11211 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 11212 else
01e28f9c 11213 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee 11214
e8a98235
RS
11215 trace_amdgpu_dm_atomic_check_finish(state, ret);
11216
e7b07cee
HW
11217 return ret;
11218}
11219
3ee6b26b
AD
11220static bool is_dp_capable_without_timing_msa(struct dc *dc,
11221 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
11222{
11223 uint8_t dpcd_data;
11224 bool capable = false;
11225
c84dec2f 11226 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
11227 dm_helpers_dp_read_dpcd(
11228 NULL,
c84dec2f 11229 amdgpu_dm_connector->dc_link,
e7b07cee
HW
11230 DP_DOWN_STREAM_PORT_COUNT,
11231 &dpcd_data,
11232 sizeof(dpcd_data))) {
11233 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11234 }
11235
11236 return capable;
11237}
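
/*
 * Illustrative sketch (not part of the original file): the DPCD byte at
 * DP_DOWN_STREAM_PORT_COUNT packs several fields, and the helper above
 * only cares about the DP_MSA_TIMING_PAR_IGNORED bit. A hypothetical
 * stand-alone decoder for that byte could look like this.
 */
static inline bool example_msa_timing_par_ignored(uint8_t down_stream_port_count)
{
	/* DP_MSA_TIMING_PAR_IGNORED is bit 6 of DPCD address 0x007 */
	return (down_stream_port_count & DP_MSA_TIMING_PAR_IGNORED) != 0;
}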
f9b4f20c 11238
46db138d
SW
11239static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11240 unsigned int offset,
11241 unsigned int total_length,
11242 uint8_t *data,
11243 unsigned int length,
11244 struct amdgpu_hdmi_vsdb_info *vsdb)
11245{
11246 bool res;
11247 union dmub_rb_cmd cmd;
11248 struct dmub_cmd_send_edid_cea *input;
11249 struct dmub_cmd_edid_cea_output *output;
11250
11251 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11252 return false;
11253
11254 memset(&cmd, 0, sizeof(cmd));
11255
11256 input = &cmd.edid_cea.data.input;
11257
11258 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11259 cmd.edid_cea.header.sub_type = 0;
11260 cmd.edid_cea.header.payload_bytes =
11261 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11262 input->offset = offset;
11263 input->length = length;
11264 input->total_length = total_length;
11265 memcpy(input->payload, data, length);
11266
11267 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11268 if (!res) {
11269 DRM_ERROR("EDID CEA parser failed\n");
11270 return false;
11271 }
11272
11273 output = &cmd.edid_cea.data.output;
11274
11275 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11276 if (!output->ack.success) {
11277 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11278 output->ack.offset);
11279 }
11280 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11281 if (!output->amd_vsdb.vsdb_found)
11282 return false;
11283
11284 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11285 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11286 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11287 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11288 } else {
b76a8062 11289 DRM_WARN("Unknown EDID CEA parser results\n");
46db138d
SW
11290 return false;
11291 }
11292
11293 return true;
11294}
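
/*
 * Minimal usage sketch (assumption, not in the original file): stream one
 * 128-byte CEA extension block through dm_edid_parser_send_cea() in 8-byte
 * chunks, stopping early once DMUB reports the AMD VSDB. The caller is
 * assumed to have zero-initialized *vsdb beforehand.
 */
static bool example_stream_cea_block(struct amdgpu_display_manager *dm,
				     uint8_t *cea_ext,
				     struct amdgpu_hdmi_vsdb_info *vsdb)
{
	unsigned int off;

	for (off = 0; off < EDID_LENGTH; off += 8) {
		if (!dm_edid_parser_send_cea(dm, off, EDID_LENGTH,
					     &cea_ext[off], 8, vsdb))
			return false;
		if (vsdb->freesync_supported)
			return true; /* VSDB already parsed; stop sending */
	}

	return vsdb->freesync_supported;
}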
11295
11296static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
f9b4f20c
SW
11297 uint8_t *edid_ext, int len,
11298 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11299{
11300 int i;
f9b4f20c
SW
11301
11302 /* send extension block to DMCU for parsing */
11303 for (i = 0; i < len; i += 8) {
11304 bool res;
11305 int offset;
11306
11307 /* send 8 bytes at a time */
46db138d 11308 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
f9b4f20c
SW
11309 return false;
11310
11311 if (i + 8 == len) {
11312 /* EDID block fully sent, expect the parse result */
11313 int version, min_rate, max_rate;
11314
46db138d 11315 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
f9b4f20c
SW
11316 if (res) {
11317 /* amd vsdb found */
11318 vsdb_info->freesync_supported = 1;
11319 vsdb_info->amd_vsdb_version = version;
11320 vsdb_info->min_refresh_rate_hz = min_rate;
11321 vsdb_info->max_refresh_rate_hz = max_rate;
11322 return true;
11323 }
11324 /* not amd vsdb */
11325 return false;
11326 }
11327
11328 /* check for ack */
46db138d 11329 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
f9b4f20c
SW
11330 if (!res)
11331 return false;
11332 }
11333
11334 return false;
11335}
11336
46db138d
SW
11337static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11338 uint8_t *edid_ext, int len,
11339 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11340{
11341 int i;
11342
11343 /* send extension block to DMUB for parsing */
11344 for (i = 0; i < len; i += 8) {
11345 /* send 8 bytes at a time */
11346 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11347 return false;
11348 }
11349
11350 return vsdb_info->freesync_supported;
11351}
11352
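/*
 * Dispatch CEA parsing to DMUB when its firmware services are available;
 * otherwise fall back to the legacy DMCU path.
 */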
11353static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11354 uint8_t *edid_ext, int len,
11355 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11356{
11357 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11358
11359 if (adev->dm.dmub_srv)
11360 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11361 else
11362 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11363}
11364
7c7dd774 11365static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
f9b4f20c
SW
11366 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11367{
11368 uint8_t *edid_ext = NULL;
11369 int i;
11370 bool valid_vsdb_found = false;
11371
11372 /*----- drm_find_cea_extension() -----*/
11373 /* No EDID or EDID extensions */
11374 if (edid == NULL || edid->extensions == 0)
7c7dd774 11375 return -ENODEV;
f9b4f20c
SW
11376
11377 /* Find CEA extension */
11378 for (i = 0; i < edid->extensions; i++) {
11379 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11380 if (edid_ext[0] == CEA_EXT)
11381 break;
11382 }
11383
11384 if (i == edid->extensions)
7c7dd774 11385 return -ENODEV;
f9b4f20c
SW
11386
11387 /*----- cea_db_offsets() -----*/
11388 if (edid_ext[0] != CEA_EXT)
7c7dd774 11389 return -ENODEV;
f9b4f20c
SW
11390
11391 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
7c7dd774
AB
11392
11393 return valid_vsdb_found ? i : -ENODEV;
f9b4f20c
SW
11394}
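
/*
 * Usage sketch (assumption, not in the original file): derive a FreeSync
 * refresh range from the AMD VSDB in an HDMI EDID, mirroring the HDMI
 * branch of amdgpu_dm_update_freesync_caps() below. The ">10 Hz" check
 * matches the driver's own minimum usable range.
 */
static bool example_freesync_range_from_edid(struct amdgpu_dm_connector *aconnector,
					     struct edid *edid,
					     int *min_hz, int *max_hz)
{
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (parse_hdmi_amd_vsdb(aconnector, edid, &vsdb_info) < 0 ||
	    !vsdb_info.freesync_supported)
		return false;

	*min_hz = vsdb_info.min_refresh_rate_hz;
	*max_hz = vsdb_info.max_refresh_rate_hz;

	return (*max_hz - *min_hz) > 10;
}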
11395
98e6436d
AK
11396void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11397 struct edid *edid)
e7b07cee 11398{
eb0709ba 11399 int i = 0;
e7b07cee
HW
11400 struct detailed_timing *timing;
11401 struct detailed_non_pixel *data;
11402 struct detailed_data_monitor_range *range;
c84dec2f
HW
11403 struct amdgpu_dm_connector *amdgpu_dm_connector =
11404 to_amdgpu_dm_connector(connector);
bb47de73 11405 struct dm_connector_state *dm_con_state = NULL;
9ad54467 11406 struct dc_sink *sink;
e7b07cee
HW
11407
11408 struct drm_device *dev = connector->dev;
1348969a 11409 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 11410 bool freesync_capable = false;
f9b4f20c 11411 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
b830ebc9 11412
8218d7f1
HW
11413 if (!connector->state) {
11414 DRM_ERROR("%s - Connector has no state\n", __func__);
bb47de73 11415 goto update;
8218d7f1
HW
11416 }
11417
9b2fdc33
AP
11418 sink = amdgpu_dm_connector->dc_sink ?
11419 amdgpu_dm_connector->dc_sink :
11420 amdgpu_dm_connector->dc_em_sink;
11421
11422 if (!edid || !sink) {
98e6436d
AK
11423 dm_con_state = to_dm_connector_state(connector->state);
11424
11425 amdgpu_dm_connector->min_vfreq = 0;
11426 amdgpu_dm_connector->max_vfreq = 0;
11427 amdgpu_dm_connector->pixel_clock_mhz = 0;
9b2fdc33
AP
11428 connector->display_info.monitor_range.min_vfreq = 0;
11429 connector->display_info.monitor_range.max_vfreq = 0;
11430 freesync_capable = false;
98e6436d 11431
bb47de73 11432 goto update;
98e6436d
AK
11433 }
11434
8218d7f1
HW
11435 dm_con_state = to_dm_connector_state(connector->state);
11436
e7b07cee 11437 if (!adev->dm.freesync_module)
bb47de73 11438 goto update;
f9b4f20c
SW
11439
11440
9b2fdc33
AP
11441 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11442 || sink->sink_signal == SIGNAL_TYPE_EDP) {
f9b4f20c
SW
11443 bool edid_check_required = false;
11444
11445 if (edid) {
e7b07cee
HW
11446 edid_check_required = is_dp_capable_without_timing_msa(
11447 adev->dm.dc,
c84dec2f 11448 amdgpu_dm_connector);
e7b07cee 11449 }
e7b07cee 11450
f9b4f20c
SW
11451 if (edid_check_required && (edid->version > 1 ||
11452 (edid->version == 1 && edid->revision > 1))) {
11453 for (i = 0; i < 4; i++) {
e7b07cee 11454
f9b4f20c
SW
11455 timing = &edid->detailed_timings[i];
11456 data = &timing->data.other_data;
11457 range = &data->data.range;
11458 /*
11459 * Check if monitor has continuous frequency mode
11460 */
11461 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11462 continue;
11463 /*
11464 * Check for range-limits-only descriptors: if flags == 1,
11465 * no additional timing information is provided.
11466 * Default GTF, GTF secondary curve and CVT are not
11467 * supported.
11468 */
11469 if (range->flags != 1)
11470 continue;
a0ffc3fd 11471
f9b4f20c
SW
11472 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11473 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11474 amdgpu_dm_connector->pixel_clock_mhz =
11475 range->pixel_clock_mhz * 10;
a0ffc3fd 11476
f9b4f20c
SW
11477 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11478 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
e7b07cee 11479
f9b4f20c
SW
11480 break;
11481 }
98e6436d 11482
f9b4f20c
SW
11483 if (amdgpu_dm_connector->max_vfreq -
11484 amdgpu_dm_connector->min_vfreq > 10) {
98e6436d 11485
f9b4f20c
SW
11486 freesync_capable = true;
11487 }
11488 }
9b2fdc33 11489 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
7c7dd774
AB
11490 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11491 if (i >= 0 && vsdb_info.freesync_supported) {
f9b4f20c
SW
11492 timing = &edid->detailed_timings[i];
11493 data = &timing->data.other_data;
11494
11495 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11496 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11497 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11498 freesync_capable = true;
11499
11500 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11501 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
e7b07cee
HW
11502 }
11503 }
bb47de73
NK
11504
11505update:
11506 if (dm_con_state)
11507 dm_con_state->freesync_capable = freesync_capable;
11508
11509 if (connector->vrr_capable_property)
11510 drm_connector_set_vrr_capable_property(connector,
11511 freesync_capable);
e7b07cee
HW
11512}
11513
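/*
 * Sketch (assumption, not in the original file): how the result computed
 * above can be read back from the DM connector state once
 * amdgpu_dm_update_freesync_caps() has run.
 */
static bool example_connector_is_freesync_capable(struct drm_connector *connector)
{
	struct dm_connector_state *dm_state;

	if (!connector->state)
		return false;

	dm_state = to_dm_connector_state(connector->state);
	return dm_state->freesync_capable;
}
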
3d4e52d0
VL
11514void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11515{
1348969a 11516 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
11517 struct dc *dc = adev->dm.dc;
11518 int i;
11519
11520 mutex_lock(&adev->dm.dc_lock);
11521 if (dc->current_state) {
11522 for (i = 0; i < dc->current_state->stream_count; ++i)
11523 dc->current_state->streams[i]
11524 ->triggered_crtc_reset.enabled =
11525 adev->dm.force_timing_sync;
11526
11527 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11528 dc_trigger_sync(dc, dc->current_state);
11529 }
11530 mutex_unlock(&adev->dm.dc_lock);
11531}
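
/*
 * Sketch (assumption, not in the original file): a debugfs-style toggle
 * that flips the force_timing_sync flag and immediately re-arms the
 * per-stream CRTC reset trigger via the helper above.
 */
static void example_set_force_timing_sync(struct amdgpu_device *adev, bool enable)
{
	adev->dm.force_timing_sync = enable;
	amdgpu_dm_trigger_timing_sync(adev_to_drm(adev));
}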
9d83722d
RS
11532
11533void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11534 uint32_t value, const char *func_name)
11535{
11536#ifdef DM_CHECK_ADDR_0
11537 if (address == 0) {
11538 DC_ERR("invalid register write; address = 0\n");
11539 return;
11540 }
11541#endif
11542 cgs_write_register(ctx->cgs_device, address, value);
11543 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11544}
11545
11546uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11547 const char *func_name)
11548{
11549 uint32_t value;
11550#ifdef DM_CHECK_ADDR_0
11551 if (address == 0) {
11552 DC_ERR("invalid register read; address = 0\n");
11553 return 0;
11554 }
11555#endif
11556
11557 if (ctx->dmub_srv &&
11558 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11559 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11560 ASSERT(false);
11561 return 0;
11562 }
11563
11564 value = cgs_read_register(ctx->cgs_device, address);
11565
11566 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11567
11568 return value;
11569}
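
/*
 * Illustrative sketch (not part of the original file): a traced
 * read-modify-write built from the two accessors above, so both halves
 * show up in the amdgpu_dc_rreg/amdgpu_dc_wreg trace events.
 */
static void example_traced_rmw(const struct dc_context *ctx, uint32_t address,
			       uint32_t mask, uint32_t value)
{
	uint32_t reg = dm_read_reg_func(ctx, address, __func__);

	reg = (reg & ~mask) | (value & mask);
	dm_write_reg_func(ctx, address, reg, __func__);
}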
81927e28 11570
88f52b1f
JS
11571int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11572 uint8_t status_type, uint32_t *operation_result)
11573{
11574 struct amdgpu_device *adev = ctx->driver_context;
11575 int return_status = -1;
11576 struct dmub_notification *p_notify = adev->dm.dmub_notify;
11577
11578 if (is_cmd_aux) {
11579 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11580 return_status = p_notify->aux_reply.length;
11581 *operation_result = p_notify->result;
11582 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11583 *operation_result = AUX_RET_ERROR_TIMEOUT;
11584 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11585 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11586 } else {
11587 *operation_result = AUX_RET_ERROR_UNKNOWN;
11588 }
11589 } else {
11590 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11591 return_status = 0;
11592 *operation_result = p_notify->sc_status;
11593 } else {
11594 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11595 }
11596 }
11597
11598 return return_status;
11599}
11600
11601int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11602 unsigned int link_index, void *cmd_payload, void *operation_result)
81927e28
JS
11603{
11604 struct amdgpu_device *adev = ctx->driver_context;
11605 int ret = 0;
11606
88f52b1f
JS
11607 if (is_cmd_aux) {
11608 dc_process_dmub_aux_transfer_async(ctx->dc,
11609 link_index, (struct aux_payload *)cmd_payload);
11610 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11611 (struct set_config_cmd_payload *)cmd_payload,
11612 adev->dm.dmub_notify)) {
11613 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11614 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11615 (uint32_t *)operation_result);
11616 }
11617
9e3a50d2 11618 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
81927e28 11619 if (ret == 0) {
9e3a50d2 11620 DRM_ERROR("wait_for_completion_timeout() timed out!\n");
88f52b1f
JS
11621 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11622 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11623 (uint32_t *)operation_result);
81927e28 11624 }
81927e28 11625
88f52b1f
JS
11626 if (is_cmd_aux) {
11627 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11628 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
81927e28 11629
88f52b1f
JS
11630 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11631 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11632 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11633 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11634 adev->dm.dmub_notify->aux_reply.length);
11635 }
11636 }
81927e28
JS
11637 }
11638
88f52b1f
JS
11639 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11640 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11641 (uint32_t *)operation_result);
81927e28 11642}
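
/*
 * Usage sketch (assumption, not in the original file): a synchronous
 * one-byte DPCD read built on the wrapper above. Field names follow
 * struct aux_payload from dc; treat this as illustrative only.
 */
static int example_dpcd_read_byte(struct dc_context *ctx,
				  unsigned int link_index,
				  uint32_t dpcd_address, uint8_t *byte)
{
	uint32_t operation_result = AUX_RET_ERROR_UNKNOWN;
	uint8_t reply;
	struct aux_payload payload = {
		.address = dpcd_address,
		.length = 1,
		.data = byte,
		.reply = &reply,
		.write = false,
	};

	/* returns the reply length on success, negative on failure */
	return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx, link_index,
							&payload,
							&operation_result);
}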
1edf5ae1
ZL
11643
11644/*
11645 * Check whether seamless boot is supported.
11646 *
11647 * So far we only support seamless boot on CHIP_VANGOGH.
11648 * If everything goes well, we may consider expanding
11649 * seamless boot to other ASICs.
11650 */
11651bool check_seamless_boot_capability(struct amdgpu_device *adev)
11652{
11653 switch (adev->asic_type) {
11654 case CHIP_VANGOGH:
11655 if (!adev->mman.keep_stolen_vga_memory)
11656 return true;
11657 break;
11658 default:
11659 break;
11660 }
11661
11662 return false;
11663}
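
/*
 * Sketch (assumption, not in the original file): how a caller might gate
 * a stream's seamless-boot optimization on the capability check above,
 * using the apply_seamless_boot_optimization flag in dc_stream_state.
 */
static void example_mark_seamless_boot(struct amdgpu_device *adev,
				       struct dc_stream_state *stream)
{
	if (check_seamless_boot_capability(adev))
		stream->apply_seamless_boot_optimization = true;
}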