drm/amd/display: Adding dpia debug bits for hpd delay
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

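/*
 * DP dongles are reported to userspace through the standard DRM
 * "subconnector" property; map DC's dongle type onto the DRM enum and
 * keep the property in sync with the currently detected sink.
 */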
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter() - Get counter for number of vertical blanks
 * @adev: desired amdgpu device
 * @crtc: which CRTC to get the counter from
 *
 * Returns the counter for vertical blanks.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

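/*
 * dm_crtc_get_scanoutpos() - Query DC for the vblank interval and current
 * scanout position of a CRTC, parsed back into the legacy register format:
 * @vbl packs v_blank_start (low 16 bits) and v_blank_end (high 16 bits),
 * @position packs the vertical (low) and horizontal (high) beam position.
 */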
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

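/*
 * A CRTC is considered "VRR active" when its freesync config is in one of
 * the two active states: truly variable refresh, or fixed refresh while
 * staying within the VRR range. The _irq variant reads the copy kept in
 * dm_irq_params so it can be used from interrupt handlers; the other
 * variant reads the atomic dm_crtc_state.
 */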
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

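/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Handles the VUPDATE interrupt, which fires after the end of the front
 * porch. In VRR mode this is where core vblank handling and BTR processing
 * for pre-DCE12 ASICs happen, since vblank timestamps are only valid once
 * scanout has left the front porch.
 */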
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread and also signals the event to wake up the thread.
 */
void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev = adev->dm.ddev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets the indicator for whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if @callback is NULL or
 * @type is out of range.
 */
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
				   dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

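/*
 * Example usage, as done from amdgpu_dm_init() below:
 *
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *				      dmub_hpd_callback, true);
 *
 * registers dmub_hpd_callback() for HPD notifications and asks for it to
 * be run from the offloaded HPD work thread instead of directly from the
 * outbox interrupt handler.
 */
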
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
										      dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining pending DMUB notifications
 * and trace-buffer entries.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

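/*
 * Audio component binding: the callbacks below let the HDA audio driver
 * query ELD (display audio capability) data for a given port and be
 * notified of hotplugs, via the drm_audio_component framework.
 */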
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					  int pipe, bool *enabled,
					  unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

914
915static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
916 .get_eld = amdgpu_dm_audio_component_get_eld,
917};
918
919static int amdgpu_dm_audio_component_bind(struct device *kdev,
920 struct device *hda_kdev, void *data)
921{
922 struct drm_device *dev = dev_get_drvdata(kdev);
1348969a 923 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
924 struct drm_audio_component *acomp = data;
925
926 acomp->ops = &amdgpu_dm_audio_component_ops;
927 acomp->dev = kdev;
928 adev->dm.audio_component = acomp;
929
930 return 0;
931}
932
933static void amdgpu_dm_audio_component_unbind(struct device *kdev,
934 struct device *hda_kdev, void *data)
935{
936 struct drm_device *dev = dev_get_drvdata(kdev);
1348969a 937 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
938 struct drm_audio_component *acomp = data;
939
940 acomp->ops = NULL;
941 acomp->dev = NULL;
942 adev->dm.audio_component = NULL;
943}
944
945static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
946 .bind = amdgpu_dm_audio_component_bind,
947 .unbind = amdgpu_dm_audio_component_unbind,
948};
949
950static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
951{
952 int i, ret;
953
954 if (!amdgpu_audio)
955 return 0;
956
957 adev->mode_info.audio.enabled = true;
958
959 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
960
961 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
962 adev->mode_info.audio.pin[i].channels = -1;
963 adev->mode_info.audio.pin[i].rate = -1;
964 adev->mode_info.audio.pin[i].bits_per_sample = -1;
965 adev->mode_info.audio.pin[i].status_bits = 0;
966 adev->mode_info.audio.pin[i].category_code = 0;
967 adev->mode_info.audio.pin[i].connected = false;
968 adev->mode_info.audio.pin[i].id =
969 adev->dm.dc->res_pool->audios[i]->inst;
970 adev->mode_info.audio.pin[i].offset = 0;
971 }
972
973 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
974 if (ret < 0)
975 return ret;
976
977 adev->dm.audio_registered = true;
978
979 return 0;
980}
981
982static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
983{
984 if (!amdgpu_audio)
985 return;
986
987 if (!adev->mode_info.audio.enabled)
988 return;
989
990 if (adev->dm.audio_registered) {
991 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
992 adev->dm.audio_registered = false;
993 }
994
995 /* TODO: Disable audio? */
996
997 adev->mode_info.audio.enabled = false;
998}
999
dfd84d90 1000static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
6ce8f316
NK
1001{
1002 struct drm_audio_component *acomp = adev->dm.audio_component;
1003
1004 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1005 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1006
1007 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1008 pin, -1);
1009 }
1010}
1011
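/*
 * dm_dmub_hw_init() - Bring up the DMUB service, when the ASIC has one.
 *
 * Copies the DMUB firmware and VBIOS into their framebuffer windows,
 * clears the mailbox/trace-buffer/fw-state regions, programs the hardware
 * parameters (including the DPIA bits on Yellow Carp) and waits for the
 * firmware auto-load to complete. Returns 0 on success, including on
 * ASICs without DMUB support, and a negative errno on failure.
 */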
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;
	struct dc *dc = adev->dm.dc;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->asic_type) {
	case CHIP_YELLOW_CARP:
		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
			hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
#endif
		}
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (add 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
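/*
 * vblank_control_worker() - Deferred work for vblank enable/disable requests.
 *
 * Keeps a count of CRTCs that currently need vblank interrupts, allows DC
 * idle optimizations (MALL) only when that count drops to zero, and enters
 * or exits PSR according to the OS vblank requirements for the stream.
 */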
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

#endif
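
/*
 * dm_handle_hpd_rx_offload_work() - Deferred HPD_RX interrupt handling.
 *
 * Runs from the per-link offload workqueue: it re-detects the sink and,
 * when not in GPU reset, services automated-test requests or recovers
 * from link loss while holding the DM dc_lock.
 */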
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}

1325
3ce51649
AD
1326struct amdgpu_stutter_quirk {
1327 u16 chip_vendor;
1328 u16 chip_device;
1329 u16 subsys_vendor;
1330 u16 subsys_device;
1331 u8 revision;
1332};
1333
1334static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1335 /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1336 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1337 { 0, 0, 0, 0, 0 },
1338};
1339
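/*
 * dm_should_disable_stutter() - PCI-ID based quirk lookup.
 *
 * Walks amdgpu_stutter_quirk_list and returns true when the exact
 * vendor/device/subsystem/revision combination of @pdev is known to
 * misbehave with memory stutter enabled.
 */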
static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

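	/*
	 * Per-ASIC tuning of the init flags: gpu_vm_support (scanout from
	 * system memory) is enabled for Carrizo/Stoney and the DCN IP
	 * revisions listed below; Renoir-class parts (DCN 2.1) keep DMCU
	 * enabled only for known-good firmware versions, and DMCU is
	 * disabled outright on DCN 2.0.3.
	 */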
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 1, 0):
			init_data.flags.gpu_vm_support = true;
			switch (adev->dm.dmcub_fw_version) {
			case 0: /* development */
			case 0x1: /* linux-firmware.git hash 6d9f399 */
			case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
				init_data.flags.disable_dmcu = false;
				break;
			default:
				init_data.flags.disable_dmcu = true;
			}
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			init_data.flags.gpu_vm_support = true;
			break;
		case IP_VERSION(2, 0, 3):
			init_data.flags.disable_dmcu = true;
			break;
		default:
			break;
		}
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	init_data.flags.power_down_display_on_boot = true;

	if (check_seamless_boot_capability(adev)) {
		init_data.flags.power_down_display_on_boot = false;
		init_data.flags.allow_seamless_boot_optimization = true;
		DRM_INFO("Seamless boot condition check passed\n");
	}

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
		adev->dm.dc->debug.disable_dsc = true;
		adev->dm.dc->debug.disable_dsc_edp = true;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
	if (!adev->dm.hpd_rx_offload_wq) {
		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
		goto error;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_control_workqueue.\n");
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: fail to register dmub aux callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd_irq callback");
			goto error;
		}
#endif /* CONFIG_DRM_AMD_DC_DCN */
	}

4562236b
HW
1587 if (amdgpu_dm_initialize_drm_device(adev)) {
1588 DRM_ERROR(
1589 "amdgpu: failed to initialize sw for display support.\n");
1590 goto error;
1591 }
1592
f74367e4
AD
1593 /* create fake encoders for MST */
1594 dm_dp_create_fake_mst_encoders(adev);
1595
4562236b
HW
1596 /* TODO: Add_display_info? */
1597
1598 /* TODO use dynamic cursor width */
4a580877
LT
1599 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1600 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
4562236b 1601
4a580877 1602 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
4562236b
HW
1603 DRM_ERROR(
1604 "amdgpu: failed to initialize sw for display support.\n");
1605 goto error;
1606 }
1607
c0fb85ae 1608
f1ad2f5e 1609 DRM_DEBUG_DRIVER("KMS initialized.\n");
4562236b
HW
1610
1611 return 0;
1612error:
1613 amdgpu_dm_fini(adev);
1614
59d0f396 1615 return -EINVAL;
4562236b
HW
1616}
1617
static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

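/*
 * Tear down, in roughly reverse order of amdgpu_dm_init(): workqueues
 * first, then the DRM device state, then the DMUB service and firmware
 * buffer, and finally DC itself.
 */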
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;
	}
#endif

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
		destroy_workqueue(adev->dm.delayed_hpd_wq);
		adev->dm.delayed_hpd_wq = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	if (adev->dm.hpd_rx_offload_wq) {
		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
			if (adev->dm.hpd_rx_offload_wq[i].wq) {
				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
			}
		}

		kfree(adev->dm.hpd_rx_offload_wq);
		adev->dm.hpd_rx_offload_wq = NULL;
	}

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

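/*
 * DMCU microcode is only carried as a separate binary for a handful of
 * ASICs (Raven/Picasso, Navi12). DCE parts and the newer DCN parts in
 * the IP-version list either need no separate DMCU image or carry the
 * equivalent functionality in the DMUB firmware, so this function
 * returns 0 for them without requesting anything.
 */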
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			return 0;
		default:
			break;
		}
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

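/*
 * Register accessors handed to the DMUB service below, so it can reach
 * DC register space through the usual dm_read_reg()/dm_write_reg()
 * helpers.
 */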
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case IP_VERSION(3, 0, 0):
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		} else {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		}
		break;
	case IP_VERSION(3, 0, 1):
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case IP_VERSION(3, 0, 2):
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case IP_VERSION(3, 0, 3):
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

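/*
 * dm_late_init() programs the backlight ramping parameters ABM needs
 * into DMCU IRAM (or into the DMUB ABM config on ASICs where ABM lives
 * in DMUB), then kicks off MST topology detection on all connectors.
 */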
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_override = false;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction, Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;
	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu) {
		if (!dmcu_load_iram(dmcu, params))
			return -EINVAL;
	} else if (adev->dm.dc->ctx->dmub_srv) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		int edp_num;

		get_edp_links(adev->dm.dc, edp_links, &edp_num);
		for (i = 0; i < edp_num; i++) {
			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
				return -EINVAL;
		}
	}

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 0, 2):
	case IP_VERSION(2, 0, 0):
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

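/*
 * Commit a state with zero streams so DC detaches all planes and
 * releases every pipe before a GPU reset; the cached state is
 * re-committed from dm_resume().
 */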
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
{
	int i;

	if (dm->hpd_rx_offload_wq) {
		for (i = 0; i < dm->dc->caps.max_links; i++)
			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
	}
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);

#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(adev->dm.dc, false);
#endif

		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		hpd_rx_irq_work_suspend(dm);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	hpd_rx_irq_work_suspend(dm);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

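/*
 * Fabricate a local sink for a forced connector so the rest of the
 * detection path can run without live HPD/DDC traffic.
 */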
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink)
		dc_sink_release(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}

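/*
 * Re-commit the cached state after a GPU reset with force_full_update
 * set on every surface, so DC reprograms the hardware from scratch.
 */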
static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}

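/* Push a dpms_off update to the stream on this link, blanking it. */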
static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	acrtc_state->force_dpms_off = true;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		/*
		 * The dc->current_state is backed up into dm->cached_dc_state
		 * before we commit 0 streams.
		 *
		 * DC will clear link encoder assignments on the real state
		 * but the changes won't propagate over to the copy we made
		 * before the 0 streams commit.
		 *
		 * DC expects that link encoder assignments are *not* valid
		 * when committing a state, so as a workaround it needs to be
		 * cleared here.
		 */
		link_enc_cfg_init(dm->dc, dc_state);

		if (dc_enable_dmub_notifications(adev->dm.dc))
			amdgpu_dm_outbox_init(adev);

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Re-enable outbox interrupts for DPIA. */
	if (dc_enable_dmub_notifications(adev->dm.dc))
		amdgpu_dm_outbox_init(adev);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.early_fini = amdgpu_dm_early_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

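/*
 * Derive the AUX backlight capabilities (min/max input signal) for an
 * eDP link from the sink's HDR metadata, honoring the amdgpu_backlight
 * module parameter override.
 */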
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	int i;

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	for (i = 0; i < dm->num_of_edps; i++) {
		if (link == dm->backlight_link[i])
			break;
	}
	if (i >= dm->num_of_edps)
		return;
	caps = &dm->backlight_caps[i];
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 /*||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
		caps->aux_support = true;

	if (amdgpu_backlight == 0)
		caps->aux_support = false;
	else if (amdgpu_backlight == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need floating point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From Euclid's division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			dc_sink_release(aconnector->dc_sink);
		}

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
	struct dm_crtc_state *dm_crtc_state = NULL;

	if (adev->dm.disable_hpd_irq)
		return;

	if (dm_con_state->base.state && dm_con_state->base.crtc)
		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
					dm_con_state->base.state,
					dm_con_state->base.crtc));
	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		if (new_connection_type == dc_connection_none &&
		    aconnector->dc_link->type == dc_connection_none &&
		    dm_crtc_state)
			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;

	handle_hpd_irq_helper(aconnector);
}

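/*
 * Drain the sink's ESI/sink-count DPCD bytes and feed MST sideband
 * IRQs to the DRM MST manager, ACKing each handled one, until no new
 * IRQ is pending (bounded by max_process_count).
 */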
static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
					 union hpd_irq_data hpd_irq_data)
{
	struct hpd_rx_irq_offload_work *offload_work =
		kzalloc(sizeof(*offload_work), GFP_KERNEL);

	if (!offload_work) {
		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
		return;
	}

	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
	offload_work->data = hpd_irq_data;
	offload_work->offload_wq = offload_wq;

	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
}

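/*
 * HPD-RX (short pulse) handler: let DC decode the IRQ data, service
 * MST sideband messages inline, and defer link-loss and automated-test
 * handling to the per-link offload workqueue.
 */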
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	bool result = false;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union hpd_irq_data hpd_irq_data;
	bool link_loss = false;
	bool has_left_work = false;
	int idx = aconnector->base.index;
	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	mutex_lock(&aconnector->hpd_lock);

	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
					   &link_loss, true, &has_left_work);

	if (!has_left_work)
		goto out;

	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
		goto out;
	}

	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
		    hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			dm_handle_mst_sideband_msg(aconnector);
			goto out;
		}

		if (link_loss) {
			bool skip = false;

			spin_lock(&offload_wq->offload_lock);
			skip = offload_wq->is_handling_link_loss;

			if (!skip)
				offload_wq->is_handling_link_loss = true;

			spin_unlock(&offload_wq->offload_lock);

			if (!skip)
				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);

			goto out;
		}
	}

out:
	if (result && !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif

	if (dc_link->type != dc_connection_mst_branch)
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);

	mutex_unlock(&aconnector->hpd_lock);
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);

			if (adev->dm.hpd_rx_offload_wq)
				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
					aconnector;
		}
	}
}

55e56389
MR
3308#if defined(CONFIG_DRM_AMD_DC_SI)
3309/* Register IRQ sources and initialize IRQ callbacks */
3310static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3311{
3312 struct dc *dc = adev->dm.dc;
3313 struct common_irq_params *c_irq_params;
3314 struct dc_interrupt_params int_params = {0};
3315 int r;
3316 int i;
3317 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3318
3319 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3320 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3321
3322 /*
3323 * Actions of amdgpu_irq_add_id():
3324 * 1. Register a set() function with base driver.
3325 * Base driver will call set() function to enable/disable an
3326 * interrupt in DC hardware.
3327 * 2. Register amdgpu_dm_irq_handler().
3328 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3329 * coming from DC hardware.
3330 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3331 * for acknowledging and handling. */
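	/*
	 * Illustrative pairing (descriptive note, not extra driver code):
	 * each loop below first calls amdgpu_irq_add_id() so the base
	 * driver owns enable/disable and dispatch for the src_id, then
	 * translates that src_id to a DC irq_source with
	 * dc_interrupt_to_irq_source() and attaches the DM handler via
	 * amdgpu_dm_irq_register_interrupt().
	 */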
3332
3333 /* Use VBLANK interrupt */
3334 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3335		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3336 if (r) {
3337 DRM_ERROR("Failed to add crtc irq id!\n");
3338 return r;
3339 }
3340
3341 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3342 int_params.irq_source =
3343			dc_interrupt_to_irq_source(dc, i + 1, 0);
3344
3345 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3346
3347 c_irq_params->adev = adev;
3348 c_irq_params->irq_src = int_params.irq_source;
3349
3350 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3351 dm_crtc_high_irq, c_irq_params);
3352 }
3353
3354 /* Use GRPH_PFLIP interrupt */
3355 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3356 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3357 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3358 if (r) {
3359 DRM_ERROR("Failed to add page flip irq id!\n");
3360 return r;
3361 }
3362
3363 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3364 int_params.irq_source =
3365 dc_interrupt_to_irq_source(dc, i, 0);
3366
3367 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3368
3369 c_irq_params->adev = adev;
3370 c_irq_params->irq_src = int_params.irq_source;
3371
3372 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3373 dm_pflip_high_irq, c_irq_params);
3374
3375 }
3376
3377 /* HPD */
3378 r = amdgpu_irq_add_id(adev, client_id,
3379 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3380 if (r) {
3381 DRM_ERROR("Failed to add hpd irq id!\n");
3382 return r;
3383 }
3384
3385 register_hpd_handlers(adev);
3386
3387 return 0;
3388}
3389#endif
3390
4562236b
HW
3391/* Register IRQ sources and initialize IRQ callbacks */
3392static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3393{
3394 struct dc *dc = adev->dm.dc;
3395 struct common_irq_params *c_irq_params;
3396 struct dc_interrupt_params int_params = {0};
3397 int r;
3398 int i;
1ffdeca6 3399 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 3400
c08182f2 3401 if (adev->family >= AMDGPU_FAMILY_AI)
3760f76c 3402 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
3403
3404 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3405 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3406
1f6010a9
DF
3407 /*
3408 * Actions of amdgpu_irq_add_id():
4562236b
HW
3409 * 1. Register a set() function with base driver.
3410 * Base driver will call set() function to enable/disable an
3411 * interrupt in DC hardware.
3412 * 2. Register amdgpu_dm_irq_handler().
3413 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3414 * coming from DC hardware.
3415 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3416 * for acknowledging and handling. */
3417
b57de80a 3418 /* Use VBLANK interrupt */
e9029155 3419 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 3420 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
3421 if (r) {
3422 DRM_ERROR("Failed to add crtc irq id!\n");
3423 return r;
3424 }
3425
3426 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3427 int_params.irq_source =
3d761e79 3428 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 3429
b57de80a 3430 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
3431
3432 c_irq_params->adev = adev;
3433 c_irq_params->irq_src = int_params.irq_source;
3434
3435 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3436 dm_crtc_high_irq, c_irq_params);
3437 }
3438
d2574c33
MK
3439 /* Use VUPDATE interrupt */
3440 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3441 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3442 if (r) {
3443 DRM_ERROR("Failed to add vupdate irq id!\n");
3444 return r;
3445 }
3446
3447 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3448 int_params.irq_source =
3449 dc_interrupt_to_irq_source(dc, i, 0);
3450
3451 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3452
3453 c_irq_params->adev = adev;
3454 c_irq_params->irq_src = int_params.irq_source;
3455
3456 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3457 dm_vupdate_high_irq, c_irq_params);
3458 }
3459
3d761e79 3460 /* Use GRPH_PFLIP interrupt */
4562236b
HW
3461 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3462 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 3463 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
3464 if (r) {
3465 DRM_ERROR("Failed to add page flip irq id!\n");
3466 return r;
3467 }
3468
3469 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3470 int_params.irq_source =
3471 dc_interrupt_to_irq_source(dc, i, 0);
3472
3473 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3474
3475 c_irq_params->adev = adev;
3476 c_irq_params->irq_src = int_params.irq_source;
3477
3478 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3479 dm_pflip_high_irq, c_irq_params);
3480
3481 }
3482
3483 /* HPD */
2c8ad2d5
AD
3484 r = amdgpu_irq_add_id(adev, client_id,
3485 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
3486 if (r) {
3487 DRM_ERROR("Failed to add hpd irq id!\n");
3488 return r;
3489 }
3490
3491 register_hpd_handlers(adev);
3492
3493 return 0;
3494}
3495
b86a1aa3 3496#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3497/* Register IRQ sources and initialize IRQ callbacks */
3498static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3499{
3500 struct dc *dc = adev->dm.dc;
3501 struct common_irq_params *c_irq_params;
3502 struct dc_interrupt_params int_params = {0};
3503 int r;
3504 int i;
660d5406
WL
3505#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3506 static const unsigned int vrtl_int_srcid[] = {
3507 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3508 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3509 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3510 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3511 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3512 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3513 };
3514#endif
ff5ef992
AD
3515
3516 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3517 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3518
1f6010a9
DF
3519 /*
3520 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
3521 * 1. Register a set() function with base driver.
3522 * Base driver will call set() function to enable/disable an
3523 * interrupt in DC hardware.
3524 * 2. Register amdgpu_dm_irq_handler().
3525 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3526 * coming from DC hardware.
3527 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3528 * for acknowledging and handling.
1f6010a9 3529 */
ff5ef992
AD
3530
3531 /* Use VSTARTUP interrupt */
3532 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3533 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3534 i++) {
3760f76c 3535 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
3536
3537 if (r) {
3538 DRM_ERROR("Failed to add crtc irq id!\n");
3539 return r;
3540 }
3541
3542 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3543 int_params.irq_source =
3544 dc_interrupt_to_irq_source(dc, i, 0);
3545
3546 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3547
3548 c_irq_params->adev = adev;
3549 c_irq_params->irq_src = int_params.irq_source;
3550
2346ef47
NK
3551 amdgpu_dm_irq_register_interrupt(
3552 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3553 }
3554
86bc2219
WL
3555 /* Use otg vertical line interrupt */
3556#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
660d5406
WL
3557 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3558 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3559 vrtl_int_srcid[i], &adev->vline0_irq);
86bc2219
WL
3560
3561 if (r) {
3562 DRM_ERROR("Failed to add vline0 irq id!\n");
3563 return r;
3564 }
3565
3566 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3567 int_params.irq_source =
660d5406
WL
3568 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3569
3570 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3571 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3572 break;
3573 }
86bc2219
WL
3574
3575 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3576 - DC_IRQ_SOURCE_DC1_VLINE0];
3577
3578 c_irq_params->adev = adev;
3579 c_irq_params->irq_src = int_params.irq_source;
3580
3581 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3582 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3583 }
3584#endif
3585
2346ef47
NK
3586 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3587 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3588	 * to trigger at the end of each vblank, regardless of the state of the lock,
3589 * matching DCE behaviour.
3590 */
3591 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3592 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3593 i++) {
3594 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3595
3596 if (r) {
3597 DRM_ERROR("Failed to add vupdate irq id!\n");
3598 return r;
3599 }
3600
3601 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3602 int_params.irq_source =
3603 dc_interrupt_to_irq_source(dc, i, 0);
3604
3605 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3606
3607 c_irq_params->adev = adev;
3608 c_irq_params->irq_src = int_params.irq_source;
3609
ff5ef992 3610 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 3611 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
3612 }
3613
ff5ef992
AD
3614 /* Use GRPH_PFLIP interrupt */
3615 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3616 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3617 i++) {
3760f76c 3618 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
3619 if (r) {
3620 DRM_ERROR("Failed to add page flip irq id!\n");
3621 return r;
3622 }
3623
3624 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3625 int_params.irq_source =
3626 dc_interrupt_to_irq_source(dc, i, 0);
3627
3628 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3629
3630 c_irq_params->adev = adev;
3631 c_irq_params->irq_src = int_params.irq_source;
3632
3633 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3634 dm_pflip_high_irq, c_irq_params);
3635
3636 }
3637
81927e28
JS
3638 /* HPD */
3639 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3640 &adev->hpd_irq);
3641 if (r) {
3642 DRM_ERROR("Failed to add hpd irq id!\n");
3643 return r;
3644 }
a08f16cf 3645
81927e28 3646 register_hpd_handlers(adev);
a08f16cf 3647
81927e28
JS
3648 return 0;
3649}
3650/* Register Outbox IRQ sources and initialize IRQ callbacks */
3651static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3652{
3653 struct dc *dc = adev->dm.dc;
3654 struct common_irq_params *c_irq_params;
3655 struct dc_interrupt_params int_params = {0};
3656 int r, i;
3657
3658 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3659 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3660
3661 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3662 &adev->dmub_outbox_irq);
3663 if (r) {
3664 DRM_ERROR("Failed to add outbox irq id!\n");
3665 return r;
3666 }
3667
3668 if (dc->ctx->dmub_srv) {
3669 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3670 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
a08f16cf 3671 int_params.irq_source =
81927e28 3672 dc_interrupt_to_irq_source(dc, i, 0);
a08f16cf 3673
81927e28 3674 c_irq_params = &adev->dm.dmub_outbox_params[0];
a08f16cf
LHM
3675
3676 c_irq_params->adev = adev;
3677 c_irq_params->irq_src = int_params.irq_source;
3678
3679 amdgpu_dm_irq_register_interrupt(adev, &int_params,
81927e28 3680 dm_dmub_outbox1_low_irq, c_irq_params);
ff5ef992
AD
3681 }
3682
ff5ef992
AD
3683 return 0;
3684}
3685#endif
3686
eb3dc897
NK
3687/*
3688 * Acquires the lock for the atomic state object and returns
3689 * the new atomic state.
3690 *
3691 * This should only be called during atomic check.
3692 */
3693static int dm_atomic_get_state(struct drm_atomic_state *state,
3694 struct dm_atomic_state **dm_state)
3695{
3696 struct drm_device *dev = state->dev;
1348969a 3697 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3698 struct amdgpu_display_manager *dm = &adev->dm;
3699 struct drm_private_state *priv_state;
eb3dc897
NK
3700
3701 if (*dm_state)
3702 return 0;
3703
eb3dc897
NK
3704 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3705 if (IS_ERR(priv_state))
3706 return PTR_ERR(priv_state);
3707
3708 *dm_state = to_dm_atomic_state(priv_state);
3709
3710 return 0;
3711}
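
/*
 * Typical usage during atomic check (illustrative sketch; variable names
 * are placeholders):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	... dm_state->context is now safe to inspect or modify ...
 */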
3712
dfd84d90 3713static struct dm_atomic_state *
eb3dc897
NK
3714dm_atomic_get_new_state(struct drm_atomic_state *state)
3715{
3716 struct drm_device *dev = state->dev;
1348969a 3717 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3718 struct amdgpu_display_manager *dm = &adev->dm;
3719 struct drm_private_obj *obj;
3720 struct drm_private_state *new_obj_state;
3721 int i;
3722
3723 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3724 if (obj->funcs == dm->atomic_obj.funcs)
3725 return to_dm_atomic_state(new_obj_state);
3726 }
3727
3728 return NULL;
3729}
3730
eb3dc897
NK
3731static struct drm_private_state *
3732dm_atomic_duplicate_state(struct drm_private_obj *obj)
3733{
3734 struct dm_atomic_state *old_state, *new_state;
3735
3736 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3737 if (!new_state)
3738 return NULL;
3739
3740 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3741
813d20dc
AW
3742 old_state = to_dm_atomic_state(obj->state);
3743
3744 if (old_state && old_state->context)
3745 new_state->context = dc_copy_state(old_state->context);
3746
eb3dc897
NK
3747 if (!new_state->context) {
3748 kfree(new_state);
3749 return NULL;
3750 }
3751
eb3dc897
NK
3752 return &new_state->base;
3753}
3754
3755static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3756 struct drm_private_state *state)
3757{
3758 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3759
3760 if (dm_state && dm_state->context)
3761 dc_release_state(dm_state->context);
3762
3763 kfree(dm_state);
3764}
3765
3766static struct drm_private_state_funcs dm_atomic_state_funcs = {
3767 .atomic_duplicate_state = dm_atomic_duplicate_state,
3768 .atomic_destroy_state = dm_atomic_destroy_state,
3769};
3770
4562236b
HW
3771static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3772{
eb3dc897 3773 struct dm_atomic_state *state;
4562236b
HW
3774 int r;
3775
3776 adev->mode_info.mode_config_initialized = true;
3777
4a580877
LT
3778 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3779 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3780
4a580877
LT
3781 adev_to_drm(adev)->mode_config.max_width = 16384;
3782 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3783
4a580877
LT
3784 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3785 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3786 /* indicates support for immediate flip */
4a580877 3787 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3788
4a580877 3789 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3790
eb3dc897
NK
3791 state = kzalloc(sizeof(*state), GFP_KERNEL);
3792 if (!state)
3793 return -ENOMEM;
3794
813d20dc 3795 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3796 if (!state->context) {
3797 kfree(state);
3798 return -ENOMEM;
3799 }
3800
3801 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3802
4a580877 3803 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3804 &adev->dm.atomic_obj,
eb3dc897
NK
3805 &state->base,
3806 &dm_atomic_state_funcs);
3807
3dc9b1ce 3808 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3809 if (r) {
3810 dc_release_state(state->context);
3811 kfree(state);
4562236b 3812 return r;
b67a468a 3813 }
4562236b 3814
6ce8f316 3815 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3816 if (r) {
3817 dc_release_state(state->context);
3818 kfree(state);
6ce8f316 3819 return r;
b67a468a 3820 }
6ce8f316 3821
4562236b
HW
3822 return 0;
3823}
3824
206bbafe
DF
3825#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3826#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3827#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3828
4562236b
HW
3829#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3830 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3831
7fd13bae
AD
3832static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3833 int bl_idx)
206bbafe
DF
3834{
3835#if defined(CONFIG_ACPI)
3836 struct amdgpu_dm_backlight_caps caps;
3837
58965855
FS
3838 memset(&caps, 0, sizeof(caps));
3839
7fd13bae 3840 if (dm->backlight_caps[bl_idx].caps_valid)
206bbafe
DF
3841 return;
3842
f9b7f370 3843 amdgpu_acpi_get_backlight_caps(&caps);
206bbafe 3844 if (caps.caps_valid) {
7fd13bae 3845 dm->backlight_caps[bl_idx].caps_valid = true;
94562810
RS
3846 if (caps.aux_support)
3847 return;
7fd13bae
AD
3848 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3849 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
206bbafe 3850 } else {
7fd13bae 3851 dm->backlight_caps[bl_idx].min_input_signal =
206bbafe 3852 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
7fd13bae 3853 dm->backlight_caps[bl_idx].max_input_signal =
206bbafe
DF
3854 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3855 }
3856#else
7fd13bae 3857 if (dm->backlight_caps[bl_idx].aux_support)
94562810
RS
3858 return;
3859
7fd13bae
AD
3860 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3861 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3862#endif
3863}
3864
69d9f427
AM
3865static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3866 unsigned *min, unsigned *max)
94562810 3867{
94562810 3868 if (!caps)
69d9f427 3869 return 0;
94562810 3870
69d9f427
AM
3871 if (caps->aux_support) {
3872 // Firmware limits are in nits, DC API wants millinits.
3873 *max = 1000 * caps->aux_max_input_signal;
3874 *min = 1000 * caps->aux_min_input_signal;
94562810 3875 } else {
69d9f427
AM
3876 // Firmware limits are 8-bit, PWM control is 16-bit.
3877 *max = 0x101 * caps->max_input_signal;
3878 *min = 0x101 * caps->min_input_signal;
94562810 3879 }
69d9f427
AM
3880 return 1;
3881}
94562810 3882
69d9f427
AM
3883static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3884 uint32_t brightness)
3885{
3886 unsigned min, max;
94562810 3887
69d9f427
AM
3888 if (!get_brightness_range(caps, &min, &max))
3889 return brightness;
3890
3891 // Rescale 0..255 to min..max
3892 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3893 AMDGPU_MAX_BL_LEVEL);
3894}
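
/*
 * Worked example (illustrative): with the PWM-path defaults
 * min_input_signal = 12 and max_input_signal = 255, the 16-bit range is
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 128 maps to 3084 + round(62451 * 128 / 255) = 34432.
 */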
3895
3896static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3897 uint32_t brightness)
3898{
3899 unsigned min, max;
3900
3901 if (!get_brightness_range(caps, &min, &max))
3902 return brightness;
3903
3904 if (brightness < min)
3905 return 0;
3906 // Rescale min..max to 0..255
3907 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3908 max - min);
94562810
RS
3909}
3910
3d6c9164 3911static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
7fd13bae 3912 int bl_idx,
3d6c9164 3913 u32 user_brightness)
4562236b 3914{
206bbafe 3915 struct amdgpu_dm_backlight_caps caps;
7fd13bae
AD
3916 struct dc_link *link;
3917 u32 brightness;
94562810 3918 bool rc;
4562236b 3919
7fd13bae
AD
3920 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3921 caps = dm->backlight_caps[bl_idx];
94562810 3922
7fd13bae 3923 dm->brightness[bl_idx] = user_brightness;
1f579254
AD
3924 /* update scratch register */
3925 if (bl_idx == 0)
3926 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
7fd13bae
AD
3927 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3928 link = (struct dc_link *)dm->backlight_link[bl_idx];
94562810 3929
3d6c9164 3930 /* Change brightness based on AUX property */
118b4627 3931 if (caps.aux_support) {
7fd13bae
AD
3932 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3933 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3934 if (!rc)
3935 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
118b4627 3936 } else {
7fd13bae
AD
3937 rc = dc_link_set_backlight_level(link, brightness, 0);
3938 if (!rc)
3939 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
118b4627 3940 }
94562810
RS
3941
3942 return rc ? 0 : 1;
4562236b
HW
3943}
3944
3d6c9164 3945static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4562236b 3946{
620a0d27 3947 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 3948 int i;
3d6c9164 3949
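	/*
	 * Map the backlight device back to its eDP index; the index is
	 * reset to 0 only if the walk runs off the end of the array.
	 */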
7fd13bae
AD
3950 for (i = 0; i < dm->num_of_edps; i++) {
3951 if (bd == dm->backlight_dev[i])
3952 break;
3953 }
3954 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3955 i = 0;
3956 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3d6c9164
AD
3957
3958 return 0;
3959}
3960
7fd13bae
AD
3961static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3962 int bl_idx)
3d6c9164 3963{
0ad3e64e 3964 struct amdgpu_dm_backlight_caps caps;
7fd13bae 3965 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
0ad3e64e 3966
7fd13bae
AD
3967 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3968 caps = dm->backlight_caps[bl_idx];
620a0d27 3969
0ad3e64e 3970 if (caps.aux_support) {
0ad3e64e
AD
3971 u32 avg, peak;
3972 bool rc;
3973
3974 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3975 if (!rc)
7fd13bae 3976 return dm->brightness[bl_idx];
0ad3e64e
AD
3977 return convert_brightness_to_user(&caps, avg);
3978 } else {
7fd13bae 3979 int ret = dc_link_get_backlight_level(link);
0ad3e64e
AD
3980
3981 if (ret == DC_ERROR_UNEXPECTED)
7fd13bae 3982 return dm->brightness[bl_idx];
0ad3e64e
AD
3983 return convert_brightness_to_user(&caps, ret);
3984 }
4562236b
HW
3985}
3986
3d6c9164
AD
3987static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3988{
3989 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 3990 int i;
3d6c9164 3991
7fd13bae
AD
3992 for (i = 0; i < dm->num_of_edps; i++) {
3993 if (bd == dm->backlight_dev[i])
3994 break;
3995 }
3996 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3997 i = 0;
3998 return amdgpu_dm_backlight_get_level(dm, i);
3d6c9164
AD
3999}
4000
4562236b 4001static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 4002 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
4003 .get_brightness = amdgpu_dm_backlight_get_brightness,
4004 .update_status = amdgpu_dm_backlight_update_status,
4005};
4006
7578ecda
AD
4007static void
4008amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
4009{
4010 char bl_name[16];
4011 struct backlight_properties props = { 0 };
4012
7fd13bae
AD
4013 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4014 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
206bbafe 4015
4562236b 4016 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 4017 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
4018 props.type = BACKLIGHT_RAW;
4019
4020 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
7fd13bae 4021 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4562236b 4022
7fd13bae
AD
4023 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4024 adev_to_drm(dm->adev)->dev,
4025 dm,
4026 &amdgpu_dm_backlight_ops,
4027 &props);
4562236b 4028
7fd13bae 4029 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4562236b
HW
4030 DRM_ERROR("DM: Backlight registration failed!\n");
4031 else
f1ad2f5e 4032 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b 4033}
4562236b
HW
4034#endif
4035
df534fff 4036static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 4037 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
4038 enum drm_plane_type plane_type,
4039 const struct dc_plane_cap *plane_cap)
df534fff 4040{
f180b4bc 4041 struct drm_plane *plane;
df534fff
S
4042 unsigned long possible_crtcs;
4043 int ret = 0;
4044
f180b4bc 4045 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
4046 if (!plane) {
4047 DRM_ERROR("KMS: Failed to allocate plane\n");
4048 return -ENOMEM;
4049 }
b2fddb13 4050 plane->type = plane_type;
df534fff
S
4051
4052 /*
b2fddb13
NK
4053 * HACK: IGT tests expect that the primary plane for a CRTC
4054 * can only have one possible CRTC. Only expose support for
4055 * any CRTC if they're not going to be used as a primary plane
4056 * for a CRTC - like overlay or underlay planes.
df534fff
S
4057 */
4058 possible_crtcs = 1 << plane_id;
4059 if (plane_id >= dm->dc->caps.max_streams)
4060 possible_crtcs = 0xff;
4061
cc1fec57 4062 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
4063
4064 if (ret) {
4065 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 4066 kfree(plane);
df534fff
S
4067 return ret;
4068 }
4069
54087768
NK
4070 if (mode_info)
4071 mode_info->planes[plane_id] = plane;
4072
df534fff
S
4073 return ret;
4074}
4075
89fc8d4e
HW
4076
4077static void register_backlight_device(struct amdgpu_display_manager *dm,
4078 struct dc_link *link)
4079{
4080#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4081 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4082
4083 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4084 link->type != dc_connection_none) {
1f6010a9
DF
4085 /*
4086	 * Even if registration fails, we should continue with
89fc8d4e
HW
4087 * DM initialization because not having a backlight control
4088	 * is better than a black screen.
4089 */
7fd13bae 4090 if (!dm->backlight_dev[dm->num_of_edps])
118b4627 4091 amdgpu_dm_register_backlight_device(dm);
89fc8d4e 4092
7fd13bae 4093 if (dm->backlight_dev[dm->num_of_edps]) {
118b4627
ML
4094 dm->backlight_link[dm->num_of_edps] = link;
4095 dm->num_of_edps++;
4096 }
89fc8d4e
HW
4097 }
4098#endif
4099}
4100
4101
1f6010a9
DF
4102/*
4103 * In this architecture, the association
4562236b
HW
4104 * connector -> encoder -> crtc
4105 * is not really required. The crtc and connector will hold the
4106 * display_index as an abstraction to use with the DAL component
4107 *
4108 * Returns 0 on success
4109 */
7578ecda 4110static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
4111{
4112 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 4113 int32_t i;
c84dec2f 4114 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 4115 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 4116 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 4117 uint32_t link_cnt;
cc1fec57 4118 int32_t primary_planes;
fbbdadf2 4119 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 4120 const struct dc_plane_cap *plane;
9470620e 4121 bool psr_feature_enabled = false;
4562236b 4122
d58159de
AD
4123 dm->display_indexes_num = dm->dc->caps.max_streams;
4124 /* Update the actual used number of crtc */
4125 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4126
4562236b 4127 link_cnt = dm->dc->caps.max_links;
4562236b
HW
4128 if (amdgpu_dm_mode_config_init(dm->adev)) {
4129 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 4130 return -EINVAL;
4562236b
HW
4131 }
4132
b2fddb13
NK
4133 /* There is one primary plane per CRTC */
4134 primary_planes = dm->dc->caps.max_streams;
54087768 4135 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 4136
b2fddb13
NK
4137 /*
4138 * Initialize primary planes, implicit planes for legacy IOCTLS.
4139 * Order is reversed to match iteration order in atomic check.
4140 */
4141 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
4142 plane = &dm->dc->caps.planes[i];
4143
b2fddb13 4144 if (initialize_plane(dm, mode_info, i,
cc1fec57 4145 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 4146 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 4147 goto fail;
d4e13b0d 4148 }
df534fff 4149 }
92f3ac40 4150
0d579c7e
NK
4151 /*
4152 * Initialize overlay planes, index starting after primary planes.
4153 * These planes have a higher DRM index than the primary planes since
4154 * they should be considered as having a higher z-order.
4155 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
4156 *
4157 * Only support DCN for now, and only expose one so we don't encourage
4158 * userspace to use up all the pipes.
0d579c7e 4159 */
cc1fec57
NK
4160 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4161 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4162
4163 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4164 continue;
4165
4166 if (!plane->blends_with_above || !plane->blends_with_below)
4167 continue;
4168
ea36ad34 4169 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
4170 continue;
4171
54087768 4172 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 4173 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 4174 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 4175 goto fail;
d4e13b0d 4176 }
cc1fec57
NK
4177
4178 /* Only create one overlay plane. */
4179 break;
d4e13b0d 4180 }
4562236b 4181
d4e13b0d 4182 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 4183 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 4184 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 4185 goto fail;
4562236b 4186 }
4562236b 4187
50610b74 4188#if defined(CONFIG_DRM_AMD_DC_DCN)
81927e28 4189 /* Use Outbox interrupt */
1d789535 4190 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4191 case IP_VERSION(3, 0, 0):
4192 case IP_VERSION(3, 1, 2):
4193 case IP_VERSION(3, 1, 3):
4194 case IP_VERSION(2, 1, 0):
81927e28
JS
4195 if (register_outbox_irq_handlers(dm->adev)) {
4196 DRM_ERROR("DM: Failed to initialize IRQ\n");
4197 goto fail;
4198 }
4199 break;
4200 default:
c08182f2 4201 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
1d789535 4202 adev->ip_versions[DCE_HWIP][0]);
81927e28 4203 }
9470620e
NK
4204
4205 /* Determine whether to enable PSR support by default. */
4206 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4207 switch (adev->ip_versions[DCE_HWIP][0]) {
4208 case IP_VERSION(3, 1, 2):
4209 case IP_VERSION(3, 1, 3):
4210 psr_feature_enabled = true;
4211 break;
4212 default:
4213 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4214 break;
4215 }
4216 }
50610b74 4217#endif
81927e28 4218
4562236b
HW
4219 /* loops over all connectors on the board */
4220 for (i = 0; i < link_cnt; i++) {
89fc8d4e 4221 struct dc_link *link = NULL;
4562236b
HW
4222
4223 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4224 DRM_ERROR(
4225 "KMS: Cannot support more than %d display indexes\n",
4226 AMDGPU_DM_MAX_DISPLAY_INDEX);
4227 continue;
4228 }
4229
4230 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4231 if (!aconnector)
cd8a2ae8 4232 goto fail;
4562236b
HW
4233
4234 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 4235 if (!aencoder)
cd8a2ae8 4236 goto fail;
4562236b
HW
4237
4238 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4239 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 4240 goto fail;
4562236b
HW
4241 }
4242
4243 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4244 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 4245 goto fail;
4562236b
HW
4246 }
4247
89fc8d4e
HW
4248 link = dc_get_link_at_index(dm->dc, i);
4249
fbbdadf2
BL
4250 if (!dc_link_detect_sink(link, &new_connection_type))
4251 DRM_ERROR("KMS: Failed to detect connector\n");
4252
4253 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4254 emulated_link_detect(link);
4255 amdgpu_dm_update_connector_after_detect(aconnector);
4256
4257 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 4258 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 4259 register_backlight_device(dm, link);
b295ce39
RL
4260 if (dm->num_of_edps)
4261 update_connector_ext_caps(aconnector);
9470620e 4262 if (psr_feature_enabled)
397a9bc5 4263 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
4264 }
4265
4266
4562236b
HW
4267 }
4268
70897848
NK
4269 /*
4270 * Disable vblank IRQs aggressively for power-saving.
4271 *
4272 * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
4273 * is also supported.
4274 */
4275 adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
4276
4562236b
HW
4277 /* Software is initialized. Now we can register interrupt handlers. */
4278 switch (adev->asic_type) {
55e56389
MR
4279#if defined(CONFIG_DRM_AMD_DC_SI)
4280 case CHIP_TAHITI:
4281 case CHIP_PITCAIRN:
4282 case CHIP_VERDE:
4283 case CHIP_OLAND:
4284 if (dce60_register_irq_handlers(dm->adev)) {
4285 DRM_ERROR("DM: Failed to initialize IRQ\n");
4286 goto fail;
4287 }
4288 break;
4289#endif
4562236b
HW
4290 case CHIP_BONAIRE:
4291 case CHIP_HAWAII:
cd4b356f
AD
4292 case CHIP_KAVERI:
4293 case CHIP_KABINI:
4294 case CHIP_MULLINS:
4562236b
HW
4295 case CHIP_TONGA:
4296 case CHIP_FIJI:
4297 case CHIP_CARRIZO:
4298 case CHIP_STONEY:
4299 case CHIP_POLARIS11:
4300 case CHIP_POLARIS10:
b264d345 4301 case CHIP_POLARIS12:
7737de91 4302 case CHIP_VEGAM:
2c8ad2d5 4303 case CHIP_VEGA10:
2325ff30 4304 case CHIP_VEGA12:
1fe6bf2f 4305 case CHIP_VEGA20:
4562236b
HW
4306 if (dce110_register_irq_handlers(dm->adev)) {
4307 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 4308 goto fail;
4562236b
HW
4309 }
4310 break;
4311 default:
c08182f2 4312#if defined(CONFIG_DRM_AMD_DC_DCN)
1d789535 4313 switch (adev->ip_versions[DCE_HWIP][0]) {
559f591d
AD
4314 case IP_VERSION(1, 0, 0):
4315 case IP_VERSION(1, 0, 1):
c08182f2
AD
4316 case IP_VERSION(2, 0, 2):
4317 case IP_VERSION(2, 0, 3):
4318 case IP_VERSION(2, 0, 0):
4319 case IP_VERSION(2, 1, 0):
4320 case IP_VERSION(3, 0, 0):
4321 case IP_VERSION(3, 0, 2):
4322 case IP_VERSION(3, 0, 3):
4323 case IP_VERSION(3, 0, 1):
4324 case IP_VERSION(3, 1, 2):
4325 case IP_VERSION(3, 1, 3):
4326 if (dcn10_register_irq_handlers(dm->adev)) {
4327 DRM_ERROR("DM: Failed to initialize IRQ\n");
4328 goto fail;
4329 }
4330 break;
4331 default:
2cbc6f42 4332 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
1d789535 4333 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4334 goto fail;
c08182f2
AD
4335 }
4336#endif
2cbc6f42 4337 break;
4562236b
HW
4338 }
4339
4562236b 4340 return 0;
cd8a2ae8 4341fail:
4562236b 4342 kfree(aencoder);
4562236b 4343 kfree(aconnector);
54087768 4344
59d0f396 4345 return -EINVAL;
4562236b
HW
4346}
4347
7578ecda 4348static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b 4349{
eb3dc897 4350 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
4351 return;
4352}
4353
4354/******************************************************************************
4355 * amdgpu_display_funcs functions
4356 *****************************************************************************/
4357
1f6010a9 4358/*
4562236b
HW
4359 * dm_bandwidth_update - program display watermarks
4360 *
4361 * @adev: amdgpu_device pointer
4362 *
4363 * Calculate and program the display watermarks and line buffer allocation.
4364 */
4365static void dm_bandwidth_update(struct amdgpu_device *adev)
4366{
49c07a99 4367 /* TODO: implement later */
4562236b
HW
4368}
4369
39cc5be2 4370static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
4371 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4372 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
4373 .backlight_set_level = NULL, /* never called for DC */
4374 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
4375 .hpd_sense = NULL,/* called unconditionally */
4376 .hpd_set_polarity = NULL, /* called unconditionally */
4377 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4378 .page_flip_get_scanoutpos =
4379 dm_crtc_get_scanoutpos,/* called unconditionally */
4380 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4381 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4382};
4383
4384#if defined(CONFIG_DEBUG_KERNEL_DC)
4385
3ee6b26b
AD
4386static ssize_t s3_debug_store(struct device *device,
4387 struct device_attribute *attr,
4388 const char *buf,
4389 size_t count)
4562236b
HW
4390{
4391 int ret;
4392 int s3_state;
ef1de361 4393 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 4394 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
4395
4396 ret = kstrtoint(buf, 0, &s3_state);
4397
4398 if (ret == 0) {
4399 if (s3_state) {
4400 dm_resume(adev);
4a580877 4401 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
4402 } else
4403 dm_suspend(adev);
4404 }
4405
4406 return ret == 0 ? count : 0;
4407}
4408
4409DEVICE_ATTR_WO(s3_debug);
4410
4411#endif
4412
4413static int dm_early_init(void *handle)
4414{
4415 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4416
4562236b 4417 switch (adev->asic_type) {
55e56389
MR
4418#if defined(CONFIG_DRM_AMD_DC_SI)
4419 case CHIP_TAHITI:
4420 case CHIP_PITCAIRN:
4421 case CHIP_VERDE:
4422 adev->mode_info.num_crtc = 6;
4423 adev->mode_info.num_hpd = 6;
4424 adev->mode_info.num_dig = 6;
4425 break;
4426 case CHIP_OLAND:
4427 adev->mode_info.num_crtc = 2;
4428 adev->mode_info.num_hpd = 2;
4429 adev->mode_info.num_dig = 2;
4430 break;
4431#endif
4562236b
HW
4432 case CHIP_BONAIRE:
4433 case CHIP_HAWAII:
4434 adev->mode_info.num_crtc = 6;
4435 adev->mode_info.num_hpd = 6;
4436 adev->mode_info.num_dig = 6;
4562236b 4437 break;
cd4b356f
AD
4438 case CHIP_KAVERI:
4439 adev->mode_info.num_crtc = 4;
4440 adev->mode_info.num_hpd = 6;
4441 adev->mode_info.num_dig = 7;
cd4b356f
AD
4442 break;
4443 case CHIP_KABINI:
4444 case CHIP_MULLINS:
4445 adev->mode_info.num_crtc = 2;
4446 adev->mode_info.num_hpd = 6;
4447 adev->mode_info.num_dig = 6;
cd4b356f 4448 break;
4562236b
HW
4449 case CHIP_FIJI:
4450 case CHIP_TONGA:
4451 adev->mode_info.num_crtc = 6;
4452 adev->mode_info.num_hpd = 6;
4453 adev->mode_info.num_dig = 7;
4562236b
HW
4454 break;
4455 case CHIP_CARRIZO:
4456 adev->mode_info.num_crtc = 3;
4457 adev->mode_info.num_hpd = 6;
4458 adev->mode_info.num_dig = 9;
4562236b
HW
4459 break;
4460 case CHIP_STONEY:
4461 adev->mode_info.num_crtc = 2;
4462 adev->mode_info.num_hpd = 6;
4463 adev->mode_info.num_dig = 9;
4562236b
HW
4464 break;
4465 case CHIP_POLARIS11:
b264d345 4466 case CHIP_POLARIS12:
4562236b
HW
4467 adev->mode_info.num_crtc = 5;
4468 adev->mode_info.num_hpd = 5;
4469 adev->mode_info.num_dig = 5;
4562236b
HW
4470 break;
4471 case CHIP_POLARIS10:
7737de91 4472 case CHIP_VEGAM:
4562236b
HW
4473 adev->mode_info.num_crtc = 6;
4474 adev->mode_info.num_hpd = 6;
4475 adev->mode_info.num_dig = 6;
4562236b 4476 break;
2c8ad2d5 4477 case CHIP_VEGA10:
2325ff30 4478 case CHIP_VEGA12:
1fe6bf2f 4479 case CHIP_VEGA20:
2c8ad2d5
AD
4480 adev->mode_info.num_crtc = 6;
4481 adev->mode_info.num_hpd = 6;
4482 adev->mode_info.num_dig = 6;
4483 break;
4562236b 4484 default:
c08182f2 4485#if defined(CONFIG_DRM_AMD_DC_DCN)
1d789535 4486 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4487 case IP_VERSION(2, 0, 2):
4488 case IP_VERSION(3, 0, 0):
4489 adev->mode_info.num_crtc = 6;
4490 adev->mode_info.num_hpd = 6;
4491 adev->mode_info.num_dig = 6;
4492 break;
4493 case IP_VERSION(2, 0, 0):
4494 case IP_VERSION(3, 0, 2):
4495 adev->mode_info.num_crtc = 5;
4496 adev->mode_info.num_hpd = 5;
4497 adev->mode_info.num_dig = 5;
4498 break;
4499 case IP_VERSION(2, 0, 3):
4500 case IP_VERSION(3, 0, 3):
4501 adev->mode_info.num_crtc = 2;
4502 adev->mode_info.num_hpd = 2;
4503 adev->mode_info.num_dig = 2;
4504 break;
559f591d
AD
4505 case IP_VERSION(1, 0, 0):
4506 case IP_VERSION(1, 0, 1):
c08182f2
AD
4507 case IP_VERSION(3, 0, 1):
4508 case IP_VERSION(2, 1, 0):
4509 case IP_VERSION(3, 1, 2):
4510 case IP_VERSION(3, 1, 3):
4511 adev->mode_info.num_crtc = 4;
4512 adev->mode_info.num_hpd = 4;
4513 adev->mode_info.num_dig = 4;
4514 break;
4515 default:
2cbc6f42 4516 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
1d789535 4517 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4518 return -EINVAL;
c08182f2
AD
4519 }
4520#endif
2cbc6f42 4521 break;
4562236b
HW
4522 }
4523
c8dd5715
MD
4524 amdgpu_dm_set_irq_funcs(adev);
4525
39cc5be2
AD
4526 if (adev->mode_info.funcs == NULL)
4527 adev->mode_info.funcs = &dm_display_funcs;
4528
1f6010a9
DF
4529 /*
4530 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 4531 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
4532 * amdgpu_device_init()
4533 */
4562236b
HW
4534#if defined(CONFIG_DEBUG_KERNEL_DC)
4535 device_create_file(
4a580877 4536 adev_to_drm(adev)->dev,
4562236b
HW
4537 &dev_attr_s3_debug);
4538#endif
4539
4540 return 0;
4541}
4542
9b690ef3 4543static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
4544 struct dc_stream_state *new_stream,
4545 struct dc_stream_state *old_stream)
9b690ef3 4546{
2afda735 4547 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4548}
4549
4550static bool modereset_required(struct drm_crtc_state *crtc_state)
4551{
2afda735 4552 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4553}
4554
7578ecda 4555static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
4556{
4557 drm_encoder_cleanup(encoder);
4558 kfree(encoder);
4559}
4560
4561static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4562 .destroy = amdgpu_dm_encoder_destroy,
4563};
4564
e7b07cee 4565
6300b3bd
MK
4566static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4567 struct drm_framebuffer *fb,
4568 int *min_downscale, int *max_upscale)
4569{
4570 struct amdgpu_device *adev = drm_to_adev(dev);
4571 struct dc *dc = adev->dm.dc;
4572 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4573 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4574
4575 switch (fb->format->format) {
4576 case DRM_FORMAT_P010:
4577 case DRM_FORMAT_NV12:
4578 case DRM_FORMAT_NV21:
4579 *max_upscale = plane_cap->max_upscale_factor.nv12;
4580 *min_downscale = plane_cap->max_downscale_factor.nv12;
4581 break;
4582
4583 case DRM_FORMAT_XRGB16161616F:
4584 case DRM_FORMAT_ARGB16161616F:
4585 case DRM_FORMAT_XBGR16161616F:
4586 case DRM_FORMAT_ABGR16161616F:
4587 *max_upscale = plane_cap->max_upscale_factor.fp16;
4588 *min_downscale = plane_cap->max_downscale_factor.fp16;
4589 break;
4590
4591 default:
4592 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4593 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4594 break;
4595 }
4596
4597 /*
4598 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4599 * scaling factor of 1.0 == 1000 units.
4600 */
4601 if (*max_upscale == 1)
4602 *max_upscale = 1000;
4603
4604 if (*min_downscale == 1)
4605 *min_downscale = 1000;
4606}
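
/*
 * Units note (illustrative): the limits are in 1/1000th units, so the
 * fallback values used below (min_downscale = 250, max_upscale = 16000)
 * mean a plane may be shrunk to 25% of its source size or grown 16x.
 */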
4607
4608
4375d625
S
4609static int fill_dc_scaling_info(struct amdgpu_device *adev,
4610 const struct drm_plane_state *state,
695af5f9 4611 struct dc_scaling_info *scaling_info)
e7b07cee 4612{
6300b3bd 4613 int scale_w, scale_h, min_downscale, max_upscale;
e7b07cee 4614
695af5f9 4615 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 4616
695af5f9
NK
4617 /* Source is fixed 16.16 but we ignore mantissa for now... */
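	/* e.g. a src_x of 0x180000 (24.0 in 16.16 fixed point) yields 24 */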
4618 scaling_info->src_rect.x = state->src_x >> 16;
4619 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 4620
d89f6048
HW
4621 /*
4622	 * For reasons we don't (yet) fully understand, a non-zero
4623 * src_y coordinate into an NV12 buffer can cause a
4375d625
S
4624 * system hang on DCN1x.
4625 * To avoid hangs (and maybe be overly cautious)
d89f6048
HW
4626 * let's reject both non-zero src_x and src_y.
4627 *
4628 * We currently know of only one use-case to reproduce a
4629 * scenario with non-zero src_x and src_y for NV12, which
4630 * is to gesture the YouTube Android app into full screen
4631 * on ChromeOS.
4632 */
4375d625
S
4633 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4634 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4635 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4636 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
d89f6048
HW
4637 return -EINVAL;
4638
695af5f9
NK
4639 scaling_info->src_rect.width = state->src_w >> 16;
4640 if (scaling_info->src_rect.width == 0)
4641 return -EINVAL;
4642
4643 scaling_info->src_rect.height = state->src_h >> 16;
4644 if (scaling_info->src_rect.height == 0)
4645 return -EINVAL;
4646
4647 scaling_info->dst_rect.x = state->crtc_x;
4648 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
4649
4650 if (state->crtc_w == 0)
695af5f9 4651 return -EINVAL;
e7b07cee 4652
695af5f9 4653 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
4654
4655 if (state->crtc_h == 0)
695af5f9 4656 return -EINVAL;
e7b07cee 4657
695af5f9 4658 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 4659
695af5f9
NK
4660 /* DRM doesn't specify clipping on destination output. */
4661 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 4662
6300b3bd
MK
4663 /* Validate scaling per-format with DC plane caps */
4664 if (state->plane && state->plane->dev && state->fb) {
4665 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4666 &min_downscale, &max_upscale);
4667 } else {
4668 min_downscale = 250;
4669 max_upscale = 16000;
4670 }
4671
6491f0c0
NK
4672 scale_w = scaling_info->dst_rect.width * 1000 /
4673 scaling_info->src_rect.width;
e7b07cee 4674
6300b3bd 4675 if (scale_w < min_downscale || scale_w > max_upscale)
6491f0c0
NK
4676 return -EINVAL;
4677
4678 scale_h = scaling_info->dst_rect.height * 1000 /
4679 scaling_info->src_rect.height;
4680
6300b3bd 4681 if (scale_h < min_downscale || scale_h > max_upscale)
6491f0c0
NK
4682 return -EINVAL;
4683
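	/*
	 * Example (illustrative): a 1920-wide source shown 960 wide gives
	 * scale_w = 960 * 1000 / 1920 = 500 (0.5x), which the fallback
	 * limits accept since 250 <= 500 <= 16000.
	 */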
695af5f9
NK
4684 /*
4685 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4686 * assume reasonable defaults based on the format.
4687 */
e7b07cee 4688
695af5f9 4689 return 0;
4562236b 4690}
695af5f9 4691
a3241991
BN
4692static void
4693fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4694 uint64_t tiling_flags)
e7b07cee 4695{
a3241991
BN
4696 /* Fill GFX8 params */
4697 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4698 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 4699
a3241991
BN
4700 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4701 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4702 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4703 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4704 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 4705
a3241991
BN
4706 /* XXX fix me for VI */
4707 tiling_info->gfx8.num_banks = num_banks;
4708 tiling_info->gfx8.array_mode =
4709 DC_ARRAY_2D_TILED_THIN1;
4710 tiling_info->gfx8.tile_split = tile_split;
4711 tiling_info->gfx8.bank_width = bankw;
4712 tiling_info->gfx8.bank_height = bankh;
4713 tiling_info->gfx8.tile_aspect = mtaspect;
4714 tiling_info->gfx8.tile_mode =
4715 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4716 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4717 == DC_ARRAY_1D_TILED_THIN1) {
4718 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
4719 }
4720
a3241991
BN
4721 tiling_info->gfx8.pipe_config =
4722 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
4723}
4724
a3241991
BN
4725static void
4726fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4727 union dc_tiling_info *tiling_info)
4728{
4729 tiling_info->gfx9.num_pipes =
4730 adev->gfx.config.gb_addr_config_fields.num_pipes;
4731 tiling_info->gfx9.num_banks =
4732 adev->gfx.config.gb_addr_config_fields.num_banks;
4733 tiling_info->gfx9.pipe_interleave =
4734 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4735 tiling_info->gfx9.num_shader_engines =
4736 adev->gfx.config.gb_addr_config_fields.num_se;
4737 tiling_info->gfx9.max_compressed_frags =
4738 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4739 tiling_info->gfx9.num_rb_per_se =
4740 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4741 tiling_info->gfx9.shaderEnable = 1;
1d789535 4742 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
a3241991 4743 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
7df7e505
NK
4744}
4745
695af5f9 4746static int
a3241991
BN
4747validate_dcc(struct amdgpu_device *adev,
4748 const enum surface_pixel_format format,
4749 const enum dc_rotation_angle rotation,
4750 const union dc_tiling_info *tiling_info,
4751 const struct dc_plane_dcc_param *dcc,
4752 const struct dc_plane_address *address,
4753 const struct plane_size *plane_size)
7df7e505
NK
4754{
4755 struct dc *dc = adev->dm.dc;
8daa1218
NC
4756 struct dc_dcc_surface_param input;
4757 struct dc_surface_dcc_cap output;
7df7e505 4758
8daa1218
NC
4759 memset(&input, 0, sizeof(input));
4760 memset(&output, 0, sizeof(output));
4761
a3241991 4762 if (!dcc->enable)
87b7ebc2
RS
4763 return 0;
4764
a3241991
BN
4765 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4766 !dc->cap_funcs.get_dcc_compression_cap)
09e5665a 4767 return -EINVAL;
7df7e505 4768
695af5f9 4769 input.format = format;
12e2b2d4
DL
4770 input.surface_size.width = plane_size->surface_size.width;
4771 input.surface_size.height = plane_size->surface_size.height;
695af5f9 4772 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 4773
695af5f9 4774 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 4775 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 4776 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
4777 input.scan = SCAN_DIRECTION_VERTICAL;
4778
4779 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 4780 return -EINVAL;
7df7e505
NK
4781
4782 if (!output.capable)
09e5665a 4783 return -EINVAL;
7df7e505 4784
a3241991
BN
4785 if (dcc->independent_64b_blks == 0 &&
4786 output.grph.rgb.independent_64b_blks != 0)
09e5665a 4787 return -EINVAL;
7df7e505 4788
a3241991
BN
4789 return 0;
4790}
4791
37384b3f
BN
4792static bool
4793modifier_has_dcc(uint64_t modifier)
4794{
4795 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4796}
4797
4798static unsigned
4799modifier_gfx9_swizzle_mode(uint64_t modifier)
4800{
4801 if (modifier == DRM_FORMAT_MOD_LINEAR)
4802 return 0;
4803
4804 return AMD_FMT_MOD_GET(TILE, modifier);
4805}
4806
dfbbfe3c
BN
4807static const struct drm_format_info *
4808amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4809{
816853f9 4810 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
4811}
4812
37384b3f
BN
4813static void
4814fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4815 union dc_tiling_info *tiling_info,
4816 uint64_t modifier)
4817{
4818 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4819 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4820 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4821 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4822
4823 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4824
4825 if (!IS_AMD_FMT_MOD(modifier))
4826 return;
4827
4828 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4829 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4830
4831 if (adev->family >= AMDGPU_FAMILY_NV) {
4832 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4833 } else {
4834 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4835
4836 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4837 }
4838}
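
/*
 * Decoding example (illustrative values): a modifier carrying
 * PIPE_XOR_BITS = 3 and PACKERS = 2 on an AMDGPU_FAMILY_NV part yields
 * num_pipes = 1 << min(4, 3) = 8, num_shader_engines = 1 << (3 - 3) = 1
 * and num_pkrs = 1 << 2 = 4.
 */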
4839
faa37f54
BN
4840enum dm_micro_swizzle {
4841 MICRO_SWIZZLE_Z = 0,
4842 MICRO_SWIZZLE_S = 1,
4843 MICRO_SWIZZLE_D = 2,
4844 MICRO_SWIZZLE_R = 3
4845};
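
/*
 * The low two bits of the GFX9+ swizzle mode select the micro swizzle;
 * e.g. AMD_FMT_MOD_TILE_GFX9_64K_S_X & 3 gives MICRO_SWIZZLE_S (an
 * observation based on the drm_fourcc tile encodings, noted here for
 * the check in dm_plane_format_mod_supported() below).
 */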
4846
4847static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4848 uint32_t format,
4849 uint64_t modifier)
4850{
4851 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4852 const struct drm_format_info *info = drm_format_info(format);
fe180178 4853 int i;
faa37f54
BN
4854
4855 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4856
4857 if (!info)
4858 return false;
4859
4860 /*
fe180178
QZ
4861 * We always have to allow these modifiers:
4862 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4863 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
faa37f54 4864 */
fe180178
QZ
4865 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4866 modifier == DRM_FORMAT_MOD_INVALID) {
faa37f54 4867 return true;
fe180178 4868 }
faa37f54 4869
fe180178
QZ
4870 /* Check that the modifier is on the list of the plane's supported modifiers. */
4871 for (i = 0; i < plane->modifier_count; i++) {
4872 if (modifier == plane->modifiers[i])
4873 break;
4874 }
4875 if (i == plane->modifier_count)
faa37f54
BN
4876 return false;
4877
4878 /*
4879 * For D swizzle the canonical modifier depends on the bpp, so check
4880 * it here.
4881 */
4882 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4883 adev->family >= AMDGPU_FAMILY_NV) {
4884 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4885 return false;
4886 }
4887
4888 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4889 info->cpp[0] < 8)
4890 return false;
4891
4892 if (modifier_has_dcc(modifier)) {
4893 /* Per radeonsi comments 16/64 bpp are more complicated. */
4894 if (info->cpp[0] != 4)
4895 return false;
951796f2
SS
4896 /* We support multi-planar formats, but not when combined with
4897 * additional DCC metadata planes. */
4898 if (info->num_planes > 1)
4899 return false;
faa37f54
BN
4900 }
4901
4902 return true;
4903}
4904
4905static void
4906add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4907{
4908 if (!*mods)
4909 return;
4910
4911 if (*cap - *size < 1) {
4912 uint64_t new_cap = *cap * 2;
4913 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4914
4915 if (!new_mods) {
4916 kfree(*mods);
4917 *mods = NULL;
4918 return;
4919 }
4920
4921 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4922 kfree(*mods);
4923 *mods = new_mods;
4924 *cap = new_cap;
4925 }
4926
4927 (*mods)[*size] = mod;
4928 *size += 1;
4929}
4930
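add_modifier() is a grow-by-doubling append helper. Note the failure behaviour: if the kmalloc fails, *mods is freed and set to NULL, so every later call becomes a no-op and the caller only has to check for NULL once at the end. A minimal usage sketch (the starting capacity is arbitrary):

	uint64_t *mods, size = 0, cap = 4;

	mods = kmalloc(cap * sizeof(uint64_t), GFP_KERNEL);
	add_modifier(&mods, &size, &cap, DRM_FORMAT_MOD_LINEAR);
	add_modifier(&mods, &size, &cap, DRM_FORMAT_MOD_INVALID);
	if (!mods)
		return -ENOMEM; /* any appends after a failed grow were skipped */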
4931static void
4932add_gfx9_modifiers(const struct amdgpu_device *adev,
4933 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4934{
4935 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4936 int pipe_xor_bits = min(8, pipes +
4937 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4938 int bank_xor_bits = min(8 - pipe_xor_bits,
4939 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4940 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4941 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4942
4943
4944 if (adev->family == AMDGPU_FAMILY_RV) {
4945 /* Raven2 and later */
4946 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4947
4948 /*
4949 * No _D DCC swizzles yet because we only allow 32bpp, which
4950 * doesn't support _D on DCN
4951 */
4952
4953 if (has_constant_encode) {
4954 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4955 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4956 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4957 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4958 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4959 AMD_FMT_MOD_SET(DCC, 1) |
4960 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4961 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4962 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4963 }
4964
4965 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4966 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4967 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4968 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4969 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4970 AMD_FMT_MOD_SET(DCC, 1) |
4971 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4972 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4973 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4974
4975 if (has_constant_encode) {
4976 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4977 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4978 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4979 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4980 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4981 AMD_FMT_MOD_SET(DCC, 1) |
4982 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4983 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4984 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4985
4986 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4987 AMD_FMT_MOD_SET(RB, rb) |
4988 AMD_FMT_MOD_SET(PIPE, pipes));
4989 }
4990
4991 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4992 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4993 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4994 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4995 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4996 AMD_FMT_MOD_SET(DCC, 1) |
4997 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4998 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4999 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5000 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5001 AMD_FMT_MOD_SET(RB, rb) |
5002 AMD_FMT_MOD_SET(PIPE, pipes));
5003 }
5004
5005 /*
5006 * Only supported for 64bpp on Raven, will be filtered on format in
5007 * dm_plane_format_mod_supported.
5008 */
5009 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5010 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5011 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5012 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5013 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5014
5015 if (adev->family == AMDGPU_FAMILY_RV) {
5016 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5017 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5018 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5019 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5020 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5021 }
5022
5023 /*
5024 * Only supported for 64bpp on Raven, will be filtered on format in
5025 * dm_plane_format_mod_supported.
5026 */
5027 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5028 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5029 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5030
5031 if (adev->family == AMDGPU_FAMILY_RV) {
5032 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5033 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5034 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5035 }
5036}
5037
5038static void
5039add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5040 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5041{
5042 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5043
5044 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5045 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5046 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5047 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5048 AMD_FMT_MOD_SET(DCC, 1) |
5049 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5050 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5051 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5052
5053 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5054 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5055 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5056 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5057 AMD_FMT_MOD_SET(DCC, 1) |
5058 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5059 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5060 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5061 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5062
5063 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5064 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5065 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5066 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5067
5068 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5069 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5070 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5071 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5072
5073
5074 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5075 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5076 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5077 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5078
5079 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5080 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5081 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5082}
5083
5084static void
5085add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5086 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5087{
5088 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5089 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5090
5091 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5092 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5093 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5094 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5095 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5096 AMD_FMT_MOD_SET(DCC, 1) |
5097 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5098 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5099 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5100 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5101
7f6ab50a
JA
5102 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5103 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5104 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5105 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5106 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5107 AMD_FMT_MOD_SET(DCC, 1) |
5108 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5109 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5110 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5111
faa37f54
BN
5112 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5113 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5114 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5115 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5116 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5117 AMD_FMT_MOD_SET(DCC, 1) |
5118 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5119 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5120 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5121 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5122 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5123
7f6ab50a
JA
5124 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5125 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5126 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5127 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5128 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5129 AMD_FMT_MOD_SET(DCC, 1) |
5130 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5131 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5132 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5133 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5134
faa37f54
BN
5135 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5136 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5137 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5138 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5139 AMD_FMT_MOD_SET(PACKERS, pkrs));
5140
5141 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5142 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5143 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5144 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5145 AMD_FMT_MOD_SET(PACKERS, pkrs));
5146
5147 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5148 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5149 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5150 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5151
5152 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5153 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5154 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5155}
5156
5157static int
5158get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5159{
5160 uint64_t size = 0, capacity = 128;
5161 *mods = NULL;
5162
5163 /* We have not hooked up any pre-GFX9 modifiers. */
5164 if (adev->family < AMDGPU_FAMILY_AI)
5165 return 0;
5166
5167 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5168
5169 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5170 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5171 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5172 return *mods ? 0 : -ENOMEM;
5173 }
5174
5175 switch (adev->family) {
5176 case AMDGPU_FAMILY_AI:
5177 case AMDGPU_FAMILY_RV:
5178 add_gfx9_modifiers(adev, mods, &size, &capacity);
5179 break;
5180 case AMDGPU_FAMILY_NV:
5181 case AMDGPU_FAMILY_VGH:
1ebcaebd 5182 case AMDGPU_FAMILY_YC:
1d789535 5183 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
faa37f54
BN
5184 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5185 else
5186 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5187 break;
5188 }
5189
5190 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5191
5192 /* INVALID marks the end of the list. */
5193 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5194
5195 if (!*mods)
5196 return -ENOMEM;
5197
5198 return 0;
5199}
5200
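The returned array is terminated with DRM_FORMAT_MOD_INVALID because that is the sentinel DRM core expects in a modifier list. A hedged sketch of how the list is consumed during plane init (formats, num_formats and possible_crtcs are placeholders; the real call sits in this driver's plane initialization path):

	uint64_t *modifiers = NULL;
	int res = get_plane_modifiers(adev, plane->type, &modifiers);

	if (res)
		return res;

	/* drm_universal_plane_init() copies the array, so it can be freed. */
	res = drm_universal_plane_init(adev_to_drm(adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	kfree(modifiers);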
37384b3f
BN
5201static int
5202fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5203 const struct amdgpu_framebuffer *afb,
5204 const enum surface_pixel_format format,
5205 const enum dc_rotation_angle rotation,
5206 const struct plane_size *plane_size,
5207 union dc_tiling_info *tiling_info,
5208 struct dc_plane_dcc_param *dcc,
5209 struct dc_plane_address *address,
5210 const bool force_disable_dcc)
5211{
5212 const uint64_t modifier = afb->base.modifier;
2be7f77f 5213 int ret = 0;
37384b3f
BN
5214
5215 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5216 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5217
5218 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5219 uint64_t dcc_address = afb->address + afb->base.offsets[1];
3d360154 5220 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
a86396c3 5221 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
37384b3f
BN
5222
5223 dcc->enable = 1;
5224 dcc->meta_pitch = afb->base.pitches[1];
3d360154 5225 dcc->independent_64b_blks = independent_64b_blks;
a86396c3
JA
5226 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5227 if (independent_64b_blks && independent_128b_blks)
f2e7d856 5228 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
a86396c3
JA
5229 else if (independent_128b_blks)
5230 dcc->dcc_ind_blk = hubp_ind_block_128b;
5231 else if (independent_64b_blks && !independent_128b_blks)
f2e7d856 5232 dcc->dcc_ind_blk = hubp_ind_block_64b;
a86396c3
JA
5233 else
5234 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5235 } else {
5236 if (independent_64b_blks)
5237 dcc->dcc_ind_blk = hubp_ind_block_64b;
5238 else
5239 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5240 }
37384b3f
BN
5241
5242 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5243 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5244 }
5245
5246 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5247 if (ret)
2be7f77f 5248 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
7df7e505 5249
2be7f77f 5250 return ret;
09e5665a
NK
5251}
5252
5253static int
320932bf 5254fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 5255 const struct amdgpu_framebuffer *afb,
695af5f9
NK
5256 const enum surface_pixel_format format,
5257 const enum dc_rotation_angle rotation,
5258 const uint64_t tiling_flags,
09e5665a 5259 union dc_tiling_info *tiling_info,
12e2b2d4 5260 struct plane_size *plane_size,
09e5665a 5261 struct dc_plane_dcc_param *dcc,
87b7ebc2 5262 struct dc_plane_address *address,
5888f07a 5263 bool tmz_surface,
87b7ebc2 5264 bool force_disable_dcc)
09e5665a 5265{
320932bf 5266 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
5267 int ret;
5268
5269 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 5270 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 5271 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
5272 memset(address, 0, sizeof(*address));
5273
5888f07a
HW
5274 address->tmz_surface = tmz_surface;
5275
695af5f9 5276 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
5277 uint64_t addr = afb->address + fb->offsets[0];
5278
12e2b2d4
DL
5279 plane_size->surface_size.x = 0;
5280 plane_size->surface_size.y = 0;
5281 plane_size->surface_size.width = fb->width;
5282 plane_size->surface_size.height = fb->height;
5283 plane_size->surface_pitch =
320932bf
NK
5284 fb->pitches[0] / fb->format->cpp[0];
5285
e0634e8d 5286 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
5287 address->grph.addr.low_part = lower_32_bits(addr);
5288 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 5289 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 5290 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 5291 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 5292
12e2b2d4
DL
5293 plane_size->surface_size.x = 0;
5294 plane_size->surface_size.y = 0;
5295 plane_size->surface_size.width = fb->width;
5296 plane_size->surface_size.height = fb->height;
5297 plane_size->surface_pitch =
320932bf
NK
5298 fb->pitches[0] / fb->format->cpp[0];
5299
12e2b2d4
DL
5300 plane_size->chroma_size.x = 0;
5301 plane_size->chroma_size.y = 0;
320932bf 5302 /* TODO: set these based on surface format */
12e2b2d4
DL
5303 plane_size->chroma_size.width = fb->width / 2;
5304 plane_size->chroma_size.height = fb->height / 2;
320932bf 5305
12e2b2d4 5306 plane_size->chroma_pitch =
320932bf
NK
5307 fb->pitches[1] / fb->format->cpp[1];
5308
e0634e8d
NK
5309 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5310 address->video_progressive.luma_addr.low_part =
be7b9b32 5311 lower_32_bits(luma_addr);
e0634e8d 5312 address->video_progressive.luma_addr.high_part =
be7b9b32 5313 upper_32_bits(luma_addr);
e0634e8d
NK
5314 address->video_progressive.chroma_addr.low_part =
5315 lower_32_bits(chroma_addr);
5316 address->video_progressive.chroma_addr.high_part =
5317 upper_32_bits(chroma_addr);
5318 }
09e5665a 5319
a3241991 5320 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
5321 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5322 rotation, plane_size,
5323 tiling_info, dcc,
5324 address,
5325 force_disable_dcc);
09e5665a
NK
5326 if (ret)
5327 return ret;
a3241991
BN
5328 } else {
5329 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
5330 }
5331
5332 return 0;
7df7e505
NK
5333}
5334
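A worked NV12 example for the video branch above (numbers illustrative):

/*
 * A 1920x1080 NV12 fb has cpp = {1, 2}. With pitches[0] = 1920 and
 * pitches[1] = 1920 bytes:
 *   surface_pitch = 1920 / 1 = 1920 pixels (luma)
 *   chroma_pitch  = 1920 / 2 =  960 pixels (interleaved CbCr)
 *   chroma_size   = 960x540, i.e. half resolution on both axes
 * offsets[1] locates the CbCr plane within the same buffer object.
 */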
d74004b6 5335static void
695af5f9 5336fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
5337 bool *per_pixel_alpha, bool *global_alpha,
5338 int *global_alpha_value)
5339{
5340 *per_pixel_alpha = false;
5341 *global_alpha = false;
5342 *global_alpha_value = 0xff;
5343
5344 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5345 return;
5346
5347 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5348 static const uint32_t alpha_formats[] = {
5349 DRM_FORMAT_ARGB8888,
5350 DRM_FORMAT_RGBA8888,
5351 DRM_FORMAT_ABGR8888,
5352 };
5353 uint32_t format = plane_state->fb->format->format;
5354 unsigned int i;
5355
5356 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5357 if (format == alpha_formats[i]) {
5358 *per_pixel_alpha = true;
5359 break;
5360 }
5361 }
5362 }
5363
5364 if (plane_state->alpha < 0xffff) {
5365 *global_alpha = true;
5366 *global_alpha_value = plane_state->alpha >> 8;
5367 }
5368}
5369
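The DRM plane alpha property is 16-bit (0x0000..0xffff) while DC takes an 8-bit global alpha, hence the shift above. A quick worked example:

/*
 * plane_state->alpha == 0x8000 (~50% opacity) is below 0xffff, so
 * *global_alpha = true and *global_alpha_value = 0x8000 >> 8 = 0x80.
 * A fully opaque plane (0xffff) leaves global alpha disabled.
 */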
004fefa3
NK
5370static int
5371fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 5372 const enum surface_pixel_format format,
004fefa3
NK
5373 enum dc_color_space *color_space)
5374{
5375 bool full_range;
5376
5377 *color_space = COLOR_SPACE_SRGB;
5378
5379 /* DRM color properties only affect non-RGB formats. */
695af5f9 5380 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
5381 return 0;
5382
5383 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5384
5385 switch (plane_state->color_encoding) {
5386 case DRM_COLOR_YCBCR_BT601:
5387 if (full_range)
5388 *color_space = COLOR_SPACE_YCBCR601;
5389 else
5390 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5391 break;
5392
5393 case DRM_COLOR_YCBCR_BT709:
5394 if (full_range)
5395 *color_space = COLOR_SPACE_YCBCR709;
5396 else
5397 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5398 break;
5399
5400 case DRM_COLOR_YCBCR_BT2020:
5401 if (full_range)
5402 *color_space = COLOR_SPACE_2020_YCBCR;
5403 else
5404 return -EINVAL;
5405 break;
5406
5407 default:
5408 return -EINVAL;
5409 }
5410
5411 return 0;
5412}
5413
695af5f9
NK
5414static int
5415fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5416 const struct drm_plane_state *plane_state,
5417 const uint64_t tiling_flags,
5418 struct dc_plane_info *plane_info,
87b7ebc2 5419 struct dc_plane_address *address,
5888f07a 5420 bool tmz_surface,
87b7ebc2 5421 bool force_disable_dcc)
695af5f9
NK
5422{
5423 const struct drm_framebuffer *fb = plane_state->fb;
5424 const struct amdgpu_framebuffer *afb =
5425 to_amdgpu_framebuffer(plane_state->fb);
695af5f9
NK
5426 int ret;
5427
5428 memset(plane_info, 0, sizeof(*plane_info));
5429
5430 switch (fb->format->format) {
5431 case DRM_FORMAT_C8:
5432 plane_info->format =
5433 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5434 break;
5435 case DRM_FORMAT_RGB565:
5436 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5437 break;
5438 case DRM_FORMAT_XRGB8888:
5439 case DRM_FORMAT_ARGB8888:
5440 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5441 break;
5442 case DRM_FORMAT_XRGB2101010:
5443 case DRM_FORMAT_ARGB2101010:
5444 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5445 break;
5446 case DRM_FORMAT_XBGR2101010:
5447 case DRM_FORMAT_ABGR2101010:
5448 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5449 break;
5450 case DRM_FORMAT_XBGR8888:
5451 case DRM_FORMAT_ABGR8888:
5452 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5453 break;
5454 case DRM_FORMAT_NV21:
5455 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5456 break;
5457 case DRM_FORMAT_NV12:
5458 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5459 break;
cbec6477
SW
5460 case DRM_FORMAT_P010:
5461 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5462 break;
492548dc
SW
5463 case DRM_FORMAT_XRGB16161616F:
5464 case DRM_FORMAT_ARGB16161616F:
5465 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5466 break;
2a5195dc
MK
5467 case DRM_FORMAT_XBGR16161616F:
5468 case DRM_FORMAT_ABGR16161616F:
5469 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5470 break;
58020403
MK
5471 case DRM_FORMAT_XRGB16161616:
5472 case DRM_FORMAT_ARGB16161616:
5473 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5474 break;
5475 case DRM_FORMAT_XBGR16161616:
5476 case DRM_FORMAT_ABGR16161616:
5477 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5478 break;
695af5f9
NK
5479 default:
5480 DRM_ERROR(
92f1d09c
SA
5481 "Unsupported screen format %p4cc\n",
5482 &fb->format->format);
695af5f9
NK
5483 return -EINVAL;
5484 }
5485
5486 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5487 case DRM_MODE_ROTATE_0:
5488 plane_info->rotation = ROTATION_ANGLE_0;
5489 break;
5490 case DRM_MODE_ROTATE_90:
5491 plane_info->rotation = ROTATION_ANGLE_90;
5492 break;
5493 case DRM_MODE_ROTATE_180:
5494 plane_info->rotation = ROTATION_ANGLE_180;
5495 break;
5496 case DRM_MODE_ROTATE_270:
5497 plane_info->rotation = ROTATION_ANGLE_270;
5498 break;
5499 default:
5500 plane_info->rotation = ROTATION_ANGLE_0;
5501 break;
5502 }
5503
5504 plane_info->visible = true;
5505 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5506
6d83a32d
MS
5507 plane_info->layer_index = 0;
5508
695af5f9
NK
5509 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5510 &plane_info->color_space);
5511 if (ret)
5512 return ret;
5513
5514 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5515 plane_info->rotation, tiling_flags,
5516 &plane_info->tiling_info,
5517 &plane_info->plane_size,
5888f07a 5518 &plane_info->dcc, address, tmz_surface,
87b7ebc2 5519 force_disable_dcc);
695af5f9
NK
5520 if (ret)
5521 return ret;
5522
5523 fill_blending_from_plane_state(
5524 plane_state, &plane_info->per_pixel_alpha,
5525 &plane_info->global_alpha, &plane_info->global_alpha_value);
5526
5527 return 0;
5528}
5529
5530static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5531 struct dc_plane_state *dc_plane_state,
5532 struct drm_plane_state *plane_state,
5533 struct drm_crtc_state *crtc_state)
e7b07cee 5534{
cf020d49 5535 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 5536 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
5537 struct dc_scaling_info scaling_info;
5538 struct dc_plane_info plane_info;
695af5f9 5539 int ret;
87b7ebc2 5540 bool force_disable_dcc = false;
e7b07cee 5541
4375d625 5542 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
695af5f9
NK
5543 if (ret)
5544 return ret;
e7b07cee 5545
695af5f9
NK
5546 dc_plane_state->src_rect = scaling_info.src_rect;
5547 dc_plane_state->dst_rect = scaling_info.dst_rect;
5548 dc_plane_state->clip_rect = scaling_info.clip_rect;
5549 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 5550
87b7ebc2 5551 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 5552 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 5553 afb->tiling_flags,
695af5f9 5554 &plane_info,
87b7ebc2 5555 &dc_plane_state->address,
6eed95b0 5556 afb->tmz_surface,
87b7ebc2 5557 force_disable_dcc);
004fefa3
NK
5558 if (ret)
5559 return ret;
5560
695af5f9
NK
5561 dc_plane_state->format = plane_info.format;
5562 dc_plane_state->color_space = plane_info.color_space;
5564 dc_plane_state->plane_size = plane_info.plane_size;
5565 dc_plane_state->rotation = plane_info.rotation;
5566 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5567 dc_plane_state->stereo_format = plane_info.stereo_format;
5568 dc_plane_state->tiling_info = plane_info.tiling_info;
5569 dc_plane_state->visible = plane_info.visible;
5570 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5571 dc_plane_state->global_alpha = plane_info.global_alpha;
5572 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5573 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 5574 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
7afa0033 5575 dc_plane_state->flip_int_enabled = true;
695af5f9 5576
e277adc5
LSL
5577 /*
5578 * Always set input transfer function, since plane state is refreshed
5579 * every time.
5580 */
cf020d49
NK
5581 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5582 if (ret)
5583 return ret;
e7b07cee 5584
cf020d49 5585 return 0;
e7b07cee
HW
5586}
5587
3ee6b26b
AD
5588static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5589 const struct dm_connector_state *dm_state,
5590 struct dc_stream_state *stream)
e7b07cee
HW
5591{
5592 enum amdgpu_rmx_type rmx_type;
5593
5594 struct rect src = { 0 }; /* viewport in composition space */
5595 struct rect dst = { 0 }; /* stream addressable area */
5596
5597 /* No mode; nothing to be done */
5598 if (!mode)
5599 return;
5600
5601 /* Full screen scaling by default */
5602 src.width = mode->hdisplay;
5603 src.height = mode->vdisplay;
5604 dst.width = stream->timing.h_addressable;
5605 dst.height = stream->timing.v_addressable;
5606
f4791779
HW
5607 if (dm_state) {
5608 rmx_type = dm_state->scaling;
5609 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5610 if (src.width * dst.height <
5611 src.height * dst.width) {
5612 /* height needs less upscaling/more downscaling */
5613 dst.width = src.width *
5614 dst.height / src.height;
5615 } else {
5616 /* width needs less upscaling/more downscaling */
5617 dst.height = src.height *
5618 dst.width / src.width;
5619 }
5620 } else if (rmx_type == RMX_CENTER) {
5621 dst = src;
e7b07cee 5622 }
e7b07cee 5623
f4791779
HW
5624 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5625 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 5626
f4791779
HW
5627 if (dm_state->underscan_enable) {
5628 dst.x += dm_state->underscan_hborder / 2;
5629 dst.y += dm_state->underscan_vborder / 2;
5630 dst.width -= dm_state->underscan_hborder;
5631 dst.height -= dm_state->underscan_vborder;
5632 }
e7b07cee
HW
5633 }
5634
5635 stream->src = src;
5636 stream->dst = dst;
5637
4711c033
LT
5638 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5639 dst.x, dst.y, dst.width, dst.height);
e7b07cee
HW
5640
5641}
5642
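A worked RMX_ASPECT example for the math above (numbers illustrative):

/*
 * src = 1920x1080 viewport into a 1600x1200 stream with RMX_ASPECT:
 * 1920 * 1200 >= 1080 * 1600, so the "width needs less upscaling"
 * branch is taken:
 *   dst.height = 1080 * 1600 / 1920 = 900
 *   dst.x = (1600 - 1600) / 2 = 0, dst.y = (1200 - 900) / 2 = 150
 * The image keeps its aspect ratio, letterboxed by 150 lines top
 * and bottom.
 */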
3ee6b26b 5643static enum dc_color_depth
42ba01fc 5644convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 5645 bool is_y420, int requested_bpc)
e7b07cee 5646{
1bc22f20 5647 uint8_t bpc;
01c22997 5648
1bc22f20
SW
5649 if (is_y420) {
5650 bpc = 8;
5651
5652 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5653 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5654 bpc = 16;
5655 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5656 bpc = 12;
5657 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5658 bpc = 10;
5659 } else {
5660 bpc = (uint8_t)connector->display_info.bpc;
5661 /* Assume 8 bpc by default if no bpc is specified. */
5662 bpc = bpc ? bpc : 8;
5663 }
e7b07cee 5664
cbd14ae7 5665 if (requested_bpc > 0) {
01c22997
NK
5666 /*
5667 * Cap display bpc based on the user requested value.
5668 *
5669 * The value for state->max_bpc may not be correctly updated
5670 * depending on when the connector gets added to the state
5671 * or if this was called outside of atomic check, so it
5672 * can't be used directly.
5673 */
cbd14ae7 5674 bpc = min_t(u8, bpc, requested_bpc);
01c22997 5675
1825fd34
NK
5676 /* Round down to the nearest even number. */
5677 bpc = bpc - (bpc & 1);
5678 }
07e3a1cf 5679
e7b07cee
HW
5680 switch (bpc) {
5681 case 0:
1f6010a9
DF
5682 /*
5683 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
5684 * EDID revisions before 1.4.
5685 * TODO: Fix edid parsing
5686 */
5687 return COLOR_DEPTH_888;
5688 case 6:
5689 return COLOR_DEPTH_666;
5690 case 8:
5691 return COLOR_DEPTH_888;
5692 case 10:
5693 return COLOR_DEPTH_101010;
5694 case 12:
5695 return COLOR_DEPTH_121212;
5696 case 14:
5697 return COLOR_DEPTH_141414;
5698 case 16:
5699 return COLOR_DEPTH_161616;
5700 default:
5701 return COLOR_DEPTH_UNDEFINED;
5702 }
5703}
5704
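A short worked example of the capping logic above:

/*
 * A sink reporting bpc = 10 with a userspace max bpc of 9 gives
 * bpc = min(10, 9) = 9, rounded down to even -> 8 -> COLOR_DEPTH_888.
 * With no user cap (requested_bpc == 0), the sink's 10 bpc maps
 * directly to COLOR_DEPTH_101010.
 */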
3ee6b26b
AD
5705static enum dc_aspect_ratio
5706get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 5707{
e11d4147
LSL
5708 /* 1-1 mapping, since both enums follow the HDMI spec. */
5709 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
5710}
5711
3ee6b26b
AD
5712static enum dc_color_space
5713get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
5714{
5715 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5716
5717 switch (dc_crtc_timing->pixel_encoding) {
5718 case PIXEL_ENCODING_YCBCR422:
5719 case PIXEL_ENCODING_YCBCR444:
5720 case PIXEL_ENCODING_YCBCR420:
5721 {
5722 /*
5723 * 27030 kHz is the separation point between HDTV and SDTV
5724 * according to the HDMI spec; we use YCbCr709 and YCbCr601,
5725 * respectively.
5726 */
380604e2 5727 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
5728 if (dc_crtc_timing->flags.Y_ONLY)
5729 color_space =
5730 COLOR_SPACE_YCBCR709_LIMITED;
5731 else
5732 color_space = COLOR_SPACE_YCBCR709;
5733 } else {
5734 if (dc_crtc_timing->flags.Y_ONLY)
5735 color_space =
5736 COLOR_SPACE_YCBCR601_LIMITED;
5737 else
5738 color_space = COLOR_SPACE_YCBCR601;
5739 }
5740
5741 }
5742 break;
5743 case PIXEL_ENCODING_RGB:
5744 color_space = COLOR_SPACE_SRGB;
5745 break;
5746
5747 default:
5748 WARN_ON(1);
5749 break;
5750 }
5751
5752 return color_space;
5753}
5754
ea117312
TA
5755static bool adjust_colour_depth_from_display_info(
5756 struct dc_crtc_timing *timing_out,
5757 const struct drm_display_info *info)
400443e8 5758{
ea117312 5759 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 5760 int normalized_clk;
400443e8 5761 do {
380604e2 5762 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
5763 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5764 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5765 normalized_clk /= 2;
5766 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
ea117312
TA
5767 switch (depth) {
5768 case COLOR_DEPTH_888:
5769 break;
400443e8
ML
5770 case COLOR_DEPTH_101010:
5771 normalized_clk = (normalized_clk * 30) / 24;
5772 break;
5773 case COLOR_DEPTH_121212:
5774 normalized_clk = (normalized_clk * 36) / 24;
5775 break;
5776 case COLOR_DEPTH_161616:
5777 normalized_clk = (normalized_clk * 48) / 24;
5778 break;
5779 default:
ea117312
TA
5780 /* The above depths are the only ones valid for HDMI. */
5781 return false;
400443e8 5782 }
ea117312
TA
5783 if (normalized_clk <= info->max_tmds_clock) {
5784 timing_out->display_color_depth = depth;
5785 return true;
5786 }
5787 } while (--depth > COLOR_DEPTH_666);
5788 return false;
400443e8 5789}
e7b07cee 5790
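A worked pass through the loop above, assuming an HDMI 2.0 sink that advertises max_tmds_clock = 600000 kHz (illustrative numbers):

/*
 * 4K60 RGB: pix_clk_100hz = 5940000, normalized_clk = 594000 kHz.
 * 12 bpc: 594000 * 36 / 24 = 891000 kHz > 600000 -> too fast.
 * 10 bpc: 594000 * 30 / 24 = 742500 kHz > 600000 -> too fast.
 *  8 bpc: 594000 kHz <= 600000 -> keep COLOR_DEPTH_888, return true.
 */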
42ba01fc
NK
5791static void fill_stream_properties_from_drm_display_mode(
5792 struct dc_stream_state *stream,
5793 const struct drm_display_mode *mode_in,
5794 const struct drm_connector *connector,
5795 const struct drm_connector_state *connector_state,
cbd14ae7
SW
5796 const struct dc_stream_state *old_stream,
5797 int requested_bpc)
e7b07cee
HW
5798{
5799 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 5800 const struct drm_display_info *info = &connector->display_info;
d4252eee 5801 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
5802 struct hdmi_vendor_infoframe hv_frame;
5803 struct hdmi_avi_infoframe avi_frame;
e7b07cee 5804
acf83f86
WL
5805 memset(&hv_frame, 0, sizeof(hv_frame));
5806 memset(&avi_frame, 0, sizeof(avi_frame));
5807
e7b07cee
HW
5808 timing_out->h_border_left = 0;
5809 timing_out->h_border_right = 0;
5810 timing_out->v_border_top = 0;
5811 timing_out->v_border_bottom = 0;
5812 /* TODO: un-hardcode */
fe61a2f1 5813 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 5814 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 5815 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
5816 else if (drm_mode_is_420_also(info, mode_in)
5817 && aconnector->force_yuv420_output)
5818 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 5819 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 5820 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
5821 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5822 else
5823 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5824
5825 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5826 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
5827 connector,
5828 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5829 requested_bpc);
e7b07cee
HW
5830 timing_out->scan_type = SCANNING_TYPE_NODATA;
5831 timing_out->hdmi_vic = 0;
b333730d
BL
5832
5833 if (old_stream) {
5834 timing_out->vic = old_stream->timing.vic;
5835 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5836 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5837 } else {
5838 timing_out->vic = drm_match_cea_mode(mode_in);
5839 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5840 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5841 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5842 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5843 }
e7b07cee 5844
1cb1d477
WL
5845 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5846 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5847 timing_out->vic = avi_frame.video_code;
5848 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5849 timing_out->hdmi_vic = hv_frame.vic;
5850 }
5851
fe8858bb
NC
5852 if (is_freesync_video_mode(mode_in, aconnector)) {
5853 timing_out->h_addressable = mode_in->hdisplay;
5854 timing_out->h_total = mode_in->htotal;
5855 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5856 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5857 timing_out->v_total = mode_in->vtotal;
5858 timing_out->v_addressable = mode_in->vdisplay;
5859 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5860 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5861 timing_out->pix_clk_100hz = mode_in->clock * 10;
5862 } else {
5863 timing_out->h_addressable = mode_in->crtc_hdisplay;
5864 timing_out->h_total = mode_in->crtc_htotal;
5865 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5866 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5867 timing_out->v_total = mode_in->crtc_vtotal;
5868 timing_out->v_addressable = mode_in->crtc_vdisplay;
5869 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5870 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5871 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5872 }
a85ba005 5873
e7b07cee 5874 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
5875
5876 stream->output_color_space = get_output_color_space(timing_out);
5877
e43a432c
AK
5878 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5879 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
5880 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5881 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5882 drm_mode_is_420_also(info, mode_in) &&
5883 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5884 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5885 adjust_colour_depth_from_display_info(timing_out, info);
5886 }
5887 }
e7b07cee
HW
5888}
5889
3ee6b26b
AD
5890static void fill_audio_info(struct audio_info *audio_info,
5891 const struct drm_connector *drm_connector,
5892 const struct dc_sink *dc_sink)
e7b07cee
HW
5893{
5894 int i = 0;
5895 int cea_revision = 0;
5896 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5897
5898 audio_info->manufacture_id = edid_caps->manufacturer_id;
5899 audio_info->product_id = edid_caps->product_id;
5900
5901 cea_revision = drm_connector->display_info.cea_rev;
5902
090afc1e 5903 strscpy(audio_info->display_name,
d2b2562c 5904 edid_caps->display_name,
090afc1e 5905 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 5906
b830ebc9 5907 if (cea_revision >= 3) {
e7b07cee
HW
5908 audio_info->mode_count = edid_caps->audio_mode_count;
5909
5910 for (i = 0; i < audio_info->mode_count; ++i) {
5911 audio_info->modes[i].format_code =
5912 (enum audio_format_code)
5913 (edid_caps->audio_modes[i].format_code);
5914 audio_info->modes[i].channel_count =
5915 edid_caps->audio_modes[i].channel_count;
5916 audio_info->modes[i].sample_rates.all =
5917 edid_caps->audio_modes[i].sample_rate;
5918 audio_info->modes[i].sample_size =
5919 edid_caps->audio_modes[i].sample_size;
5920 }
5921 }
5922
5923 audio_info->flags.all = edid_caps->speaker_flags;
5924
5925 /* TODO: We only check for progressive mode; check for interlaced mode too */
b830ebc9 5926 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5927 audio_info->video_latency = drm_connector->video_latency[0];
5928 audio_info->audio_latency = drm_connector->audio_latency[0];
5929 }
5930
5931 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5932
5933}
5934
3ee6b26b
AD
5935static void
5936copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5937 struct drm_display_mode *dst_mode)
e7b07cee
HW
5938{
5939 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5940 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5941 dst_mode->crtc_clock = src_mode->crtc_clock;
5942 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5943 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5944 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
5945 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5946 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5947 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5948 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5949 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5950 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5951 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5952 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5953}
5954
3ee6b26b
AD
5955static void
5956decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5957 const struct drm_display_mode *native_mode,
5958 bool scale_enabled)
e7b07cee
HW
5959{
5960 if (scale_enabled) {
5961 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5962 } else if (native_mode->clock == drm_mode->clock &&
5963 native_mode->htotal == drm_mode->htotal &&
5964 native_mode->vtotal == drm_mode->vtotal) {
5965 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5966 } else {
5967 /* neither scaling nor an amdgpu-inserted mode; no need to patch */
5968 }
5969}
5970
aed15309
ML
5971static struct dc_sink *
5972create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5973{
2e0ac3d6 5974 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5975 struct dc_sink *sink = NULL;
2e0ac3d6
HW
5976 sink_init_data.link = aconnector->dc_link;
5977 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5978
5979 sink = dc_sink_create(&sink_init_data);
423788c7 5980 if (!sink) {
2e0ac3d6 5981 DRM_ERROR("Failed to create sink!\n");
aed15309 5982 return NULL;
423788c7 5983 }
2e0ac3d6 5984 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5985
aed15309 5986 return sink;
2e0ac3d6
HW
5987}
5988
fa2123db
ML
5989static void set_multisync_trigger_params(
5990 struct dc_stream_state *stream)
5991{
ec372186
ML
5992 struct dc_stream_state *master = NULL;
5993
fa2123db 5994 if (stream->triggered_crtc_reset.enabled) {
ec372186
ML
5995 master = stream->triggered_crtc_reset.event_source;
5996 stream->triggered_crtc_reset.event =
5997 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5998 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5999 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
fa2123db
ML
6000 }
6001}
6002
6003static void set_master_stream(struct dc_stream_state *stream_set[],
6004 int stream_count)
6005{
6006 int j, highest_rfr = 0, master_stream = 0;
6007
6008 for (j = 0; j < stream_count; j++) {
6009 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6010 int refresh_rate = 0;
6011
380604e2 6012 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
6013 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6014 if (refresh_rate > highest_rfr) {
6015 highest_rfr = refresh_rate;
6016 master_stream = j;
6017 }
6018 }
6019 }
6020 for (j = 0; j < stream_count; j++) {
03736f4c 6021 if (stream_set[j])
fa2123db
ML
6022 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6023 }
6024}
6025
6026static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6027{
6028 int i = 0;
ec372186 6029 struct dc_stream_state *stream;
fa2123db
ML
6030
6031 if (context->stream_count < 2)
6032 return;
6033 for (i = 0; i < context->stream_count ; i++) {
6034 if (!context->streams[i])
6035 continue;
1f6010a9
DF
6036 /*
6037 * TODO: add a function to read AMD VSDB bits and set
fa2123db 6038 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 6039 * For now it's set to false
fa2123db 6040 */
fa2123db 6041 }
ec372186 6042
fa2123db 6043 set_master_stream(context->streams, context->stream_count);
ec372186
ML
6044
6045 for (i = 0; i < context->stream_count ; i++) {
6046 stream = context->streams[i];
6047
6048 if (!stream)
6049 continue;
6050
6051 set_multisync_trigger_params(stream);
6052 }
fa2123db
ML
6053}
6054
ea2be5c0 6055#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6056static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6057 struct dc_sink *sink, struct dc_stream_state *stream,
6058 struct dsc_dec_dpcd_caps *dsc_caps)
6059{
6060 stream->timing.flags.DSC = 0;
6061
2665f63a
ML
6062 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6063 sink->sink_signal == SIGNAL_TYPE_EDP)) {
50b1f44e
FZ
6064 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6065 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6066 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6067 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6068 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6069 dsc_caps);
998b7ad2
FZ
6070 }
6071}
6072
2665f63a
ML
6073static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6074 struct dc_sink *sink, struct dc_stream_state *stream,
6075 struct dsc_dec_dpcd_caps *dsc_caps,
6076 uint32_t max_dsc_target_bpp_limit_override)
6077{
6078 const struct dc_link_settings *verified_link_cap = NULL;
6079 uint32_t link_bw_in_kbps;
6080 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6081 struct dc *dc = sink->ctx->dc;
6082 struct dc_dsc_bw_range bw_range = {0};
6083 struct dc_dsc_config dsc_cfg = {0};
6084
6085 verified_link_cap = dc_link_get_link_cap(stream->link);
6086 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6087 edp_min_bpp_x16 = 8 * 16;
6088 edp_max_bpp_x16 = 8 * 16;
6089
6090 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6091 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6092
6093 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6094 edp_min_bpp_x16 = edp_max_bpp_x16;
6095
6096 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6097 dc->debug.dsc_min_slice_height_override,
6098 edp_min_bpp_x16, edp_max_bpp_x16,
6099 dsc_caps,
6100 &stream->timing,
6101 &bw_range)) {
6102
6103 if (bw_range.max_kbps < link_bw_in_kbps) {
6104 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6105 dsc_caps,
6106 dc->debug.dsc_min_slice_height_override,
6107 max_dsc_target_bpp_limit_override,
6108 0,
6109 &stream->timing,
6110 &dsc_cfg)) {
6111 stream->timing.dsc_cfg = dsc_cfg;
6112 stream->timing.flags.DSC = 1;
6113 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6114 }
6115 return;
6116 }
6117 }
6118
6119 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6120 dsc_caps,
6121 dc->debug.dsc_min_slice_height_override,
6122 max_dsc_target_bpp_limit_override,
6123 link_bw_in_kbps,
6124 &stream->timing,
6125 &dsc_cfg)) {
6126 stream->timing.dsc_cfg = dsc_cfg;
6127 stream->timing.flags.DSC = 1;
6128 }
6129}
6130
998b7ad2
FZ
6131static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6132 struct dc_sink *sink, struct dc_stream_state *stream,
6133 struct dsc_dec_dpcd_caps *dsc_caps)
6134{
6135 struct drm_connector *drm_connector = &aconnector->base;
6136 uint32_t link_bandwidth_kbps;
f1c1a982 6137 uint32_t max_dsc_target_bpp_limit_override = 0;
2665f63a 6138 struct dc *dc = sink->ctx->dc;
50b1f44e
FZ
6139 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6140 uint32_t dsc_max_supported_bw_in_kbps;
998b7ad2
FZ
6141
6142 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6143 dc_link_get_link_cap(aconnector->dc_link));
f1c1a982
RL
6144
6145 if (stream->link && stream->link->local_sink)
6146 max_dsc_target_bpp_limit_override =
6147 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6148
998b7ad2
FZ
6149 /* Set DSC policy according to dsc_clock_en */
6150 dc_dsc_policy_set_enable_dsc_when_not_needed(
6151 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6152
2665f63a
ML
6153 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6154 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6155
6156 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6157
6158 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
50b1f44e
FZ
6159 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6160 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
998b7ad2
FZ
6161 dsc_caps,
6162 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
f1c1a982 6163 max_dsc_target_bpp_limit_override,
998b7ad2
FZ
6164 link_bandwidth_kbps,
6165 &stream->timing,
6166 &stream->timing.dsc_cfg)) {
50b1f44e
FZ
6167 stream->timing.flags.DSC = 1;
6168 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6169 __func__, drm_connector->name);
6170 }
6171 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6172 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6173 max_supported_bw_in_kbps = link_bandwidth_kbps;
6174 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6175
6176 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6177 max_supported_bw_in_kbps > 0 &&
6178 dsc_max_supported_bw_in_kbps > 0)
6179 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6180 dsc_caps,
6181 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6182 max_dsc_target_bpp_limit_override,
6183 dsc_max_supported_bw_in_kbps,
6184 &stream->timing,
6185 &stream->timing.dsc_cfg)) {
6186 stream->timing.flags.DSC = 1;
6187 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6188 __func__, drm_connector->name);
6189 }
998b7ad2
FZ
6190 }
6191 }
6192
6193 /* Overwrite the stream flag if DSC is enabled through debugfs */
6194 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6195 stream->timing.flags.DSC = 1;
6196
6197 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6198 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6199
6200 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6201 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6202
6203 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6204 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
998b7ad2 6205}
433e5dec 6206#endif /* CONFIG_DRM_AMD_DC_DCN */
998b7ad2 6207
5fd953a3
RS
6208/**
6209 * DOC: FreeSync Video
6210 *
6211 * When a userspace application wants to play a video, the content follows a
6212 * standard format definition that usually specifies the FPS for that format.
6213 * The list below illustrates some video formats and their expected FPS,
6214 * respectively:
6215 *
6216 * - TV/NTSC (23.976 FPS)
6217 * - Cinema (24 FPS)
6218 * - TV/PAL (25 FPS)
6219 * - TV/NTSC (29.97 FPS)
6220 * - TV/NTSC (30 FPS)
6221 * - Cinema HFR (48 FPS)
6222 * - TV/PAL (50 FPS)
6223 * - Commonly used (60 FPS)
12cdff6b 6224 * - Multiples of 24 (48, 72, 96, 120 FPS)
5fd953a3
RS
6225 *
6226 * The list of standard video formats is not huge and can be added to the
6227 * connector's modeset list beforehand. With that, userspace can leverage
6228 * FreeSync to extend the front porch in order to attain the target refresh
6229 * rate. Such a switch will happen seamlessly, without screen blanking or
6230 * reprogramming of the output in any other way. If userspace requests a
6231 * modesetting change compatible with FreeSync modes that only differ in the
6232 * refresh rate, DC will skip the full update and avoid blinking during the
6233 * transition. For example, the video player can change the modesetting from
6234 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6235 * causing any display blink. This same concept can be applied to a mode
6236 * setting change.
6237 */
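The refresh switch works purely by stretching the vertical front porch while the pixel clock and horizontal timing stay fixed. A worked example using the standard 1080p60 CEA timing (illustrative):

/*
 * 1920x1080@60: htotal = 2200, vtotal = 1125, clock = 148500 kHz
 *   148500000 / (2200 * 1125) = 60 Hz
 * Adding 1125 lines of vertical front porch doubles vtotal:
 *   148500000 / (2200 * 2250) = 30 Hz
 * with no pixel clock or sync change that the sink would notice.
 */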
a85ba005
NC
6238static struct drm_display_mode *
6239get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6240 bool use_probed_modes)
6241{
6242 struct drm_display_mode *m, *m_pref = NULL;
6243 u16 current_refresh, highest_refresh;
6244 struct list_head *list_head = use_probed_modes ?
6245 &aconnector->base.probed_modes :
6246 &aconnector->base.modes;
6247
6248 if (aconnector->freesync_vid_base.clock != 0)
6249 return &aconnector->freesync_vid_base;
6250
6251 /* Find the preferred mode */
6252 list_for_each_entry(m, list_head, head) {
6253 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6254 m_pref = m;
6255 break;
6256 }
6257 }
6258
6259 if (!m_pref) {
6260 /* Probably an EDID with no preferred mode. Fallback to first entry */
6261 m_pref = list_first_entry_or_null(
6262 &aconnector->base.modes, struct drm_display_mode, head);
6263 if (!m_pref) {
6264 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6265 return NULL;
6266 }
6267 }
6268
6269 highest_refresh = drm_mode_vrefresh(m_pref);
6270
6271 /*
6272 * Find the mode with highest refresh rate with same resolution.
6273 * For some monitors, preferred mode is not the mode with highest
6274 * supported refresh rate.
6275 */
6276 list_for_each_entry(m, list_head, head) {
6277 current_refresh = drm_mode_vrefresh(m);
6278
6279 if (m->hdisplay == m_pref->hdisplay &&
6280 m->vdisplay == m_pref->vdisplay &&
6281 highest_refresh < current_refresh) {
6282 highest_refresh = current_refresh;
6283 m_pref = m;
6284 }
6285 }
6286
6287 aconnector->freesync_vid_base = *m_pref;
6288 return m_pref;
6289}
6290
fe8858bb 6291static bool is_freesync_video_mode(const struct drm_display_mode *mode,
a85ba005
NC
6292 struct amdgpu_dm_connector *aconnector)
6293{
6294 struct drm_display_mode *high_mode;
6295 int timing_diff;
6296
6297 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6298 if (!high_mode || !mode)
6299 return false;
6300
6301 timing_diff = high_mode->vtotal - mode->vtotal;
6302
6303 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6304 high_mode->hdisplay != mode->hdisplay ||
6305 high_mode->vdisplay != mode->vdisplay ||
6306 high_mode->hsync_start != mode->hsync_start ||
6307 high_mode->hsync_end != mode->hsync_end ||
6308 high_mode->htotal != mode->htotal ||
6309 high_mode->hskew != mode->hskew ||
6310 high_mode->vscan != mode->vscan ||
6311 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6312 high_mode->vsync_end - mode->vsync_end != timing_diff)
6313 return false;
6314 else
6315 return true;
6316}
6317
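Continuing the 1080p example (illustrative numbers), this predicate accepts exactly the modes that differ from the highest-refresh base mode by a constant vertical front-porch extension:

/*
 * Base mode 1080p60: vtotal = 1125, vsync_start = 1084. A 30 Hz
 * FreeSync variant with vtotal = 2250 and vsync_start = 2209 gives
 * timing_diff = 1125 - 2250 = -1125; the vsync_start and vsync_end
 * deltas both equal timing_diff, so the mode matches. Any change to
 * clock, hdisplay, htotal, etc. means a real modeset is required.
 */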
3ee6b26b
AD
6318static struct dc_stream_state *
6319create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6320 const struct drm_display_mode *drm_mode,
b333730d 6321 const struct dm_connector_state *dm_state,
cbd14ae7
SW
6322 const struct dc_stream_state *old_stream,
6323 int requested_bpc)
e7b07cee
HW
6324{
6325 struct drm_display_mode *preferred_mode = NULL;
391ef035 6326 struct drm_connector *drm_connector;
42ba01fc
NK
6327 const struct drm_connector_state *con_state =
6328 dm_state ? &dm_state->base : NULL;
0971c40e 6329 struct dc_stream_state *stream = NULL;
e7b07cee 6330 struct drm_display_mode mode = *drm_mode;
a85ba005
NC
6331 struct drm_display_mode saved_mode;
6332 struct drm_display_mode *freesync_mode = NULL;
e7b07cee 6333 bool native_mode_found = false;
b0781603
NK
6334 bool recalculate_timing = false;
6335 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
b333730d 6336 int mode_refresh;
58124bf8 6337 int preferred_refresh = 0;
defeb878 6338#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 6339 struct dsc_dec_dpcd_caps dsc_caps;
7c431455 6340#endif
aed15309 6341 struct dc_sink *sink = NULL;
a85ba005
NC
6342
6343 memset(&saved_mode, 0, sizeof(saved_mode));
6344
b830ebc9 6345 if (aconnector == NULL) {
e7b07cee 6346 DRM_ERROR("aconnector is NULL!\n");
64245fa7 6347 return stream;
e7b07cee
HW
6348 }
6349
e7b07cee 6350 drm_connector = &aconnector->base;
2e0ac3d6 6351
f4ac176e 6352 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
6353 sink = create_fake_sink(aconnector);
6354 if (!sink)
6355 return stream;
aed15309
ML
6356 } else {
6357 sink = aconnector->dc_sink;
dcd5fb82 6358 dc_sink_retain(sink);
f4ac176e 6359 }
2e0ac3d6 6360
aed15309 6361 stream = dc_create_stream_for_sink(sink);
4562236b 6362
b830ebc9 6363 if (stream == NULL) {
e7b07cee 6364 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 6365 goto finish;
e7b07cee
HW
6366 }
6367
ceb3dbb4
JL
6368 stream->dm_stream_context = aconnector;
6369
4a36fcba
WL
6370 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6371 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6372
e7b07cee
HW
6373 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6374 /* Search for preferred mode */
6375 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6376 native_mode_found = true;
6377 break;
6378 }
6379 }
6380 if (!native_mode_found)
6381 preferred_mode = list_first_entry_or_null(
6382 &aconnector->base.modes,
6383 struct drm_display_mode,
6384 head);
6385
b333730d
BL
6386 mode_refresh = drm_mode_vrefresh(&mode);
6387
b830ebc9 6388 if (preferred_mode == NULL) {
1f6010a9
DF
6389 /*
6390 * This may not be an error: the use case is when we have no
e7b07cee
HW
6391 * usermode calls to reset and set the mode upon hotplug. In this
6392 * case, we call set mode ourselves to restore the previous mode,
6393 * and the mode list may not have been filled in yet.
6394 */
f1ad2f5e 6395 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee 6396 } else {
b0781603 6397 recalculate_timing = amdgpu_freesync_vid_mode &&
a85ba005
NC
6398 is_freesync_video_mode(&mode, aconnector);
6399 if (recalculate_timing) {
6400 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6401 saved_mode = mode;
6402 mode = *freesync_mode;
6403 } else {
6404 decide_crtc_timing_for_drm_display_mode(
b0781603 6405 &mode, preferred_mode, scale);
a85ba005 6406
b0781603
NK
6407 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6408 }
e7b07cee
HW
6409 }
6410
a85ba005
NC
6411 if (recalculate_timing)
6412 drm_mode_set_crtcinfo(&saved_mode, 0);
fe8858bb 6413 else if (!dm_state)
f783577c
JFZ
6414 drm_mode_set_crtcinfo(&mode, 0);
6415
a85ba005 6416 /*
b333730d
BL
6417 * If scaling is enabled and the refresh rate didn't change,
6418 * we copy the vic and polarities of the old timings.
6419 */
b0781603 6420 if (!scale || mode_refresh != preferred_refresh)
a85ba005
NC
6421 fill_stream_properties_from_drm_display_mode(
6422 stream, &mode, &aconnector->base, con_state, NULL,
6423 requested_bpc);
b333730d 6424 else
a85ba005
NC
6425 fill_stream_properties_from_drm_display_mode(
6426 stream, &mode, &aconnector->base, con_state, old_stream,
6427 requested_bpc);
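/*
 * Example of the branch above, with assumed refresh rates: with
 * RMX_ASPECT scaling active and both the requested and the preferred
 * mode at 60 Hz, old_stream is passed in so the VIC and sync
 * polarities of the existing timings survive; if the rates differ
 * (say 60 Hz vs. 75 Hz), the properties are rebuilt from the drm
 * mode alone.
 */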
b333730d 6428
defeb878 6429#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6430 /* SST DSC determination policy */
6431 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6432 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6433 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
39a4eb85
WL
6434#endif
6435
e7b07cee
HW
6436 update_stream_scaling_settings(&mode, dm_state, stream);
6437
6438 fill_audio_info(
6439 &stream->audio_info,
6440 drm_connector,
aed15309 6441 sink);
e7b07cee 6442
ceb3dbb4 6443 update_stream_signal(stream, sink);
9182b4cb 6444
d832fc3b 6445 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
6446 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6447
8a488f5d
RL
6448 if (stream->link->psr_settings.psr_feature_enabled) {
6449 //
6450 // should decide whether the stream supports VSC SDP colorimetry
6451 // capability before building the VSC info packet
6452 //
6453 stream->use_vsc_sdp_for_colorimetry = false;
6454 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6455 stream->use_vsc_sdp_for_colorimetry =
6456 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6457 } else {
6458 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6459 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 6460 }
8a488f5d 6461 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
1a365683
RL
6462 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6463
8c322309 6464 }
aed15309 6465finish:
dcd5fb82 6466 dc_sink_release(sink);
9e3efe3e 6467
e7b07cee
HW
6468 return stream;
6469}
6470
7578ecda 6471static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
6472{
6473 drm_crtc_cleanup(crtc);
6474 kfree(crtc);
6475}
6476
6477static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 6478 struct drm_crtc_state *state)
e7b07cee
HW
6479{
6480 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6481
6482 /* TODO: Destroy dc_stream objects once the stream object is flattened */
6483 if (cur->stream)
6484 dc_stream_release(cur->stream);
6485
6486
6487 __drm_atomic_helper_crtc_destroy_state(state);
6488
6489
6490 kfree(state);
6491}
6492
6493static void dm_crtc_reset_state(struct drm_crtc *crtc)
6494{
6495 struct dm_crtc_state *state;
6496
6497 if (crtc->state)
6498 dm_crtc_destroy_state(crtc, crtc->state);
6499
6500 state = kzalloc(sizeof(*state), GFP_KERNEL);
6501 if (WARN_ON(!state))
6502 return;
6503
1f8a52ec 6504 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
6505}
6506
6507static struct drm_crtc_state *
6508dm_crtc_duplicate_state(struct drm_crtc *crtc)
6509{
6510 struct dm_crtc_state *state, *cur;
6511
6512 cur = to_dm_crtc_state(crtc->state);
6513
6514 if (WARN_ON(!crtc->state))
6515 return NULL;
6516
2004f45e 6517 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
6518 if (!state)
6519 return NULL;
e7b07cee
HW
6520
6521 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6522
6523 if (cur->stream) {
6524 state->stream = cur->stream;
6525 dc_stream_retain(state->stream);
6526 }
6527
d6ef9b41 6528 state->active_planes = cur->active_planes;
98e6436d 6529 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 6530 state->abm_level = cur->abm_level;
bb47de73
NK
6531 state->vrr_supported = cur->vrr_supported;
6532 state->freesync_config = cur->freesync_config;
cf020d49
NK
6533 state->cm_has_degamma = cur->cm_has_degamma;
6534 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
035f5496 6535 state->force_dpms_off = cur->force_dpms_off;
e7b07cee
HW
6536 /* TODO: Duplicate dc_stream after the stream object is flattened */
6537
6538 return &state->base;
6539}
6540
86bc2219 6541#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
e69231c4 6542static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
86bc2219
WL
6543{
6544 crtc_debugfs_init(crtc);
6545
6546 return 0;
6547}
6548#endif
6549
d2574c33
MK
6550static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6551{
6552 enum dc_irq_source irq_source;
6553 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6554 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
6555 int rc;
6556
6557 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6558
6559 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6560
4711c033
LT
6561 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6562 acrtc->crtc_id, enable ? "en" : "dis", rc);
d2574c33
MK
6563 return rc;
6564}
589d2739
HW
6565
6566static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6567{
6568 enum dc_irq_source irq_source;
6569 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6570 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 6571 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
ea3b4242 6572#if defined(CONFIG_DRM_AMD_DC_DCN)
71338cb4 6573 struct amdgpu_display_manager *dm = &adev->dm;
09a5df6c 6574 struct vblank_control_work *work;
ea3b4242 6575#endif
d2574c33
MK
6576 int rc = 0;
6577
6578 if (enable) {
6579 /* vblank irq on -> Only need vupdate irq in vrr mode */
6580 if (amdgpu_dm_vrr_active(acrtc_state))
6581 rc = dm_set_vupdate_irq(crtc, true);
6582 } else {
6583 /* vblank irq off -> vupdate irq off */
6584 rc = dm_set_vupdate_irq(crtc, false);
6585 }
6586
6587 if (rc)
6588 return rc;
589d2739
HW
6589
6590 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
6591
6592 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6593 return -EBUSY;
6594
98ab5f35
BL
6595 if (amdgpu_in_reset(adev))
6596 return 0;
6597
4928b480 6598#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
6599 if (dm->vblank_control_workqueue) {
6600 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6601 if (!work)
6602 return -ENOMEM;
09a5df6c 6603
06dd1888
NK
6604 INIT_WORK(&work->work, vblank_control_worker);
6605 work->dm = dm;
6606 work->acrtc = acrtc;
6607 work->enable = enable;
09a5df6c 6608
06dd1888
NK
6609 if (acrtc_state->stream) {
6610 dc_stream_retain(acrtc_state->stream);
6611 work->stream = acrtc_state->stream;
6612 }
58aa1c50 6613
06dd1888
NK
6614 queue_work(dm->vblank_control_workqueue, &work->work);
6615 }
4928b480 6616#endif
71338cb4 6617
71338cb4 6618 return 0;
589d2739
HW
6619}
6620
6621static int dm_enable_vblank(struct drm_crtc *crtc)
6622{
6623 return dm_set_vblank(crtc, true);
6624}
6625
6626static void dm_disable_vblank(struct drm_crtc *crtc)
6627{
6628 dm_set_vblank(crtc, false);
6629}
6630
e7b07cee
HW
6631 /* Only the options currently available to the driver are implemented */
6632static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6633 .reset = dm_crtc_reset_state,
6634 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
6635 .set_config = drm_atomic_helper_set_config,
6636 .page_flip = drm_atomic_helper_page_flip,
6637 .atomic_duplicate_state = dm_crtc_duplicate_state,
6638 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 6639 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 6640 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 6641 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 6642 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
6643 .enable_vblank = dm_enable_vblank,
6644 .disable_vblank = dm_disable_vblank,
e3eff4b5 6645 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
86bc2219
WL
6646#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6647 .late_register = amdgpu_dm_crtc_late_register,
6648#endif
e7b07cee
HW
6649};
6650
6651static enum drm_connector_status
6652amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6653{
6654 bool connected;
c84dec2f 6655 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6656
1f6010a9
DF
6657 /*
6658 * Notes:
e7b07cee
HW
6659 * 1. This interface is NOT called in the context of an HPD irq.
6660 * 2. This interface *is called* in the context of a user-mode ioctl, which
1f6010a9
DF
6661 * makes it a bad place for *any* MST-related activity.
6662 */
e7b07cee 6663
8580d60b
HW
6664 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6665 !aconnector->fake_enable)
e7b07cee
HW
6666 connected = (aconnector->dc_sink != NULL);
6667 else
6668 connected = (aconnector->base.force == DRM_FORCE_ON);
6669
0f877894
OV
6670 update_subconnector_property(aconnector);
6671
e7b07cee
HW
6672 return (connected ? connector_status_connected :
6673 connector_status_disconnected);
6674}
6675
3ee6b26b
AD
6676int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6677 struct drm_connector_state *connector_state,
6678 struct drm_property *property,
6679 uint64_t val)
e7b07cee
HW
6680{
6681 struct drm_device *dev = connector->dev;
1348969a 6682 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6683 struct dm_connector_state *dm_old_state =
6684 to_dm_connector_state(connector->state);
6685 struct dm_connector_state *dm_new_state =
6686 to_dm_connector_state(connector_state);
6687
6688 int ret = -EINVAL;
6689
6690 if (property == dev->mode_config.scaling_mode_property) {
6691 enum amdgpu_rmx_type rmx_type;
6692
6693 switch (val) {
6694 case DRM_MODE_SCALE_CENTER:
6695 rmx_type = RMX_CENTER;
6696 break;
6697 case DRM_MODE_SCALE_ASPECT:
6698 rmx_type = RMX_ASPECT;
6699 break;
6700 case DRM_MODE_SCALE_FULLSCREEN:
6701 rmx_type = RMX_FULL;
6702 break;
6703 case DRM_MODE_SCALE_NONE:
6704 default:
6705 rmx_type = RMX_OFF;
6706 break;
6707 }
6708
6709 if (dm_old_state->scaling == rmx_type)
6710 return 0;
6711
6712 dm_new_state->scaling = rmx_type;
6713 ret = 0;
6714 } else if (property == adev->mode_info.underscan_hborder_property) {
6715 dm_new_state->underscan_hborder = val;
6716 ret = 0;
6717 } else if (property == adev->mode_info.underscan_vborder_property) {
6718 dm_new_state->underscan_vborder = val;
6719 ret = 0;
6720 } else if (property == adev->mode_info.underscan_property) {
6721 dm_new_state->underscan_enable = val;
6722 ret = 0;
c1ee92f9
DF
6723 } else if (property == adev->mode_info.abm_level_property) {
6724 dm_new_state->abm_level = val;
6725 ret = 0;
e7b07cee
HW
6726 }
6727
6728 return ret;
6729}
6730
3ee6b26b
AD
6731int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6732 const struct drm_connector_state *state,
6733 struct drm_property *property,
6734 uint64_t *val)
e7b07cee
HW
6735{
6736 struct drm_device *dev = connector->dev;
1348969a 6737 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6738 struct dm_connector_state *dm_state =
6739 to_dm_connector_state(state);
6740 int ret = -EINVAL;
6741
6742 if (property == dev->mode_config.scaling_mode_property) {
6743 switch (dm_state->scaling) {
6744 case RMX_CENTER:
6745 *val = DRM_MODE_SCALE_CENTER;
6746 break;
6747 case RMX_ASPECT:
6748 *val = DRM_MODE_SCALE_ASPECT;
6749 break;
6750 case RMX_FULL:
6751 *val = DRM_MODE_SCALE_FULLSCREEN;
6752 break;
6753 case RMX_OFF:
6754 default:
6755 *val = DRM_MODE_SCALE_NONE;
6756 break;
6757 }
6758 ret = 0;
6759 } else if (property == adev->mode_info.underscan_hborder_property) {
6760 *val = dm_state->underscan_hborder;
6761 ret = 0;
6762 } else if (property == adev->mode_info.underscan_vborder_property) {
6763 *val = dm_state->underscan_vborder;
6764 ret = 0;
6765 } else if (property == adev->mode_info.underscan_property) {
6766 *val = dm_state->underscan_enable;
6767 ret = 0;
c1ee92f9
DF
6768 } else if (property == adev->mode_info.abm_level_property) {
6769 *val = dm_state->abm_level;
6770 ret = 0;
e7b07cee 6771 }
c1ee92f9 6772
e7b07cee
HW
6773 return ret;
6774}
6775
526c654a
ED
6776static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6777{
6778 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6779
6780 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6781}
6782
7578ecda 6783static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 6784{
c84dec2f 6785 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6786 const struct dc_link *link = aconnector->dc_link;
1348969a 6787 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 6788 struct amdgpu_display_manager *dm = &adev->dm;
7fd13bae 6789 int i;
ada8ce15 6790
5dff80bd
AG
6791 /*
6792 * Call this only if mst_mgr was initialized earlier, since that is not done
6793 * for all connector types.
6794 */
6795 if (aconnector->mst_mgr.dev)
6796 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6797
e7b07cee
HW
6798#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6799 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
7fd13bae
AD
6800 for (i = 0; i < dm->num_of_edps; i++) {
6801 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6802 backlight_device_unregister(dm->backlight_dev[i]);
6803 dm->backlight_dev[i] = NULL;
6804 }
e7b07cee
HW
6805 }
6806#endif
dcd5fb82
MF
6807
6808 if (aconnector->dc_em_sink)
6809 dc_sink_release(aconnector->dc_em_sink);
6810 aconnector->dc_em_sink = NULL;
6811 if (aconnector->dc_sink)
6812 dc_sink_release(aconnector->dc_sink);
6813 aconnector->dc_sink = NULL;
6814
e86e8947 6815 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
6816 drm_connector_unregister(connector);
6817 drm_connector_cleanup(connector);
526c654a
ED
6818 if (aconnector->i2c) {
6819 i2c_del_adapter(&aconnector->i2c->base);
6820 kfree(aconnector->i2c);
6821 }
7daec99f 6822 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 6823
e7b07cee
HW
6824 kfree(connector);
6825}
6826
6827void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6828{
6829 struct dm_connector_state *state =
6830 to_dm_connector_state(connector->state);
6831
df099b9b
LSL
6832 if (connector->state)
6833 __drm_atomic_helper_connector_destroy_state(connector->state);
6834
e7b07cee
HW
6835 kfree(state);
6836
6837 state = kzalloc(sizeof(*state), GFP_KERNEL);
6838
6839 if (state) {
6840 state->scaling = RMX_OFF;
6841 state->underscan_enable = false;
6842 state->underscan_hborder = 0;
6843 state->underscan_vborder = 0;
01933ba4 6844 state->base.max_requested_bpc = 8;
3261e013
ML
6845 state->vcpi_slots = 0;
6846 state->pbn = 0;
c3e50f89
NK
6847 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6848 state->abm_level = amdgpu_dm_abm_level;
6849
df099b9b 6850 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
6851 }
6852}
6853
3ee6b26b
AD
6854struct drm_connector_state *
6855amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
6856{
6857 struct dm_connector_state *state =
6858 to_dm_connector_state(connector->state);
6859
6860 struct dm_connector_state *new_state =
6861 kmemdup(state, sizeof(*state), GFP_KERNEL);
6862
98e6436d
AK
6863 if (!new_state)
6864 return NULL;
e7b07cee 6865
98e6436d
AK
6866 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6867
6868 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6869 new_state->abm_level = state->abm_level;
922454c2
NK
6870 new_state->scaling = state->scaling;
6871 new_state->underscan_enable = state->underscan_enable;
6872 new_state->underscan_hborder = state->underscan_hborder;
6873 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
6874 new_state->vcpi_slots = state->vcpi_slots;
6875 new_state->pbn = state->pbn;
98e6436d 6876 return &new_state->base;
e7b07cee
HW
6877}
6878
14f04fa4
AD
6879static int
6880amdgpu_dm_connector_late_register(struct drm_connector *connector)
6881{
6882 struct amdgpu_dm_connector *amdgpu_dm_connector =
6883 to_amdgpu_dm_connector(connector);
00a8037e 6884 int r;
14f04fa4 6885
00a8037e
AD
6886 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6887 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6888 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6889 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6890 if (r)
6891 return r;
6892 }
6893
6894#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
6895 connector_debugfs_init(amdgpu_dm_connector);
6896#endif
6897
6898 return 0;
6899}
6900
e7b07cee
HW
6901static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6902 .reset = amdgpu_dm_connector_funcs_reset,
6903 .detect = amdgpu_dm_connector_detect,
6904 .fill_modes = drm_helper_probe_single_connector_modes,
6905 .destroy = amdgpu_dm_connector_destroy,
6906 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6907 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6908 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6909 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6910 .late_register = amdgpu_dm_connector_late_register,
526c654a 6911 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
6912};
6913
e7b07cee
HW
6914static int get_modes(struct drm_connector *connector)
6915{
6916 return amdgpu_dm_connector_get_modes(connector);
6917}
6918
c84dec2f 6919static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6920{
6921 struct dc_sink_init_data init_params = {
6922 .link = aconnector->dc_link,
6923 .sink_signal = SIGNAL_TYPE_VIRTUAL
6924 };
70e8ffc5 6925 struct edid *edid;
e7b07cee 6926
a89ff457 6927 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
6928 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6929 aconnector->base.name);
6930
6931 aconnector->base.force = DRM_FORCE_OFF;
6932 aconnector->base.override_edid = false;
6933 return;
6934 }
6935
70e8ffc5
HW
6936 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6937
e7b07cee
HW
6938 aconnector->edid = edid;
6939
6940 aconnector->dc_em_sink = dc_link_add_remote_sink(
6941 aconnector->dc_link,
6942 (uint8_t *)edid,
6943 (edid->extensions + 1) * EDID_LENGTH,
6944 &init_params);
6945
dcd5fb82 6946 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
6947 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6948 aconnector->dc_link->local_sink :
6949 aconnector->dc_em_sink;
dcd5fb82
MF
6950 dc_sink_retain(aconnector->dc_sink);
6951 }
e7b07cee
HW
6952}
6953
c84dec2f 6954static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6955{
6956 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6957
1f6010a9
DF
6958 /*
6959 * In the case of a headless boot with force on for a DP managed connector,
e7b07cee
HW
6960 * those settings have to be != 0 to get an initial modeset.
6961 */
6962 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6963 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6964 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6965 }
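/*
 * LINK_RATE_HIGH2 is HBR2 (5.4 Gb/s per lane), so the forced verified
 * cap above amounts to a 4-lane HBR2 link. That is assumed to be
 * enough bandwidth for an initial modeset even though no link
 * training has actually run on the forced connector.
 */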
6966
6967
6968 aconnector->base.override_edid = true;
6969 create_eml_sink(aconnector);
6970}
6971
cbd14ae7
SW
6972static struct dc_stream_state *
6973create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6974 const struct drm_display_mode *drm_mode,
6975 const struct dm_connector_state *dm_state,
6976 const struct dc_stream_state *old_stream)
6977{
6978 struct drm_connector *connector = &aconnector->base;
1348969a 6979 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 6980 struct dc_stream_state *stream;
4b7da34b
SW
6981 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6982 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
6983 enum dc_status dc_result = DC_OK;
6984
6985 do {
6986 stream = create_stream_for_sink(aconnector, drm_mode,
6987 dm_state, old_stream,
6988 requested_bpc);
6989 if (stream == NULL) {
6990 DRM_ERROR("Failed to create stream for sink!\n");
6991 break;
6992 }
6993
6994 dc_result = dc_validate_stream(adev->dm.dc, stream);
6995
6996 if (dc_result != DC_OK) {
74a16675 6997 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
6998 drm_mode->hdisplay,
6999 drm_mode->vdisplay,
7000 drm_mode->clock,
74a16675
RS
7001 dc_result,
7002 dc_status_to_str(dc_result));
cbd14ae7
SW
7003
7004 dc_stream_release(stream);
7005 stream = NULL;
7006 requested_bpc -= 2; /* lower bpc to retry validation */
7007 }
7008
7009 } while (stream == NULL && requested_bpc >= 6);
7010
68eb3ae3
WS
7011 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7012 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7013
7014 aconnector->force_yuv420_output = true;
7015 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7016 dm_state, old_stream);
7017 aconnector->force_yuv420_output = false;
7018 }
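/*
 * The resulting fallback ladder, starting from an assumed
 * max_requested_bpc of 10: the loop validates the stream at 10, then
 * 8, then 6 bpc; if the encoder still rejects it with
 * DC_FAIL_ENC_VALIDATE, the whole ladder is retried once with
 * YCbCr 4:2:0 forced, which roughly halves the required link
 * bandwidth compared to 4:4:4 at the same bpc.
 */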
7019
cbd14ae7
SW
7020 return stream;
7021}
7022
ba9ca088 7023enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 7024 struct drm_display_mode *mode)
e7b07cee
HW
7025{
7026 int result = MODE_ERROR;
7027 struct dc_sink *dc_sink;
e7b07cee 7028 /* TODO: Unhardcode stream count */
0971c40e 7029 struct dc_stream_state *stream;
c84dec2f 7030 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
7031
7032 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7033 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7034 return result;
7035
1f6010a9
DF
7036 /*
7037 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
7038 * EDID mgmt
7039 */
7040 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7041 !aconnector->dc_em_sink)
7042 handle_edid_mgmt(aconnector);
7043
c84dec2f 7044 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 7045
ad975f44
VL
7046 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7047 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
7048 DRM_ERROR("dc_sink is NULL!\n");
7049 goto fail;
7050 }
7051
cbd14ae7
SW
7052 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7053 if (stream) {
7054 dc_stream_release(stream);
e7b07cee 7055 result = MODE_OK;
cbd14ae7 7056 }
e7b07cee
HW
7057
7058fail:
7059 /* TODO: error handling */
7060 return result;
7061}
7062
88694af9
NK
7063static int fill_hdr_info_packet(const struct drm_connector_state *state,
7064 struct dc_info_packet *out)
7065{
7066 struct hdmi_drm_infoframe frame;
7067 unsigned char buf[30]; /* 26 + 4 */
7068 ssize_t len;
7069 int ret, i;
7070
7071 memset(out, 0, sizeof(*out));
7072
7073 if (!state->hdr_output_metadata)
7074 return 0;
7075
7076 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7077 if (ret)
7078 return ret;
7079
7080 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7081 if (len < 0)
7082 return (int)len;
7083
7084 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7085 if (len != 30)
7086 return -EINVAL;
7087
7088 /* Prepare the infopacket for DC. */
7089 switch (state->connector->connector_type) {
7090 case DRM_MODE_CONNECTOR_HDMIA:
7091 out->hb0 = 0x87; /* type */
7092 out->hb1 = 0x01; /* version */
7093 out->hb2 = 0x1A; /* length */
7094 out->sb[0] = buf[3]; /* checksum */
7095 i = 1;
7096 break;
7097
7098 case DRM_MODE_CONNECTOR_DisplayPort:
7099 case DRM_MODE_CONNECTOR_eDP:
7100 out->hb0 = 0x00; /* sdp id, zero */
7101 out->hb1 = 0x87; /* type */
7102 out->hb2 = 0x1D; /* payload len - 1 */
7103 out->hb3 = (0x13 << 2); /* sdp version */
7104 out->sb[0] = 0x01; /* version */
7105 out->sb[1] = 0x1A; /* length */
7106 i = 2;
7107 break;
7108
7109 default:
7110 return -EINVAL;
7111 }
7112
7113 memcpy(&out->sb[i], &buf[4], 26);
7114 out->valid = true;
7115
7116 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7117 sizeof(out->sb), false);
7118
7119 return 0;
7120}
7121
88694af9
NK
7122static int
7123amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 7124 struct drm_atomic_state *state)
88694af9 7125{
51e857af
SP
7126 struct drm_connector_state *new_con_state =
7127 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
7128 struct drm_connector_state *old_con_state =
7129 drm_atomic_get_old_connector_state(state, conn);
7130 struct drm_crtc *crtc = new_con_state->crtc;
7131 struct drm_crtc_state *new_crtc_state;
7132 int ret;
7133
e8a98235
RS
7134 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7135
88694af9
NK
7136 if (!crtc)
7137 return 0;
7138
72921cdf 7139 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
7140 struct dc_info_packet hdr_infopacket;
7141
7142 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7143 if (ret)
7144 return ret;
7145
7146 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7147 if (IS_ERR(new_crtc_state))
7148 return PTR_ERR(new_crtc_state);
7149
7150 /*
7151 * DC considers the stream backends changed if the
7152 * static metadata changes. Forcing the modeset also
7153 * gives a simple way for userspace to switch from
b232d4ed
NK
7154 * 8bpc to 10bpc when setting the metadata to enter
7155 * or exit HDR.
7156 *
7157 * Changing the static metadata after it's been
7158 * set is permissible, however. So only force a
7159 * modeset if we're entering or exiting HDR.
88694af9 7160 */
b232d4ed
NK
7161 new_crtc_state->mode_changed =
7162 !old_con_state->hdr_output_metadata ||
7163 !new_con_state->hdr_output_metadata;
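/*
 * In effect: NULL -> metadata or metadata -> NULL (entering or
 * exiting HDR) forces a modeset, while metadata -> different
 * metadata only updates the infopacket on the existing stream.
 */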
88694af9
NK
7164 }
7165
7166 return 0;
7167}
7168
e7b07cee
HW
7169static const struct drm_connector_helper_funcs
7170amdgpu_dm_connector_helper_funcs = {
7171 /*
1f6010a9 7172 * If hotplugging a second, bigger display in FB console mode, the bigger
b830ebc9 7173 * resolution modes will be filtered out by drm_mode_validate_size(), and
1f6010a9 7174 * those modes are missing after the user starts lightdm. So we need to
b830ebc9
HW
7175 * renew the modes list in the get_modes callback, not just return the modes count.
7176 */
e7b07cee
HW
7177 .get_modes = get_modes,
7178 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 7179 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
7180};
7181
7182static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7183{
7184}
7185
d6ef9b41 7186static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
7187{
7188 struct drm_atomic_state *state = new_crtc_state->state;
7189 struct drm_plane *plane;
7190 int num_active = 0;
7191
7192 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7193 struct drm_plane_state *new_plane_state;
7194
7195 /* Cursor planes are "fake". */
7196 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7197 continue;
7198
7199 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7200
7201 if (!new_plane_state) {
7202 /*
7203 * The plane is enabled on the CRTC and hasn't changed
7204 * state. This means that it previously passed
7205 * validation and is therefore enabled.
7206 */
7207 num_active += 1;
7208 continue;
7209 }
7210
7211 /* We need a framebuffer to be considered enabled. */
7212 num_active += (new_plane_state->fb != NULL);
7213 }
7214
d6ef9b41
NK
7215 return num_active;
7216}
7217
8fe684e9
NK
7218static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7219 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
7220{
7221 struct dm_crtc_state *dm_new_crtc_state =
7222 to_dm_crtc_state(new_crtc_state);
7223
7224 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
7225
7226 if (!dm_new_crtc_state->stream)
7227 return;
7228
7229 dm_new_crtc_state->active_planes =
7230 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
7231}
7232
3ee6b26b 7233static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 7234 struct drm_atomic_state *state)
e7b07cee 7235{
29b77ad7
MR
7236 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7237 crtc);
1348969a 7238 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 7239 struct dc *dc = adev->dm.dc;
29b77ad7 7240 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
7241 int ret = -EINVAL;
7242
5b8c5969 7243 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 7244
29b77ad7 7245 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 7246
bcd74374
ND
7247 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7248 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
e7b07cee
HW
7249 return ret;
7250 }
7251
bc92c065 7252 /*
b836a274
MD
7253 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7254 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7255 * planes are disabled, which is not supported by the hardware. And there is legacy
7256 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 7257 */
29b77ad7 7258 if (crtc_state->enable &&
ea9522f5
SS
7259 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7260 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 7261 return -EINVAL;
ea9522f5 7262 }
c14a005c 7263
b836a274
MD
7264 /* In some use cases, like reset, no stream is attached */
7265 if (!dm_crtc_state->stream)
7266 return 0;
7267
62c933f9 7268 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
7269 return 0;
7270
ea9522f5 7271 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
7272 return ret;
7273}
7274
3ee6b26b
AD
7275static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7276 const struct drm_display_mode *mode,
7277 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
7278{
7279 return true;
7280}
7281
7282static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7283 .disable = dm_crtc_helper_disable,
7284 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
7285 .mode_fixup = dm_crtc_helper_mode_fixup,
7286 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
7287};
7288
7289static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7290{
7291
7292}
7293
3261e013
ML
7294 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7295{
7296 switch (display_color_depth) {
7297 case COLOR_DEPTH_666:
7298 return 6;
7299 case COLOR_DEPTH_888:
7300 return 8;
7301 case COLOR_DEPTH_101010:
7302 return 10;
7303 case COLOR_DEPTH_121212:
7304 return 12;
7305 case COLOR_DEPTH_141414:
7306 return 14;
7307 case COLOR_DEPTH_161616:
7308 return 16;
7309 default:
7310 break;
7311 }
7312 return 0;
7313}
7314
3ee6b26b
AD
7315static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7316 struct drm_crtc_state *crtc_state,
7317 struct drm_connector_state *conn_state)
e7b07cee 7318{
3261e013
ML
7319 struct drm_atomic_state *state = crtc_state->state;
7320 struct drm_connector *connector = conn_state->connector;
7321 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7322 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7323 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7324 struct drm_dp_mst_topology_mgr *mst_mgr;
7325 struct drm_dp_mst_port *mst_port;
7326 enum dc_color_depth color_depth;
7327 int clock, bpp = 0;
1bc22f20 7328 bool is_y420 = false;
3261e013
ML
7329
7330 if (!aconnector->port || !aconnector->dc_sink)
7331 return 0;
7332
7333 mst_port = aconnector->port;
7334 mst_mgr = &aconnector->mst_port->mst_mgr;
7335
7336 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7337 return 0;
7338
7339 if (!state->duplicated) {
cbd14ae7 7340 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
7341 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7342 aconnector->force_yuv420_output;
cbd14ae7
SW
7343 color_depth = convert_color_depth_from_display_info(connector,
7344 is_y420,
7345 max_bpc);
3261e013
ML
7346 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
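/* e.g. COLOR_DEPTH_101010 -> 10 bpc x 3 components = 30 bpp */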
7347 clock = adjusted_mode->clock;
dc48529f 7348 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
7349 }
7350 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7351 mst_mgr,
7352 mst_port,
1c6c1cb5 7353 dm_new_connector_state->pbn,
03ca9600 7354 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
7355 if (dm_new_connector_state->vcpi_slots < 0) {
7356 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7357 return dm_new_connector_state->vcpi_slots;
7358 }
e7b07cee
HW
7359 return 0;
7360}
7361
7362const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7363 .disable = dm_encoder_helper_disable,
7364 .atomic_check = dm_encoder_helper_atomic_check
7365};
7366
d9fe1a4c 7367#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74 7368static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6513104b
HW
7369 struct dc_state *dc_state,
7370 struct dsc_mst_fairness_vars *vars)
29b9ba74
ML
7371{
7372 struct dc_stream_state *stream = NULL;
7373 struct drm_connector *connector;
5760dcb9 7374 struct drm_connector_state *new_con_state;
29b9ba74
ML
7375 struct amdgpu_dm_connector *aconnector;
7376 struct dm_connector_state *dm_conn_state;
a550bb16
HW
7377 int i, j;
7378 int vcpi, pbn_div, pbn, slot_num = 0;
29b9ba74 7379
5760dcb9 7380 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
7381
7382 aconnector = to_amdgpu_dm_connector(connector);
7383
7384 if (!aconnector->port)
7385 continue;
7386
7387 if (!new_con_state || !new_con_state->crtc)
7388 continue;
7389
7390 dm_conn_state = to_dm_connector_state(new_con_state);
7391
7392 for (j = 0; j < dc_state->stream_count; j++) {
7393 stream = dc_state->streams[j];
7394 if (!stream)
7395 continue;
7396
7397 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7398 break;
7399
7400 stream = NULL;
7401 }
7402
7403 if (!stream)
7404 continue;
7405
29b9ba74 7406 pbn_div = dm_mst_get_pbn_divider(stream->link);
6513104b
HW
7407 /* pbn is calculated by compute_mst_dsc_configs_for_state() */
7408 for (j = 0; j < dc_state->stream_count; j++) {
7409 if (vars[j].aconnector == aconnector) {
7410 pbn = vars[j].pbn;
7411 break;
7412 }
7413 }
7414
a550bb16
HW
7415 if (j == dc_state->stream_count)
7416 continue;
7417
7418 slot_num = DIV_ROUND_UP(pbn, pbn_div);
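/*
 * Worked example with assumed link parameters: a 4-lane HBR2 link
 * gives pbn_div == 40 PBN per MTP time slot, so a stream needing
 * pbn == 1000 occupies DIV_ROUND_UP(1000, 40) == 25 of the 64 slots.
 */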
7419
7420 if (stream->timing.flags.DSC != 1) {
7421 dm_conn_state->pbn = pbn;
7422 dm_conn_state->vcpi_slots = slot_num;
7423
7424 drm_dp_mst_atomic_enable_dsc(state,
7425 aconnector->port,
7426 dm_conn_state->pbn,
7427 0,
7428 false);
7429 continue;
7430 }
7431
29b9ba74
ML
7432 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7433 aconnector->port,
7434 pbn, pbn_div,
7435 true);
7436 if (vcpi < 0)
7437 return vcpi;
7438
7439 dm_conn_state->pbn = pbn;
7440 dm_conn_state->vcpi_slots = vcpi;
7441 }
7442 return 0;
7443}
d9fe1a4c 7444#endif
29b9ba74 7445
e7b07cee
HW
7446static void dm_drm_plane_reset(struct drm_plane *plane)
7447{
7448 struct dm_plane_state *amdgpu_state = NULL;
7449
7450 if (plane->state)
7451 plane->funcs->atomic_destroy_state(plane, plane->state);
7452
7453 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 7454 WARN_ON(amdgpu_state == NULL);
1f6010a9 7455
7ddaef96
NK
7456 if (amdgpu_state)
7457 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
7458}
7459
7460static struct drm_plane_state *
7461dm_drm_plane_duplicate_state(struct drm_plane *plane)
7462{
7463 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7464
7465 old_dm_plane_state = to_dm_plane_state(plane->state);
7466 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7467 if (!dm_plane_state)
7468 return NULL;
7469
7470 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7471
3be5262e
HW
7472 if (old_dm_plane_state->dc_state) {
7473 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7474 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
7475 }
7476
7477 return &dm_plane_state->base;
7478}
7479
dfd84d90 7480static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 7481 struct drm_plane_state *state)
e7b07cee
HW
7482{
7483 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7484
3be5262e
HW
7485 if (dm_plane_state->dc_state)
7486 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 7487
0627bbd3 7488 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
7489}
7490
7491static const struct drm_plane_funcs dm_plane_funcs = {
7492 .update_plane = drm_atomic_helper_update_plane,
7493 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 7494 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
7495 .reset = dm_drm_plane_reset,
7496 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7497 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 7498 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
7499};
7500
3ee6b26b
AD
7501static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7502 struct drm_plane_state *new_state)
e7b07cee
HW
7503{
7504 struct amdgpu_framebuffer *afb;
7505 struct drm_gem_object *obj;
5d43be0c 7506 struct amdgpu_device *adev;
e7b07cee 7507 struct amdgpu_bo *rbo;
e7b07cee 7508 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
7509 struct list_head list;
7510 struct ttm_validate_buffer tv;
7511 struct ww_acquire_ctx ticket;
5d43be0c
CK
7512 uint32_t domain;
7513 int r;
e7b07cee
HW
7514
7515 if (!new_state->fb) {
4711c033 7516 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
7517 return 0;
7518 }
7519
7520 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 7521 obj = new_state->fb->obj[0];
e7b07cee 7522 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 7523 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
7524 INIT_LIST_HEAD(&list);
7525
7526 tv.bo = &rbo->tbo;
7527 tv.num_shared = 1;
7528 list_add(&tv.head, &list);
7529
9165fb87 7530 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
7531 if (r) {
7532 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 7533 return r;
0f257b09 7534 }
e7b07cee 7535
5d43be0c 7536 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 7537 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
7538 else
7539 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 7540
7b7c6c81 7541 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 7542 if (unlikely(r != 0)) {
30b7c614
HW
7543 if (r != -ERESTARTSYS)
7544 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 7545 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
7546 return r;
7547 }
7548
bb812f1e
JZ
7549 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7550 if (unlikely(r != 0)) {
7551 amdgpu_bo_unpin(rbo);
0f257b09 7552 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7553 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
7554 return r;
7555 }
7df7e505 7556
0f257b09 7557 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7558
7b7c6c81 7559 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
7560
7561 amdgpu_bo_ref(rbo);
7562
cf322b49
NK
7563 /**
7564 * We don't do surface updates on planes that have been newly created,
7565 * but we also don't have the afb->address during atomic check.
7566 *
7567 * Fill in buffer attributes depending on the address here, but only on
7568 * newly created planes since they're not being used by DC yet and this
7569 * won't modify global state.
7570 */
7571 dm_plane_state_old = to_dm_plane_state(plane->state);
7572 dm_plane_state_new = to_dm_plane_state(new_state);
7573
3be5262e 7574 if (dm_plane_state_new->dc_state &&
cf322b49
NK
7575 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7576 struct dc_plane_state *plane_state =
7577 dm_plane_state_new->dc_state;
7578 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 7579
320932bf 7580 fill_plane_buffer_attributes(
695af5f9 7581 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 7582 afb->tiling_flags,
cf322b49
NK
7583 &plane_state->tiling_info, &plane_state->plane_size,
7584 &plane_state->dcc, &plane_state->address,
6eed95b0 7585 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
7586 }
7587
e7b07cee
HW
7588 return 0;
7589}
7590
3ee6b26b
AD
7591static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7592 struct drm_plane_state *old_state)
e7b07cee
HW
7593{
7594 struct amdgpu_bo *rbo;
e7b07cee
HW
7595 int r;
7596
7597 if (!old_state->fb)
7598 return;
7599
e68d14dd 7600 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
7601 r = amdgpu_bo_reserve(rbo, false);
7602 if (unlikely(r)) {
7603 DRM_ERROR("failed to reserve rbo before unpin\n");
7604 return;
b830ebc9
HW
7605 }
7606
7607 amdgpu_bo_unpin(rbo);
7608 amdgpu_bo_unreserve(rbo);
7609 amdgpu_bo_unref(&rbo);
e7b07cee
HW
7610}
7611
8c44515b
AP
7612static int dm_plane_helper_check_state(struct drm_plane_state *state,
7613 struct drm_crtc_state *new_crtc_state)
7614{
6300b3bd
MK
7615 struct drm_framebuffer *fb = state->fb;
7616 int min_downscale, max_upscale;
7617 int min_scale = 0;
7618 int max_scale = INT_MAX;
7619
40d916a2 7620 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 7621 if (fb && state->crtc) {
40d916a2
NC
7622 /* Validate viewport to cover the case when only the position changes */
7623 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7624 int viewport_width = state->crtc_w;
7625 int viewport_height = state->crtc_h;
7626
7627 if (state->crtc_x < 0)
7628 viewport_width += state->crtc_x;
7629 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7630 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7631
7632 if (state->crtc_y < 0)
7633 viewport_height += state->crtc_y;
7634 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7635 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7636
4abdb72b
NC
7637 if (viewport_width < 0 || viewport_height < 0) {
7638 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7639 return -EINVAL;
7640 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7641 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 7642 return -EINVAL;
4abdb72b
NC
7643 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7644 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 7645 return -EINVAL;
4abdb72b
NC
7646 }
7647
40d916a2
NC
7648 }
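/*
 * Clamping example with assumed coordinates: a plane at
 * crtc_x == -100 with crtc_w == 500 leaves a 400-pixel-wide visible
 * viewport. The width floor is MIN_VIEWPORT_SIZE * 2 because a pipe
 * split shares the plane between two pipes, each of which must still
 * meet the minimum viewport size on its own.
 */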
7649
7650 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
7651 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7652 &min_downscale, &max_upscale);
7653 /*
7654 * Convert to drm convention: 16.16 fixed point, instead of dc's
7655 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7656 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7657 */
7658 min_scale = (1000 << 16) / max_upscale;
7659 max_scale = (1000 << 16) / min_downscale;
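/*
 * Worked example with hypothetical plane caps: min_downscale == 250
 * (4:1 downscale) and max_upscale == 16000 (16:1 upscale) yield
 * min_scale == (1000 << 16) / 16000 == 4096 (0.0625 in 16.16) and
 * max_scale == (1000 << 16) / 250 == 262144 (4.0 in 16.16).
 */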
7660 }
8c44515b 7661
8c44515b 7662 return drm_atomic_helper_check_plane_state(
6300b3bd 7663 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
7664}
7665
7578ecda 7666static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 7667 struct drm_atomic_state *state)
cbd19488 7668{
7c11b99a
MR
7669 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7670 plane);
1348969a 7671 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 7672 struct dc *dc = adev->dm.dc;
78171832 7673 struct dm_plane_state *dm_plane_state;
695af5f9 7674 struct dc_scaling_info scaling_info;
8c44515b 7675 struct drm_crtc_state *new_crtc_state;
695af5f9 7676 int ret;
78171832 7677
ba5c1649 7678 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 7679
ba5c1649 7680 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 7681
3be5262e 7682 if (!dm_plane_state->dc_state)
9a3329b1 7683 return 0;
cbd19488 7684
8c44515b 7685 new_crtc_state =
dec92020 7686 drm_atomic_get_new_crtc_state(state,
ba5c1649 7687 new_plane_state->crtc);
8c44515b
AP
7688 if (!new_crtc_state)
7689 return -EINVAL;
7690
ba5c1649 7691 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
7692 if (ret)
7693 return ret;
7694
4375d625 7695 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
695af5f9
NK
7696 if (ret)
7697 return ret;
a05bcff1 7698
62c933f9 7699 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
7700 return 0;
7701
7702 return -EINVAL;
7703}
7704
674e78ac 7705static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7706 struct drm_atomic_state *state)
674e78ac
NK
7707{
7708 /* Only support async updates on cursor planes. */
7709 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7710 return -EINVAL;
7711
7712 return 0;
7713}
7714
7715static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7716 struct drm_atomic_state *state)
674e78ac 7717{
5ddb0bd4
MR
7718 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7719 plane);
674e78ac 7720 struct drm_plane_state *old_state =
5ddb0bd4 7721 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7722
e8a98235
RS
7723 trace_amdgpu_dm_atomic_update_cursor(new_state);
7724
332af874 7725 swap(plane->state->fb, new_state->fb);
674e78ac
NK
7726
7727 plane->state->src_x = new_state->src_x;
7728 plane->state->src_y = new_state->src_y;
7729 plane->state->src_w = new_state->src_w;
7730 plane->state->src_h = new_state->src_h;
7731 plane->state->crtc_x = new_state->crtc_x;
7732 plane->state->crtc_y = new_state->crtc_y;
7733 plane->state->crtc_w = new_state->crtc_w;
7734 plane->state->crtc_h = new_state->crtc_h;
7735
7736 handle_cursor_update(plane, old_state);
7737}
7738
e7b07cee
HW
7739static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7740 .prepare_fb = dm_plane_helper_prepare_fb,
7741 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7742 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
7743 .atomic_async_check = dm_plane_atomic_async_check,
7744 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
7745};
7746
7747/*
7748 * TODO: these are currently initialized to rgb formats only.
7749 * For future use cases we should either initialize them dynamically based on
7750 * plane capabilities, or initialize this array to all formats, so the internal drm
1f6010a9 7751 * check will succeed, and let DC implement the proper check.
e7b07cee 7752 */
d90371b0 7753static const uint32_t rgb_formats[] = {
e7b07cee
HW
7754 DRM_FORMAT_XRGB8888,
7755 DRM_FORMAT_ARGB8888,
7756 DRM_FORMAT_RGBA8888,
7757 DRM_FORMAT_XRGB2101010,
7758 DRM_FORMAT_XBGR2101010,
7759 DRM_FORMAT_ARGB2101010,
7760 DRM_FORMAT_ABGR2101010,
58020403
MK
7761 DRM_FORMAT_XRGB16161616,
7762 DRM_FORMAT_XBGR16161616,
7763 DRM_FORMAT_ARGB16161616,
7764 DRM_FORMAT_ABGR16161616,
bcd47f60
MR
7765 DRM_FORMAT_XBGR8888,
7766 DRM_FORMAT_ABGR8888,
46dd9ff7 7767 DRM_FORMAT_RGB565,
e7b07cee
HW
7768};
7769
0d579c7e
NK
7770static const uint32_t overlay_formats[] = {
7771 DRM_FORMAT_XRGB8888,
7772 DRM_FORMAT_ARGB8888,
7773 DRM_FORMAT_RGBA8888,
7774 DRM_FORMAT_XBGR8888,
7775 DRM_FORMAT_ABGR8888,
7267a1a9 7776 DRM_FORMAT_RGB565
e7b07cee
HW
7777};
7778
7779static const u32 cursor_formats[] = {
7780 DRM_FORMAT_ARGB8888
7781};
7782
37c6a93b
NK
7783static int get_plane_formats(const struct drm_plane *plane,
7784 const struct dc_plane_cap *plane_cap,
7785 uint32_t *formats, int max_formats)
e7b07cee 7786{
37c6a93b
NK
7787 int i, num_formats = 0;
7788
7789 /*
7790 * TODO: Query support for each group of formats directly from
7791 * DC plane caps. This will require adding more formats to the
7792 * caps list.
7793 */
e7b07cee 7794
f180b4bc 7795 switch (plane->type) {
e7b07cee 7796 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
7797 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7798 if (num_formats >= max_formats)
7799 break;
7800
7801 formats[num_formats++] = rgb_formats[i];
7802 }
7803
ea36ad34 7804 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 7805 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
7806 if (plane_cap && plane_cap->pixel_format_support.p010)
7807 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
7808 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7809 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7810 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
7811 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7812 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 7813 }
e7b07cee 7814 break;
37c6a93b 7815
e7b07cee 7816 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
7817 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7818 if (num_formats >= max_formats)
7819 break;
7820
7821 formats[num_formats++] = overlay_formats[i];
7822 }
e7b07cee 7823 break;
37c6a93b 7824
e7b07cee 7825 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
7826 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7827 if (num_formats >= max_formats)
7828 break;
7829
7830 formats[num_formats++] = cursor_formats[i];
7831 }
e7b07cee
HW
7832 break;
7833 }
7834
37c6a93b
NK
7835 return num_formats;
7836}
7837
7838static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7839 struct drm_plane *plane,
7840 unsigned long possible_crtcs,
7841 const struct dc_plane_cap *plane_cap)
7842{
7843 uint32_t formats[32];
7844 int num_formats;
7845 int res = -EPERM;
ecc874a6 7846 unsigned int supported_rotations;
faa37f54 7847 uint64_t *modifiers = NULL;
37c6a93b
NK
7848
7849 num_formats = get_plane_formats(plane, plane_cap, formats,
7850 ARRAY_SIZE(formats));
7851
faa37f54
BN
7852 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7853 if (res)
7854 return res;
7855
4a580877 7856 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7857 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
7858 modifiers, plane->type, NULL);
7859 kfree(modifiers);
37c6a93b
NK
7860 if (res)
7861 return res;
7862
cc1fec57
NK
7863 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7864 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
7865 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7866 BIT(DRM_MODE_BLEND_PREMULTI);
7867
7868 drm_plane_create_alpha_property(plane);
7869 drm_plane_create_blend_mode_property(plane, blend_caps);
7870 }
7871
fc8e5230 7872 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
7873 plane_cap &&
7874 (plane_cap->pixel_format_support.nv12 ||
7875 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
7876 /* This only affects YUV formats. */
7877 drm_plane_create_color_properties(
7878 plane,
7879 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
7880 BIT(DRM_COLOR_YCBCR_BT709) |
7881 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
7882 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7883 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7884 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7885 }
7886
ecc874a6
PLG
7887 supported_rotations =
7888 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7889 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7890
1347385f
SS
7891 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7892 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
7893 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7894 supported_rotations);
ecc874a6 7895
f180b4bc 7896 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7897
96719c54 7898 /* Create (reset) the plane state */
f180b4bc
HW
7899 if (plane->funcs->reset)
7900 plane->funcs->reset(plane);
96719c54 7901
37c6a93b 7902 return 0;
e7b07cee
HW
7903}
7904
7578ecda
AD
7905static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7906 struct drm_plane *plane,
7907 uint32_t crtc_index)
e7b07cee
HW
7908{
7909 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7910 struct drm_plane *cursor_plane;
e7b07cee
HW
7911
7912 int res = -ENOMEM;
7913
7914 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7915 if (!cursor_plane)
7916 goto fail;
7917
f180b4bc 7918 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7919 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
7920
7921 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7922 if (!acrtc)
7923 goto fail;
7924
7925 res = drm_crtc_init_with_planes(
7926 dm->ddev,
7927 &acrtc->base,
7928 plane,
f180b4bc 7929 cursor_plane,
e7b07cee
HW
7930 &amdgpu_dm_crtc_funcs, NULL);
7931
7932 if (res)
7933 goto fail;
7934
7935 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7936
96719c54
HW
7937 /* Create (reset) the plane state */
7938 if (acrtc->base.funcs->reset)
7939 acrtc->base.funcs->reset(&acrtc->base);
7940
e7b07cee
HW
7941 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7942 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7943
7944 acrtc->crtc_id = crtc_index;
7945 acrtc->base.enabled = false;
c37e2d29 7946 acrtc->otg_inst = -1;
e7b07cee
HW
7947
7948 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
7949 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7950 true, MAX_COLOR_LUT_ENTRIES);
086247a4 7951 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 7952
e7b07cee
HW
7953 return 0;
7954
7955fail:
b830ebc9
HW
7956 kfree(acrtc);
7957 kfree(cursor_plane);
e7b07cee
HW
7958 return res;
7959}
7960
7961
7962static int to_drm_connector_type(enum signal_type st)
7963{
7964 switch (st) {
7965 case SIGNAL_TYPE_HDMI_TYPE_A:
7966 return DRM_MODE_CONNECTOR_HDMIA;
7967 case SIGNAL_TYPE_EDP:
7968 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
7969 case SIGNAL_TYPE_LVDS:
7970 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
7971 case SIGNAL_TYPE_RGB:
7972 return DRM_MODE_CONNECTOR_VGA;
7973 case SIGNAL_TYPE_DISPLAY_PORT:
7974 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7975 return DRM_MODE_CONNECTOR_DisplayPort;
7976 case SIGNAL_TYPE_DVI_DUAL_LINK:
7977 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7978 return DRM_MODE_CONNECTOR_DVID;
7979 case SIGNAL_TYPE_VIRTUAL:
7980 return DRM_MODE_CONNECTOR_VIRTUAL;
7981
7982 default:
7983 return DRM_MODE_CONNECTOR_Unknown;
7984 }
7985}
7986
2b4c1c05
DV
7987static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7988{
62afb4ad
JRS
7989 struct drm_encoder *encoder;
7990
7991 /* There is only one encoder per connector */
7992 drm_connector_for_each_possible_encoder(connector, encoder)
7993 return encoder;
7994
7995 return NULL;
2b4c1c05
DV
7996}
7997
e7b07cee
HW
7998static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7999{
e7b07cee
HW
8000 struct drm_encoder *encoder;
8001 struct amdgpu_encoder *amdgpu_encoder;
8002
2b4c1c05 8003 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
8004
8005 if (encoder == NULL)
8006 return;
8007
8008 amdgpu_encoder = to_amdgpu_encoder(encoder);
8009
8010 amdgpu_encoder->native_mode.clock = 0;
8011
8012 if (!list_empty(&connector->probed_modes)) {
8013 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 8014
e7b07cee 8015 list_for_each_entry(preferred_mode,
b830ebc9
HW
8016 &connector->probed_modes,
8017 head) {
8018 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8019 amdgpu_encoder->native_mode = *preferred_mode;
8020
e7b07cee
HW
8021 break;
8022 }
8023
8024 }
8025}
8026
3ee6b26b
AD
8027static struct drm_display_mode *
8028amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8029 char *name,
8030 int hdisplay, int vdisplay)
e7b07cee
HW
8031{
8032 struct drm_device *dev = encoder->dev;
8033 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8034 struct drm_display_mode *mode = NULL;
8035 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8036
8037 mode = drm_mode_duplicate(dev, native_mode);
8038
b830ebc9 8039 if (mode == NULL)
e7b07cee
HW
8040 return NULL;
8041
8042 mode->hdisplay = hdisplay;
8043 mode->vdisplay = vdisplay;
8044 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 8045 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
8046
8047 return mode;
8048
8049}
8050
8051static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 8052 struct drm_connector *connector)
e7b07cee
HW
8053{
8054 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8055 struct drm_display_mode *mode = NULL;
8056 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
8057 struct amdgpu_dm_connector *amdgpu_dm_connector =
8058 to_amdgpu_dm_connector(connector);
e7b07cee
HW
8059 int i;
8060 int n;
8061 struct mode_size {
8062 char name[DRM_DISPLAY_MODE_LEN];
8063 int w;
8064 int h;
b830ebc9 8065 } common_modes[] = {
e7b07cee
HW
8066 { "640x480", 640, 480},
8067 { "800x600", 800, 600},
8068 { "1024x768", 1024, 768},
8069 { "1280x720", 1280, 720},
8070 { "1280x800", 1280, 800},
8071 {"1280x1024", 1280, 1024},
8072 { "1440x900", 1440, 900},
8073 {"1680x1050", 1680, 1050},
8074 {"1600x1200", 1600, 1200},
8075 {"1920x1080", 1920, 1080},
8076 {"1920x1200", 1920, 1200}
8077 };
8078
b830ebc9 8079 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
8080
8081 for (i = 0; i < n; i++) {
8082 struct drm_display_mode *curmode = NULL;
8083 bool mode_existed = false;
8084
8085 if (common_modes[i].w > native_mode->hdisplay ||
8086 common_modes[i].h > native_mode->vdisplay ||
8087 (common_modes[i].w == native_mode->hdisplay &&
8088 common_modes[i].h == native_mode->vdisplay))
8089 continue;
8090
8091 list_for_each_entry(curmode, &connector->probed_modes, head) {
8092 if (common_modes[i].w == curmode->hdisplay &&
8093 common_modes[i].h == curmode->vdisplay) {
8094 mode_existed = true;
8095 break;
8096 }
8097 }
8098
8099 if (mode_existed)
8100 continue;
8101
8102 mode = amdgpu_dm_create_common_mode(encoder,
8103 common_modes[i].name, common_modes[i].w,
8104 common_modes[i].h);
8105 drm_mode_probed_add(connector, mode);
8106 amdgpu_dm_connector->num_modes++;
8107 }
8108}
8109
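/*
 * A minimal standalone sketch of the admission rule used by
 * amdgpu_dm_connector_add_common_modes() above: a common mode is added only
 * if it fits strictly inside the native mode and is not identical to it
 * (duplicates already present in probed_modes are also skipped there).
 * common_mode_fits() and struct simple_mode are hypothetical, not part of
 * this driver.
 */
#include <stdbool.h>

struct simple_mode { int w, h; };

static bool common_mode_fits(struct simple_mode cm, struct simple_mode native)
{
	/* Reject modes larger than the native mode in either dimension. */
	if (cm.w > native.w || cm.h > native.h)
		return false;
	/* Reject the native mode itself; it is already in probed_modes. */
	if (cm.w == native.w && cm.h == native.h)
		return false;
	return true;
}
/*
 * Example, assuming a 1920x1200 native panel: 1920x1080 is accepted,
 * 1920x1200 is rejected (identical), 2560x1440 is rejected (too large).
 */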
8110static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8111{
8112 struct drm_encoder *encoder;
8113 struct amdgpu_encoder *amdgpu_encoder;
8114 const struct drm_display_mode *native_mode;
8115
8116 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8117 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8118 return;
8119
8120 encoder = amdgpu_dm_connector_to_encoder(connector);
8121 if (!encoder)
8122 return;
8123
8124 amdgpu_encoder = to_amdgpu_encoder(encoder);
8125
8126 native_mode = &amdgpu_encoder->native_mode;
8127 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8128 return;
8129
8130 drm_connector_set_panel_orientation_with_quirk(connector,
8131 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8132 native_mode->hdisplay,
8133 native_mode->vdisplay);
8134}
8135
8136static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8137 struct edid *edid)
8138{
8139 struct amdgpu_dm_connector *amdgpu_dm_connector =
8140 to_amdgpu_dm_connector(connector);
8141
8142 if (edid) {
8143 /* empty probed_modes */
8144 INIT_LIST_HEAD(&connector->probed_modes);
8145 amdgpu_dm_connector->num_modes =
8146 drm_add_edid_modes(connector, edid);
8147
8148 /* Sort the probed modes before calling
8149 * amdgpu_dm_get_native_mode(), since an EDID can
8150 * contain more than one preferred mode. Modes later
8151 * in the probed mode list can have a higher,
8152 * preferred resolution. For example, 3840x2160 in the
8153 * base EDID preferred timing and 4096x2160 as the
8154 * preferred resolution in a later DID extension block.
8155 */
8156 drm_mode_sort(&connector->probed_modes);
e7b07cee 8157 amdgpu_dm_get_native_mode(connector);
8158
8159 /* Freesync capabilities are reset by calling
8160 * drm_add_edid_modes() and need to be
8161 * restored here.
8162 */
8163 amdgpu_dm_update_freesync_caps(connector, edid);
8164
8165 amdgpu_set_panel_orientation(connector);
8166 } else {
8167 amdgpu_dm_connector->num_modes = 0;
8168 }
8169}
8170
8171static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8172 struct drm_display_mode *mode)
8173{
8174 struct drm_display_mode *m;
8175
8176 list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8177 if (drm_mode_equal(m, mode))
8178 return true;
8179 }
8180
8181 return false;
8182}
8183
8184static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8185{
8186 const struct drm_display_mode *m;
8187 struct drm_display_mode *new_mode;
8188 uint i;
8189 uint32_t new_modes_count = 0;
8190
8191 /* Standard FPS values
8192 *
8193 * 23.976 - TV/NTSC
8194 * 24 - Cinema
8195 * 25 - TV/PAL
8196 * 29.97 - TV/NTSC
8197 * 30 - TV/NTSC
8198 * 48 - Cinema HFR
8199 * 50 - TV/PAL
8200 * 60 - Commonly used
8201 * 48,72,96,120 - Multiples of 24
8202 */
8203 static const uint32_t common_rates[] = {
8204 23976, 24000, 25000, 29970, 30000,
8205 48000, 50000, 60000, 72000, 96000, 120000
8206 };
8207
8208 /*
8209 * Find mode with highest refresh rate with the same resolution
8210 * as the preferred mode. Some monitors report a preferred mode
8211 * with lower resolution than the highest refresh rate supported.
8212 */
8213
8214 m = get_highest_refresh_rate_mode(aconnector, true);
8215 if (!m)
8216 return 0;
8217
8218 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8219 uint64_t target_vtotal, target_vtotal_diff;
8220 uint64_t num, den;
8221
8222 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8223 continue;
8224
8225 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8226 common_rates[i] > aconnector->max_vfreq * 1000)
8227 continue;
8228
8229 num = (unsigned long long)m->clock * 1000 * 1000;
8230 den = common_rates[i] * (unsigned long long)m->htotal;
8231 target_vtotal = div_u64(num, den);
8232 target_vtotal_diff = target_vtotal - m->vtotal;
8233
8234 /* Check for illegal modes */
8235 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8236 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8237 m->vtotal + target_vtotal_diff < m->vsync_end)
8238 continue;
8239
8240 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8241 if (!new_mode)
8242 goto out;
8243
8244 new_mode->vtotal += (u16)target_vtotal_diff;
8245 new_mode->vsync_start += (u16)target_vtotal_diff;
8246 new_mode->vsync_end += (u16)target_vtotal_diff;
8247 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8248 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8249
8250 if (!is_duplicate_mode(aconnector, new_mode)) {
8251 drm_mode_probed_add(&aconnector->base, new_mode);
8252 new_modes_count += 1;
8253 } else
8254 drm_mode_destroy(aconnector->base.dev, new_mode);
8255 }
8256 out:
8257 return new_modes_count;
8258}
8259
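/*
 * A standalone worked example of the vtotal-stretch arithmetic in
 * add_fs_modes() above: the pixel clock and htotal stay fixed and only
 * vtotal grows to hit a lower target refresh rate. The 1920x1080@60 CEA
 * timing used here (148.5 MHz, htotal 2200, vtotal 1125) is an assumed
 * input, and plain 64-bit division stands in for div_u64().
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clock_khz = 148500;	/* drm_display_mode::clock is in kHz */
	uint64_t htotal = 2200;
	uint64_t vtotal = 1125;
	uint64_t rate_millihz = 48000;	/* target rate in mHz, as in common_rates[] */

	/* Same formula as above: vtotal' = clock * 1e6 / (rate_mHz * htotal) */
	uint64_t num = clock_khz * 1000 * 1000;
	uint64_t den = rate_millihz * htotal;
	uint64_t target_vtotal = num / den;

	/* 148500 * 1e6 / (48000 * 2200) = 1406, a stretch of 281 extra lines */
	printf("target vtotal = %llu (diff %llu)\n",
	       (unsigned long long)target_vtotal,
	       (unsigned long long)(target_vtotal - vtotal));
	return 0;
}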
8260static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8261 struct edid *edid)
8262{
8263 struct amdgpu_dm_connector *amdgpu_dm_connector =
8264 to_amdgpu_dm_connector(connector);
8265
8266 if (!(amdgpu_freesync_vid_mode && edid))
8267 return;
8268
8269 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8270 amdgpu_dm_connector->num_modes +=
8271 add_fs_modes(amdgpu_dm_connector);
8272}
8273
8274static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8275{
8276 struct amdgpu_dm_connector *amdgpu_dm_connector =
8277 to_amdgpu_dm_connector(connector);
8278 struct drm_encoder *encoder;
8279 struct edid *edid = amdgpu_dm_connector->edid;
8280
8281 encoder = amdgpu_dm_connector_to_encoder(connector);
8282
8283 if (!drm_edid_is_valid(edid)) {
8284 amdgpu_dm_connector->num_modes =
8285 drm_add_modes_noedid(connector, 640, 480);
8286 } else {
8287 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8288 amdgpu_dm_connector_add_common_modes(encoder, connector);
8289 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8290 }
8291 amdgpu_dm_fbc_init(connector);
8292
8293 return amdgpu_dm_connector->num_modes;
8294}
8295
8296void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8297 struct amdgpu_dm_connector *aconnector,
8298 int connector_type,
8299 struct dc_link *link,
8300 int link_index)
8301{
8302 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8303
8304 /*
8305 * Some of the properties below require access to state, like bpc.
8306 * Allocate some default initial connector state with our reset helper.
8307 */
8308 if (aconnector->base.funcs->reset)
8309 aconnector->base.funcs->reset(&aconnector->base);
8310
8311 aconnector->connector_id = link_index;
8312 aconnector->dc_link = link;
8313 aconnector->base.interlace_allowed = false;
8314 aconnector->base.doublescan_allowed = false;
8315 aconnector->base.stereo_allowed = false;
8316 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8317 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8318 aconnector->audio_inst = -1;
8319 mutex_init(&aconnector->hpd_lock);
8320
8321 /*
8322 * Configure HPD hot-plug support: connector->polled defaults to 0,
8323 * which means HPD hot plug is not supported.
8324 */
8325 switch (connector_type) {
8326 case DRM_MODE_CONNECTOR_HDMIA:
8327 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8328 aconnector->base.ycbcr_420_allowed =
8329 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8330 break;
8331 case DRM_MODE_CONNECTOR_DisplayPort:
8332 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8333 link->link_enc = dp_get_link_enc(link);
8334 ASSERT(link->link_enc);
8335 if (link->link_enc)
8336 aconnector->base.ycbcr_420_allowed =
8337 link->link_enc->features.dp_ycbcr420_supported ? true : false;
8338 break;
8339 case DRM_MODE_CONNECTOR_DVID:
8340 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8341 break;
8342 default:
8343 break;
8344 }
8345
8346 drm_object_attach_property(&aconnector->base.base,
8347 dm->ddev->mode_config.scaling_mode_property,
8348 DRM_MODE_SCALE_NONE);
8349
8350 drm_object_attach_property(&aconnector->base.base,
8351 adev->mode_info.underscan_property,
8352 UNDERSCAN_OFF);
8353 drm_object_attach_property(&aconnector->base.base,
8354 adev->mode_info.underscan_hborder_property,
8355 0);
8356 drm_object_attach_property(&aconnector->base.base,
8357 adev->mode_info.underscan_vborder_property,
8358 0);
8359
8360 if (!aconnector->mst_port)
8361 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8362
8363 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8364 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8365 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8366
8367 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8368 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8369 drm_object_attach_property(&aconnector->base.base,
8370 adev->mode_info.abm_level_property, 0);
8371 }
8372
8373 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8374 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8375 connector_type == DRM_MODE_CONNECTOR_eDP) {
8376 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8377
8378 if (!aconnector->mst_port)
8379 drm_connector_attach_vrr_capable_property(&aconnector->base);
8380
8381#ifdef CONFIG_DRM_AMD_DC_HDCP
8382 if (adev->dm.hdcp_workqueue)
8383 drm_connector_attach_content_protection_property(&aconnector->base, true);
8384#endif
8385 }
8386}
8387
8388static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8389 struct i2c_msg *msgs, int num)
8390{
8391 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8392 struct ddc_service *ddc_service = i2c->ddc_service;
8393 struct i2c_command cmd;
8394 int i;
8395 int result = -EIO;
8396
8397 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8398
8399 if (!cmd.payloads)
8400 return result;
8401
8402 cmd.number_of_payloads = num;
8403 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8404 cmd.speed = 100;
8405
8406 for (i = 0; i < num; i++) {
8407 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8408 cmd.payloads[i].address = msgs[i].addr;
8409 cmd.payloads[i].length = msgs[i].len;
8410 cmd.payloads[i].data = msgs[i].buf;
8411 }
8412
8413 if (dc_submit_i2c(
8414 ddc_service->ctx->dc,
8415 ddc_service->ddc_pin->hw_info.ddc_channel,
8416 &cmd))
8417 result = num;
8418
8419 kfree(cmd.payloads);
8420 return result;
8421}
8422
8423static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8424{
8425 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8426}
8427
8428static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8429 .master_xfer = amdgpu_dm_i2c_xfer,
8430 .functionality = amdgpu_dm_i2c_func,
8431};
8432
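/*
 * A sketch of how a caller-side transaction maps onto the payload
 * translation in amdgpu_dm_i2c_xfer() above: each i2c_msg becomes one
 * i2c_payload, with .write derived from the absence of I2C_M_RD. The
 * 0x50 address and one-byte offset write are the usual DDC/EDID
 * convention, assumed here for illustration; read_edid_block() is a
 * hypothetical helper, not part of this driver.
 */
#include <linux/kernel.h>
#include <linux/i2c.h>

static int read_edid_block(struct i2c_adapter *adap, u8 *buf)
{
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset }, /* payload[0].write = true  */
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf },     /* payload[1].write = false */
	};

	/* amdgpu_dm_i2c_xfer() returns num (here 2) on success, -EIO otherwise */
	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
}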
8433static struct amdgpu_i2c_adapter *
8434create_i2c(struct ddc_service *ddc_service,
8435 int link_index,
8436 int *res)
8437{
8438 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8439 struct amdgpu_i2c_adapter *i2c;
8440
8441 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8442 if (!i2c)
8443 return NULL;
8444 i2c->base.owner = THIS_MODULE;
8445 i2c->base.class = I2C_CLASS_DDC;
8446 i2c->base.dev.parent = &adev->pdev->dev;
8447 i2c->base.algo = &amdgpu_dm_i2c_algo;
8448 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8449 i2c_set_adapdata(&i2c->base, i2c);
8450 i2c->ddc_service = ddc_service;
8451 if (i2c->ddc_service->ddc_pin)
8452 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8453
8454 return i2c;
8455}
8456
8457
8458/*
8459 * Note: this function assumes that dc_link_detect() was called for the
8460 * dc_link which will be represented by this aconnector.
8461 */
8462static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8463 struct amdgpu_dm_connector *aconnector,
8464 uint32_t link_index,
8465 struct amdgpu_encoder *aencoder)
8466{
8467 int res = 0;
8468 int connector_type;
8469 struct dc *dc = dm->dc;
8470 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8471 struct amdgpu_i2c_adapter *i2c;
8472
8473 link->priv = aconnector;
8474
8475 DRM_DEBUG_DRIVER("%s()\n", __func__);
8476
8477 i2c = create_i2c(link->ddc, link->link_index, &res);
8478 if (!i2c) {
8479 DRM_ERROR("Failed to create i2c adapter data\n");
8480 return -ENOMEM;
8481 }
8482
8483 aconnector->i2c = i2c;
8484 res = i2c_add_adapter(&i2c->base);
8485
8486 if (res) {
8487 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8488 goto out_free;
8489 }
8490
8491 connector_type = to_drm_connector_type(link->connector_signal);
8492
8493 res = drm_connector_init_with_ddc(
8494 dm->ddev,
8495 &aconnector->base,
8496 &amdgpu_dm_connector_funcs,
8497 connector_type,
8498 &i2c->base);
8499
8500 if (res) {
8501 DRM_ERROR("connector_init failed\n");
8502 aconnector->connector_id = -1;
8503 goto out_free;
8504 }
8505
8506 drm_connector_helper_add(
8507 &aconnector->base,
8508 &amdgpu_dm_connector_helper_funcs);
8509
8510 amdgpu_dm_connector_init_helper(
8511 dm,
8512 aconnector,
8513 connector_type,
8514 link,
8515 link_index);
8516
8517 drm_connector_attach_encoder(
8518 &aconnector->base, &aencoder->base);
8519
8520 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8521 || connector_type == DRM_MODE_CONNECTOR_eDP)
8522 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8523
8524out_free:
8525 if (res) {
8526 kfree(i2c);
8527 aconnector->i2c = NULL;
8528 }
8529 return res;
8530}
8531
8532int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8533{
8534 switch (adev->mode_info.num_crtc) {
8535 case 1:
8536 return 0x1;
8537 case 2:
8538 return 0x3;
8539 case 3:
8540 return 0x7;
8541 case 4:
8542 return 0xf;
8543 case 5:
8544 return 0x1f;
8545 case 6:
8546 default:
8547 return 0x3f;
8548 }
8549}
8550
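/*
 * The switch in amdgpu_dm_get_encoder_crtc_mask() above is equivalent to a
 * saturating low-bits mask; a standalone sketch (crtc_mask() is a
 * hypothetical helper, not part of this driver):
 */
#include <stdint.h>

static uint32_t crtc_mask(int num_crtc)
{
	/* The switch treats anything outside 1..5 as the 6-CRTC default. */
	if (num_crtc < 1 || num_crtc > 6)
		num_crtc = 6;
	return (1u << num_crtc) - 1;	/* e.g. 4 CRTCs -> 0b1111 = 0xf */
}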
8551static int amdgpu_dm_encoder_init(struct drm_device *dev,
8552 struct amdgpu_encoder *aencoder,
8553 uint32_t link_index)
8554{
8555 struct amdgpu_device *adev = drm_to_adev(dev);
8556
8557 int res = drm_encoder_init(dev,
8558 &aencoder->base,
8559 &amdgpu_dm_encoder_funcs,
8560 DRM_MODE_ENCODER_TMDS,
8561 NULL);
8562
8563 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8564
8565 if (!res)
8566 aencoder->encoder_id = link_index;
8567 else
8568 aencoder->encoder_id = -1;
8569
8570 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8571
8572 return res;
8573}
8574
8575static void manage_dm_interrupts(struct amdgpu_device *adev,
8576 struct amdgpu_crtc *acrtc,
8577 bool enable)
8578{
8579 /*
8580 * We have no guarantee that the frontend index maps to the same
8581 * backend index - some even map to more than one.
8582 *
8583 * TODO: Use a different interrupt or check DC itself for the mapping.
8584 */
8585 int irq_type =
8586 amdgpu_display_crtc_idx_to_irq_type(
8587 adev,
8588 acrtc->crtc_id);
8589
8590 if (enable) {
8591 drm_crtc_vblank_on(&acrtc->base);
8592 amdgpu_irq_get(
8593 adev,
8594 &adev->pageflip_irq,
8595 irq_type);
8596#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8597 amdgpu_irq_get(
8598 adev,
8599 &adev->vline0_irq,
8600 irq_type);
8601#endif
8602 } else {
8603#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8604 amdgpu_irq_put(
8605 adev,
8606 &adev->vline0_irq,
8607 irq_type);
8608#endif
8609 amdgpu_irq_put(
8610 adev,
8611 &adev->pageflip_irq,
8612 irq_type);
8613 drm_crtc_vblank_off(&acrtc->base);
8614 }
8615}
8616
8617static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8618 struct amdgpu_crtc *acrtc)
8619{
8620 int irq_type =
8621 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8622
8623 /*
8624 * This reads the current state for the IRQ and force-reapplies
8625 * the setting to hardware.
8626 */
8627 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8628}
8629
8630static bool
8631is_scaling_state_different(const struct dm_connector_state *dm_state,
8632 const struct dm_connector_state *old_dm_state)
8633{
8634 if (dm_state->scaling != old_dm_state->scaling)
8635 return true;
8636 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8637 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8638 return true;
8639 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8640 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8641 return true;
8642 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8643 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8644 return true;
8645 return false;
8646}
8647
8648#ifdef CONFIG_DRM_AMD_DC_HDCP
8649static bool is_content_protection_different(struct drm_connector_state *state,
8650 const struct drm_connector_state *old_state,
8651 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8652{
8653 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8654 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8655
8656 /* Handles: Type0/1 change */
8657 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8658 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8659 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8660 return true;
8661 }
8662
8663 /* CP is being re-enabled, ignore this.
8664 *
8665 * Handles: ENABLED -> DESIRED
8666 */
8667 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8668 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8669 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8670 return false;
8671 }
8672
8673 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8674 *
8675 * Handles: UNDESIRED -> ENABLED
8676 */
8677 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8678 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8679 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8680
8681 /* Stream removed and re-enabled
8682 *
8683 * Can sometimes overlap with the HPD case,
8684 * thus set update_hdcp to false to avoid
8685 * setting HDCP multiple times.
8686 *
8687 * Handles: DESIRED -> DESIRED (Special case)
8688 */
8689 if (!(old_state->crtc && old_state->crtc->enabled) &&
8690 state->crtc && state->crtc->enabled &&
8691 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8692 dm_con_state->update_hdcp = false;
8693 return true;
8694 }
8695
8696 /* Hot-plug, headless s3, dpms
8697 *
8698 * Only start HDCP if the display is connected/enabled.
8699 * update_hdcp flag will be set to false until the next
8700 * HPD comes in.
8701 *
8702 * Handles: DESIRED -> DESIRED (Special case)
8703 */
8704 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8705 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8706 dm_con_state->update_hdcp = false;
8707 return true;
8708 }
8709
8710 /*
8711 * Handles: UNDESIRED -> UNDESIRED
8712 * DESIRED -> DESIRED
8713 * ENABLED -> ENABLED
8714 */
8715 if (old_state->content_protection == state->content_protection)
8716 return false;
8717
8718 /*
8719 * Handles: UNDESIRED -> DESIRED
8720 * DESIRED -> UNDESIRED
8721 * ENABLED -> UNDESIRED
8722 */
8723 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8724 return true;
8725
8726 /*
8727 * Handles: DESIRED -> ENABLED
8728 */
8729 return false;
8730}
8731
8732#endif
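/*
 * A simplified standalone model of the content-protection transition table
 * implemented above, ignoring the hdcp_content_type and update_hdcp special
 * cases; the enum and function names are hypothetical, not part of this
 * driver.
 */
#include <stdbool.h>

enum cp_state { CP_UNDESIRED, CP_DESIRED, CP_ENABLED };

static bool cp_needs_update(enum cp_state old, enum cp_state new)
{
	/* S3 resume: a restored ENABLED with an UNDESIRED old state is
	 * downgraded to DESIRED before the comparison. */
	if (old == CP_UNDESIRED && new == CP_ENABLED)
		new = CP_DESIRED;
	/* ENABLED -> DESIRED: CP is being re-enabled, ignore. */
	if (old == CP_ENABLED && new == CP_DESIRED)
		return false;
	/* No transition: nothing to do. */
	if (old == new)
		return false;
	/* DESIRED -> ENABLED is completed by the HDCP work queue itself. */
	return new != CP_ENABLED;
}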
8733static void remove_stream(struct amdgpu_device *adev,
8734 struct amdgpu_crtc *acrtc,
8735 struct dc_stream_state *stream)
8736{
8737 /* this is the update mode case */
8738
8739 acrtc->otg_inst = -1;
8740 acrtc->enabled = false;
8741}
8742
8743static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8744 struct dc_cursor_position *position)
8745{
8746 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8747 int x, y;
8748 int xorigin = 0, yorigin = 0;
8749
8750 if (!crtc || !plane->state->fb)
8751 return 0;
8752
8753 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8754 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8755 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8756 __func__,
8757 plane->state->crtc_w,
8758 plane->state->crtc_h);
8759 return -EINVAL;
8760 }
8761
8762 x = plane->state->crtc_x;
8763 y = plane->state->crtc_y;
8764
8765 if (x <= -amdgpu_crtc->max_cursor_width ||
8766 y <= -amdgpu_crtc->max_cursor_height)
8767 return 0;
8768
8769 if (x < 0) {
8770 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8771 x = 0;
8772 }
8773 if (y < 0) {
8774 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8775 y = 0;
8776 }
8777 position->enable = true;
8778 position->translate_by_source = true;
8779 position->x = x;
8780 position->y = y;
8781 position->x_hotspot = xorigin;
8782 position->y_hotspot = yorigin;
8783
8784 return 0;
8785}
8786
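/*
 * A standalone worked example of the clamping in get_cursor_position()
 * above: a cursor partially off the top-left corner is moved to 0,0 and
 * the overhang is absorbed by the hotspot. The input values are
 * assumptions for illustration.
 */
#include <stdio.h>

int main(void)
{
	int x = -10, y = 5;		/* plane->state->crtc_x/y */
	int max_cursor_width = 128;	/* amdgpu_crtc->max_cursor_width */
	int xorigin = 0, yorigin = 0;

	if (x < 0) {
		/* min(-x, max_cursor_width - 1), as in the code above */
		xorigin = -x < max_cursor_width - 1 ? -x : max_cursor_width - 1;
		x = 0;
	}
	/* y >= 0 here, so yorigin stays 0 */
	printf("x=%d xorigin=%d y=%d yorigin=%d\n", x, xorigin, y, yorigin);
	/* prints: x=0 xorigin=10 y=5 yorigin=0 */
	return 0;
}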
8787static void handle_cursor_update(struct drm_plane *plane,
8788 struct drm_plane_state *old_plane_state)
8789{
8790 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8791 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8792 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8793 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8794 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8795 uint64_t address = afb ? afb->address : 0;
8796 struct dc_cursor_position position = {0};
8797 struct dc_cursor_attributes attributes;
8798 int ret;
8799
8800 if (!plane->state->fb && !old_plane_state->fb)
8801 return;
8802
8803 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8804 __func__,
8805 amdgpu_crtc->crtc_id,
8806 plane->state->crtc_w,
8807 plane->state->crtc_h);
8808
8809 ret = get_cursor_position(plane, crtc, &position);
8810 if (ret)
8811 return;
8812
8813 if (!position.enable) {
8814 /* turn off cursor */
8815 if (crtc_state && crtc_state->stream) {
8816 mutex_lock(&adev->dm.dc_lock);
8817 dc_stream_set_cursor_position(crtc_state->stream,
8818 &position);
8819 mutex_unlock(&adev->dm.dc_lock);
8820 }
8821 return;
8822 }
8823
8824 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8825 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8826
8827 memset(&attributes, 0, sizeof(attributes));
8828 attributes.address.high_part = upper_32_bits(address);
8829 attributes.address.low_part = lower_32_bits(address);
8830 attributes.width = plane->state->crtc_w;
8831 attributes.height = plane->state->crtc_h;
8832 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8833 attributes.rotation_angle = 0;
8834 attributes.attribute_flags.value = 0;
8835
8836 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8837
8838 if (crtc_state->stream) {
8839 mutex_lock(&adev->dm.dc_lock);
8840 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8841 &attributes))
8842 DRM_ERROR("DC failed to set cursor attributes\n");
8843
8844 if (!dc_stream_set_cursor_position(crtc_state->stream,
8845 &position))
8846 DRM_ERROR("DC failed to set cursor position\n");
8847 mutex_unlock(&adev->dm.dc_lock);
8848 }
8849}
8850
8851static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8852{
8853
8854 assert_spin_locked(&acrtc->base.dev->event_lock);
8855 WARN_ON(acrtc->event);
8856
8857 acrtc->event = acrtc->base.state->event;
8858
8859 /* Set the flip status */
8860 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8861
8862 /* Mark this event as consumed */
8863 acrtc->base.state->event = NULL;
8864
8865 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8866 acrtc->crtc_id);
8867}
8868
8869static void update_freesync_state_on_stream(
8870 struct amdgpu_display_manager *dm,
8871 struct dm_crtc_state *new_crtc_state,
8872 struct dc_stream_state *new_stream,
8873 struct dc_plane_state *surface,
8874 u32 flip_timestamp_in_us)
8875{
8876 struct mod_vrr_params vrr_params;
8877 struct dc_info_packet vrr_infopacket = {0};
8878 struct amdgpu_device *adev = dm->adev;
8879 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8880 unsigned long flags;
8881 bool pack_sdp_v1_3 = false;
8882
8883 if (!new_stream)
8884 return;
8885
8886 /*
8887 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8888 * For now it's sufficient to just guard against these conditions.
8889 */
8890
8891 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8892 return;
8893
8894 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8895 vrr_params = acrtc->dm_irq_params.vrr_params;
8896
8897 if (surface) {
8898 mod_freesync_handle_preflip(
8899 dm->freesync_module,
8900 surface,
8901 new_stream,
8902 flip_timestamp_in_us,
8903 &vrr_params);
8904
8905 if (adev->family < AMDGPU_FAMILY_AI &&
8906 amdgpu_dm_vrr_active(new_crtc_state)) {
8907 mod_freesync_handle_v_update(dm->freesync_module,
8908 new_stream, &vrr_params);
8909
8910 /* Need to call this before the frame ends. */
8911 dc_stream_adjust_vmin_vmax(dm->dc,
8912 new_crtc_state->stream,
8913 &vrr_params.adjust);
8914 }
8915 }
8916
8917 mod_freesync_build_vrr_infopacket(
8918 dm->freesync_module,
8919 new_stream,
8920 &vrr_params,
8921 PACKET_TYPE_VRR,
8922 TRANSFER_FUNC_UNKNOWN,
8923 &vrr_infopacket,
8924 pack_sdp_v1_3);
8925
8926 new_crtc_state->freesync_timing_changed |=
8927 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8928 &vrr_params.adjust,
8929 sizeof(vrr_params.adjust)) != 0);
8930
8931 new_crtc_state->freesync_vrr_info_changed |=
8932 (memcmp(&new_crtc_state->vrr_infopacket,
8933 &vrr_infopacket,
8934 sizeof(vrr_infopacket)) != 0);
8935
8936 acrtc->dm_irq_params.vrr_params = vrr_params;
8937 new_crtc_state->vrr_infopacket = vrr_infopacket;
8938
8939 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8940 new_stream->vrr_infopacket = vrr_infopacket;
8941
8942 if (new_crtc_state->freesync_vrr_info_changed)
8943 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8944 new_crtc_state->base.crtc->base.id,
8945 (int)new_crtc_state->base.vrr_enabled,
8946 (int)vrr_params.state);
8947
8948 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8949}
8950
8951static void update_stream_irq_parameters(
8952 struct amdgpu_display_manager *dm,
8953 struct dm_crtc_state *new_crtc_state)
8954{
8955 struct dc_stream_state *new_stream = new_crtc_state->stream;
8956 struct mod_vrr_params vrr_params;
8957 struct mod_freesync_config config = new_crtc_state->freesync_config;
8958 struct amdgpu_device *adev = dm->adev;
8959 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8960 unsigned long flags;
8961
8962 if (!new_stream)
8963 return;
8964
8965 /*
8966 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8967 * For now it's sufficient to just guard against these conditions.
8968 */
8969 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8970 return;
8971
8972 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8973 vrr_params = acrtc->dm_irq_params.vrr_params;
8974
8975 if (new_crtc_state->vrr_supported &&
8976 config.min_refresh_in_uhz &&
8977 config.max_refresh_in_uhz) {
8978 /*
8979 * if freesync compatible mode was set, config.state will be set
8980 * in atomic check
8981 */
8982 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8983 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8984 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8985 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8986 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8987 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8988 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8989 } else {
8990 config.state = new_crtc_state->base.vrr_enabled ?
8991 VRR_STATE_ACTIVE_VARIABLE :
8992 VRR_STATE_INACTIVE;
8993 }
8994 } else {
8995 config.state = VRR_STATE_UNSUPPORTED;
8996 }
8997
8998 mod_freesync_build_vrr_params(dm->freesync_module,
8999 new_stream,
9000 &config, &vrr_params);
9001
9002 new_crtc_state->freesync_timing_changed |=
9003 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9004 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9005
9006 new_crtc_state->freesync_config = config;
9007 /* Copy state for access from DM IRQ handler */
9008 acrtc->dm_irq_params.freesync_config = config;
9009 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9010 acrtc->dm_irq_params.vrr_params = vrr_params;
9011 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9012}
9013
9014static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9015 struct dm_crtc_state *new_state)
9016{
9017 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9018 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9019
9020 if (!old_vrr_active && new_vrr_active) {
9021 /* Transition VRR inactive -> active:
9022 * While VRR is active, we must not disable vblank irq, as a
9023 * reenable after disable would compute bogus vblank/pflip
9024 * timestamps if it likely happened inside display front-porch.
9025 *
9026 * We also need vupdate irq for the actual core vblank handling
9027 * at end of vblank.
9028 */
9029 dm_set_vupdate_irq(new_state->base.crtc, true);
9030 drm_crtc_vblank_get(new_state->base.crtc);
9031 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9032 __func__, new_state->base.crtc->base.id);
9033 } else if (old_vrr_active && !new_vrr_active) {
9034 /* Transition VRR active -> inactive:
9035 * Allow vblank irq disable again for fixed refresh rate.
9036 */
9037 dm_set_vupdate_irq(new_state->base.crtc, false);
9038 drm_crtc_vblank_put(new_state->base.crtc);
9039 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9040 __func__, new_state->base.crtc->base.id);
9041 }
9042}
9043
9044static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9045{
9046 struct drm_plane *plane;
9047 struct drm_plane_state *old_plane_state;
9048 int i;
9049
9050 /*
9051 * TODO: Make this per-stream so we don't issue redundant updates for
9052 * commits with multiple streams.
9053 */
9054 for_each_old_plane_in_state(state, plane, old_plane_state, i)
9055 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9056 handle_cursor_update(plane, old_plane_state);
9057}
9058
9059static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9060 struct dc_state *dc_state,
9061 struct drm_device *dev,
9062 struct amdgpu_display_manager *dm,
9063 struct drm_crtc *pcrtc,
9064 bool wait_for_vblank)
9065{
9066 uint32_t i;
9067 uint64_t timestamp_ns;
9068 struct drm_plane *plane;
9069 struct drm_plane_state *old_plane_state, *new_plane_state;
9070 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9071 struct drm_crtc_state *new_pcrtc_state =
9072 drm_atomic_get_new_crtc_state(state, pcrtc);
9073 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9074 struct dm_crtc_state *dm_old_crtc_state =
9075 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9076 int planes_count = 0, vpos, hpos;
9077 long r;
9078 unsigned long flags;
9079 struct amdgpu_bo *abo;
9080 uint32_t target_vblank, last_flip_vblank;
9081 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9082 bool pflip_present = false;
9083 struct {
9084 struct dc_surface_update surface_updates[MAX_SURFACES];
9085 struct dc_plane_info plane_infos[MAX_SURFACES];
9086 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9087 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9088 struct dc_stream_update stream_update;
9089 } *bundle;
9090
9091 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9092
9093 if (!bundle) {
9094 dm_error("Failed to allocate update bundle\n");
9095 goto cleanup;
9096 }
9097
9098 /*
9099 * Disable the cursor first if we're disabling all the planes.
9100 * It'll remain on the screen after the planes are re-enabled
9101 * if we don't.
9102 */
9103 if (acrtc_state->active_planes == 0)
9104 amdgpu_dm_commit_cursors(state);
9105
9106 /* update planes when needed */
9107 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9108 struct drm_crtc *crtc = new_plane_state->crtc;
9109 struct drm_crtc_state *new_crtc_state;
9110 struct drm_framebuffer *fb = new_plane_state->fb;
9111 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9112 bool plane_needs_flip;
9113 struct dc_plane_state *dc_plane;
9114 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9115
9116 /* Cursor plane is handled after stream updates */
9117 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9118 continue;
9119
9120 if (!fb || !crtc || pcrtc != crtc)
9121 continue;
9122
9123 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9124 if (!new_crtc_state->active)
9125 continue;
9126
9127 dc_plane = dm_new_plane_state->dc_state;
9128
9129 bundle->surface_updates[planes_count].surface = dc_plane;
9130 if (new_pcrtc_state->color_mgmt_changed) {
9131 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9132 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9133 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9134 }
9135
9136 fill_dc_scaling_info(dm->adev, new_plane_state,
9137 &bundle->scaling_infos[planes_count]);
9138
9139 bundle->surface_updates[planes_count].scaling_info =
9140 &bundle->scaling_infos[planes_count];
9141
9142 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9143
9144 pflip_present = pflip_present || plane_needs_flip;
9145
9146 if (!plane_needs_flip) {
9147 planes_count += 1;
9148 continue;
9149 }
9150
9151 abo = gem_to_amdgpu_bo(fb->obj[0]);
9152
9153 /*
9154 * Wait for all fences on this FB. Do limited wait to avoid
9155 * deadlock during GPU reset when this fence will not signal
9156 * but we hold reservation lock for the BO.
9157 */
9158 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9159 msecs_to_jiffies(5000));
9160 if (unlikely(r <= 0))
9161 DRM_ERROR("Waiting for fences timed out!");
9162
9163 fill_dc_plane_info_and_addr(
9164 dm->adev, new_plane_state,
9165 afb->tiling_flags,
9166 &bundle->plane_infos[planes_count],
9167 &bundle->flip_addrs[planes_count].address,
9168 afb->tmz_surface, false);
9169
9170 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9171 new_plane_state->plane->index,
9172 bundle->plane_infos[planes_count].dcc.enable);
9173
9174 bundle->surface_updates[planes_count].plane_info =
9175 &bundle->plane_infos[planes_count];
9176
9177 /*
9178 * Only allow immediate flips for fast updates that don't
9179 * change FB pitch, DCC state, rotation or mirroring.
9180 */
9181 bundle->flip_addrs[planes_count].flip_immediate =
9182 crtc->state->async_flip &&
9183 acrtc_state->update_type == UPDATE_TYPE_FAST;
9184
9185 timestamp_ns = ktime_get_ns();
9186 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9187 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9188 bundle->surface_updates[planes_count].surface = dc_plane;
9189
9190 if (!bundle->surface_updates[planes_count].surface) {
9191 DRM_ERROR("No surface for CRTC: id=%d\n",
9192 acrtc_attach->crtc_id);
9193 continue;
9194 }
9195
9196 if (plane == pcrtc->primary)
9197 update_freesync_state_on_stream(
9198 dm,
9199 acrtc_state,
9200 acrtc_state->stream,
9201 dc_plane,
9202 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9203
9204 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9205 __func__,
9206 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9207 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9208
9209 planes_count += 1;
9210
9211 }
9212
9213 if (pflip_present) {
9214 if (!vrr_active) {
9215 /* Use old throttling in non-vrr fixed refresh rate mode
9216 * to keep flip scheduling based on target vblank counts
9217 * working in a backwards compatible way, e.g., for
9218 * clients using the GLX_OML_sync_control extension or
9219 * DRI3/Present extension with defined target_msc.
9220 */
9221 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9222 }
9223 else {
9224 /* For variable refresh rate mode only:
9225 * Get vblank of last completed flip to avoid > 1 vrr
9226 * flips per video frame by use of throttling, but allow
9227 * flip programming anywhere in the possibly large
9228 * variable vrr vblank interval for fine-grained flip
9229 * timing control and more opportunity to avoid stutter
9230 * on late submission of flips.
9231 */
9232 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9233 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9234 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9235 }
9236
9237 target_vblank = last_flip_vblank + wait_for_vblank;
9238
9239 /*
9240 * Wait until we're out of the vertical blank period before the one
9241 * targeted by the flip
9242 */
9243 while ((acrtc_attach->enabled &&
9244 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9245 0, &vpos, &hpos, NULL,
9246 NULL, &pcrtc->hwmode)
9247 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9248 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9249 (int)(target_vblank -
9250 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9251 usleep_range(1000, 1100);
9252 }
9253
9254 /**
9255 * Prepare the flip event for the pageflip interrupt to handle.
9256 *
9257 * This only works in the case where we've already turned on the
9258 * appropriate hardware blocks (eg. HUBP) so in the transition case
9259 * from 0 -> n planes we have to skip a hardware generated event
9260 * and rely on sending it from software.
9261 */
9262 if (acrtc_attach->base.state->event &&
9263 acrtc_state->active_planes > 0 &&
9264 !acrtc_state->force_dpms_off) {
9265 drm_crtc_vblank_get(pcrtc);
9266
9267 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9268
9269 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9270 prepare_flip_isr(acrtc_attach);
9271
9272 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9273 }
9274
9275 if (acrtc_state->stream) {
9276 if (acrtc_state->freesync_vrr_info_changed)
9277 bundle->stream_update.vrr_infopacket =
9278 &acrtc_state->stream->vrr_infopacket;
9279 }
9280 }
9281
9282 /* Update the planes if changed or disable if we don't have any. */
9283 if ((planes_count || acrtc_state->active_planes == 0) &&
9284 acrtc_state->stream) {
9285#if defined(CONFIG_DRM_AMD_DC_DCN)
9286 /*
9287 * If PSR or idle optimizations are enabled then flush out
9288 * any pending work before hardware programming.
9289 */
9290 if (dm->vblank_control_workqueue)
9291 flush_workqueue(dm->vblank_control_workqueue);
9292#endif
9293
9294 bundle->stream_update.stream = acrtc_state->stream;
9295 if (new_pcrtc_state->mode_changed) {
9296 bundle->stream_update.src = acrtc_state->stream->src;
9297 bundle->stream_update.dst = acrtc_state->stream->dst;
9298 }
9299
9300 if (new_pcrtc_state->color_mgmt_changed) {
9301 /*
9302 * TODO: This isn't fully correct since we've actually
9303 * already modified the stream in place.
9304 */
9305 bundle->stream_update.gamut_remap =
9306 &acrtc_state->stream->gamut_remap_matrix;
9307 bundle->stream_update.output_csc_transform =
9308 &acrtc_state->stream->csc_color_matrix;
9309 bundle->stream_update.out_transfer_func =
9310 acrtc_state->stream->out_transfer_func;
9311 }
9312
9313 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9314 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9315 bundle->stream_update.abm_level = &acrtc_state->abm_level;
9316
9317 /*
9318 * If FreeSync state on the stream has changed then we need to
9319 * re-adjust the min/max bounds now that DC doesn't handle this
9320 * as part of commit.
9321 */
9322 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9323 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9324 dc_stream_adjust_vmin_vmax(
9325 dm->dc, acrtc_state->stream,
9326 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9327 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9328 }
9329 mutex_lock(&dm->dc_lock);
9330 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9331 acrtc_state->stream->link->psr_settings.psr_allow_active)
9332 amdgpu_dm_psr_disable(acrtc_state->stream);
9333
9334 dc_commit_updates_for_stream(dm->dc,
9335 bundle->surface_updates,
9336 planes_count,
9337 acrtc_state->stream,
9338 &bundle->stream_update,
9339 dc_state);
9340
9341 /**
9342 * Enable or disable the interrupts on the backend.
9343 *
9344 * Most pipes are put into power gating when unused.
9345 *
9346 * When power gating is enabled on a pipe we lose the
9347 * interrupt enablement state when power gating is disabled.
9348 *
9349 * So we need to update the IRQ control state in hardware
9350 * whenever the pipe turns on (since it could be previously
9351 * power gated) or off (since some pipes can't be power gated
9352 * on some ASICs).
9353 */
9354 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9355 dm_update_pflip_irq_state(drm_to_adev(dev),
9356 acrtc_attach);
9357
9358 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9359 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9360 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9361 amdgpu_dm_link_setup_psr(acrtc_state->stream);
9362
9363 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9364 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9365 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9366 struct amdgpu_dm_connector *aconn =
9367 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9368
9369 if (aconn->psr_skip_count > 0)
9370 aconn->psr_skip_count--;
9371
9372 /* Allow PSR when skip count is 0. */
9373 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9374 } else {
9375 acrtc_attach->dm_irq_params.allow_psr_entry = false;
9376 }
9377
9378 mutex_unlock(&dm->dc_lock);
9379 }
9380
9381 /*
9382 * Update cursor state *after* programming all the planes.
9383 * This avoids redundant programming in the case where we're going
9384 * to be disabling a single plane - those pipes are being disabled.
9385 */
9386 if (acrtc_state->active_planes)
9387 amdgpu_dm_commit_cursors(state);
9388
9389cleanup:
9390 kfree(bundle);
9391}
9392
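/*
 * A standalone sketch of the vblank-count comparison used in the flip
 * throttling loop of amdgpu_dm_commit_planes() above: casting the unsigned
 * difference to int keeps the "target still ahead" test correct across
 * 32-bit counter wraparound. The counter values are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t last_flip_vblank = 0xffffffff;	/* about to wrap */
	uint32_t wait_for_vblank = 1;
	uint32_t target_vblank = last_flip_vblank + wait_for_vblank; /* wraps to 0 */
	uint32_t current_vblank = 0xfffffffe;

	/* (int)(target - current) > 0 means the target is still ahead. */
	printf("still waiting: %d\n", (int)(target_vblank - current_vblank) > 0);
	/* 0 - 0xfffffffe wraps to 2; (int)2 > 0, so this prints 1 */
	return 0;
}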
9393static void amdgpu_dm_commit_audio(struct drm_device *dev,
9394 struct drm_atomic_state *state)
9395{
9396 struct amdgpu_device *adev = drm_to_adev(dev);
9397 struct amdgpu_dm_connector *aconnector;
9398 struct drm_connector *connector;
9399 struct drm_connector_state *old_con_state, *new_con_state;
9400 struct drm_crtc_state *new_crtc_state;
9401 struct dm_crtc_state *new_dm_crtc_state;
9402 const struct dc_stream_status *status;
9403 int i, inst;
9404
9405 /* Notify device removals. */
9406 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9407 if (old_con_state->crtc != new_con_state->crtc) {
9408 /* CRTC changes require notification. */
9409 goto notify;
9410 }
9411
9412 if (!new_con_state->crtc)
9413 continue;
9414
9415 new_crtc_state = drm_atomic_get_new_crtc_state(
9416 state, new_con_state->crtc);
9417
9418 if (!new_crtc_state)
9419 continue;
9420
9421 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9422 continue;
9423
9424 notify:
9425 aconnector = to_amdgpu_dm_connector(connector);
9426
9427 mutex_lock(&adev->dm.audio_lock);
9428 inst = aconnector->audio_inst;
9429 aconnector->audio_inst = -1;
9430 mutex_unlock(&adev->dm.audio_lock);
9431
9432 amdgpu_dm_audio_eld_notify(adev, inst);
9433 }
9434
9435 /* Notify audio device additions. */
9436 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9437 if (!new_con_state->crtc)
9438 continue;
9439
9440 new_crtc_state = drm_atomic_get_new_crtc_state(
9441 state, new_con_state->crtc);
9442
9443 if (!new_crtc_state)
9444 continue;
9445
9446 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9447 continue;
9448
9449 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9450 if (!new_dm_crtc_state->stream)
9451 continue;
9452
9453 status = dc_stream_get_status(new_dm_crtc_state->stream);
9454 if (!status)
9455 continue;
9456
9457 aconnector = to_amdgpu_dm_connector(connector);
9458
9459 mutex_lock(&adev->dm.audio_lock);
9460 inst = status->audio_inst;
9461 aconnector->audio_inst = inst;
9462 mutex_unlock(&adev->dm.audio_lock);
9463
9464 amdgpu_dm_audio_eld_notify(adev, inst);
9465 }
9466}
9467
9468/*
9469 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9470 * @crtc_state: the DRM CRTC state
9471 * @stream_state: the DC stream state.
9472 *
9473 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9474 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9475 */
9476static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9477 struct dc_stream_state *stream_state)
9478{
9479 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9480}
9481
9482/**
9483 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9484 * @state: The atomic state to commit
9485 *
9486 * This will tell DC to commit the constructed DC state from atomic_check,
9487 * programming the hardware. Any failure here implies a hardware failure, since
9488 * atomic check should have filtered out anything non-kosher.
9489 */
9490static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9491{
9492 struct drm_device *dev = state->dev;
9493 struct amdgpu_device *adev = drm_to_adev(dev);
9494 struct amdgpu_display_manager *dm = &adev->dm;
9495 struct dm_atomic_state *dm_state;
9496 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9497 uint32_t i, j;
9498 struct drm_crtc *crtc;
9499 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9500 unsigned long flags;
9501 bool wait_for_vblank = true;
9502 struct drm_connector *connector;
9503 struct drm_connector_state *old_con_state, *new_con_state;
9504 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9505 int crtc_disable_count = 0;
9506 bool mode_set_reset_required = false;
9507
9508 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9509
9510 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9511
9512 dm_state = dm_atomic_get_new_state(state);
9513 if (dm_state && dm_state->context) {
9514 dc_state = dm_state->context;
9515 } else {
9516 /* No state changes, retain current state. */
9517 dc_state_temp = dc_create_state(dm->dc);
9518 ASSERT(dc_state_temp);
9519 dc_state = dc_state_temp;
9520 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9521 }
9522
9523 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9524 new_crtc_state, i) {
9525 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9526
9527 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9528
9529 if (old_crtc_state->active &&
9530 (!new_crtc_state->active ||
9531 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9532 manage_dm_interrupts(adev, acrtc, false);
9533 dc_stream_release(dm_old_crtc_state->stream);
9534 }
9535 }
9536
9537 drm_atomic_helper_calc_timestamping_constants(state);
9538
9539 /* update changed items */
9540 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9541 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9542
9543 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9544 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9545
9546 DRM_DEBUG_ATOMIC(
9547 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9548 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9549 "connectors_changed:%d\n",
9550 acrtc->crtc_id,
9551 new_crtc_state->enable,
9552 new_crtc_state->active,
9553 new_crtc_state->planes_changed,
9554 new_crtc_state->mode_changed,
9555 new_crtc_state->active_changed,
9556 new_crtc_state->connectors_changed);
9557
9558 /* Disable cursor if disabling crtc */
9559 if (old_crtc_state->active && !new_crtc_state->active) {
9560 struct dc_cursor_position position;
9561
9562 memset(&position, 0, sizeof(position));
9563 mutex_lock(&dm->dc_lock);
9564 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9565 mutex_unlock(&dm->dc_lock);
9566 }
9567
9568 /* Copy all transient state flags into dc state */
9569 if (dm_new_crtc_state->stream) {
9570 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9571 dm_new_crtc_state->stream);
9572 }
9573
9574 /* handles headless hotplug case, updating new_state and
9575 * aconnector as needed
9576 */
9577
9578 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9579
9580 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9581
9582 if (!dm_new_crtc_state->stream) {
9583 /*
9584 * This could happen because of issues with
9585 * the delivery of userspace notifications.
9586 * In this case userspace tries to set a mode on
9587 * a display that is in fact disconnected;
9588 * dc_sink is NULL on the aconnector in this case.
9589 * We expect a mode reset to come soon.
9590 *
9591 * This can also happen when an unplug occurs
9592 * while the resume sequence is still running.
9593 *
9594 * In this case, we want to pretend we still
9595 * have a sink to keep the pipe running so that
9596 * hw state is consistent with the sw state.
9597 */
9598 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9599 __func__, acrtc->base.base.id);
9600 continue;
9601 }
9602
9603 if (dm_old_crtc_state->stream)
9604 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9605
9606 pm_runtime_get_noresume(dev->dev);
9607
9608 acrtc->enabled = true;
9609 acrtc->hw_mode = new_crtc_state->mode;
9610 crtc->hwmode = new_crtc_state->mode;
9611 mode_set_reset_required = true;
9612 } else if (modereset_required(new_crtc_state)) {
9613 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9614 /* i.e. reset mode */
9615 if (dm_old_crtc_state->stream)
9616 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9617
9618 mode_set_reset_required = true;
9619 }
9620 } /* for_each_crtc_in_state() */
9621
9622 if (dc_state) {
9623 /* if there was a mode set or reset, disable eDP PSR */
9624 if (mode_set_reset_required) {
9625#if defined(CONFIG_DRM_AMD_DC_DCN)
9626 if (dm->vblank_control_workqueue)
9627 flush_workqueue(dm->vblank_control_workqueue);
9628#endif
9629 amdgpu_dm_psr_disable_all(dm);
9630 }
9631
9632 dm_enable_per_frame_crtc_master_sync(dc_state);
9633 mutex_lock(&dm->dc_lock);
9634 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9635#if defined(CONFIG_DRM_AMD_DC_DCN)
9636 /* Allow idle optimization when vblank count is 0 for display off */
9637 if (dm->active_vblank_irq_count == 0)
9638 dc_allow_idle_optimizations(dm->dc, true);
9639#endif
9640 mutex_unlock(&dm->dc_lock);
9641 }
9642
9643 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9644 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9645
9646 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9647
9648 if (dm_new_crtc_state->stream != NULL) {
9649 const struct dc_stream_status *status =
9650 dc_stream_get_status(dm_new_crtc_state->stream);
9651
9652 if (!status)
09f609c3
LL
9653 status = dc_stream_get_status_from_state(dc_state,
9654 dm_new_crtc_state->stream);
e7b07cee 9655 if (!status)
54d76575 9656			DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
9657 else
9658 acrtc->otg_inst = status->primary_otg_inst;
9659 }
9660 }
0c8620d6
BL
9661#ifdef CONFIG_DRM_AMD_DC_HDCP
9662 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9663 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9664 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9665 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9666
9667 new_crtc_state = NULL;
9668
9669 if (acrtc)
9670 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9671
9672 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9673
9674 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9675 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9676 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9677 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 9678 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
9679 continue;
9680 }
9681
9682 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
9683 hdcp_update_display(
9684 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 9685 new_con_state->hdcp_content_type,
0e86d3d4 9686 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
0c8620d6
BL
9687 }
9688#endif
e7b07cee 9689
02d6a6fc 9690 /* Handle connector state changes */
c2cea706 9691 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9692 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9693 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9694 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 9695 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 9696 struct dc_stream_update stream_update;
b232d4ed 9697 struct dc_info_packet hdr_packet;
e7b07cee 9698 struct dc_stream_status *status = NULL;
b232d4ed 9699 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 9700
efc8278e 9701 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
9702 memset(&stream_update, 0, sizeof(stream_update));
9703
44d09c6a 9704 if (acrtc) {
0bc9706d 9705 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
9706 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9707 }
0bc9706d 9708
e7b07cee 9709 /* Skip any modesets/resets */
0bc9706d 9710 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
9711 continue;
9712
54d76575 9713 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
9714 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9715
b232d4ed
NK
9716 scaling_changed = is_scaling_state_different(dm_new_con_state,
9717 dm_old_con_state);
9718
9719 abm_changed = dm_new_crtc_state->abm_level !=
9720 dm_old_crtc_state->abm_level;
9721
9722 hdr_changed =
72921cdf 9723 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
b232d4ed
NK
9724
9725 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 9726 continue;
e7b07cee 9727
b6e881c9 9728 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 9729 if (scaling_changed) {
02d6a6fc 9730 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 9731 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 9732
02d6a6fc
DF
9733 stream_update.src = dm_new_crtc_state->stream->src;
9734 stream_update.dst = dm_new_crtc_state->stream->dst;
9735 }
9736
b232d4ed 9737 if (abm_changed) {
02d6a6fc
DF
9738 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9739
9740 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9741 }
70e8ffc5 9742
b232d4ed
NK
9743 if (hdr_changed) {
9744 fill_hdr_info_packet(new_con_state, &hdr_packet);
9745 stream_update.hdr_static_metadata = &hdr_packet;
9746 }
9747
54d76575 9748 status = dc_stream_get_status(dm_new_crtc_state->stream);
57738ae4
ND
9749
9750 if (WARN_ON(!status))
9751 continue;
9752
3be5262e 9753 WARN_ON(!status->plane_count);
e7b07cee 9754
02d6a6fc
DF
9755 /*
9756 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9757 * Here we create an empty update on each plane.
9758 * To fix this, DC should permit updating only stream properties.
9759 */
9760 for (j = 0; j < status->plane_count; j++)
efc8278e 9761 dummy_updates[j].surface = status->plane_states[0];
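/*
 * Note (added for clarity): every dummy update points at
 * plane_states[0] with no surface flags set; per the TODO above,
 * the array exists only to satisfy DC's requirement that a stream
 * update be accompanied by a non-empty surface update list.
 */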
02d6a6fc
DF
9762
9763
9764 mutex_lock(&dm->dc_lock);
9765 dc_commit_updates_for_stream(dm->dc,
efc8278e 9766 dummy_updates,
02d6a6fc
DF
9767 status->plane_count,
9768 dm_new_crtc_state->stream,
efc8278e
AJ
9769 &stream_update,
9770 dc_state);
02d6a6fc 9771 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
9772 }
9773
b5e83f6f 9774 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 9775 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 9776 new_crtc_state, i) {
fe2a1965
LP
9777 if (old_crtc_state->active && !new_crtc_state->active)
9778 crtc_disable_count++;
9779
54d76575 9780 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 9781 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 9782
585d450c
AP
9783 /* For freesync config update on crtc state and params for irq */
9784 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 9785
66b0c973
MK
9786 /* Handle vrr on->off / off->on transitions */
9787 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9788 dm_new_crtc_state);
e7b07cee
HW
9789 }
9790
8fe684e9
NK
 9791	/*
 9792	 * Enable interrupts for CRTCs that are newly enabled or went through
 9793	 * a modeset. This is intentionally deferred until after the front end
 9794	 * state has been modified, so that the OTG is already running and the
 9795	 * IRQ handlers cannot observe stale or invalid state.
 9796	 */
9797 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9798 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 9799#ifdef CONFIG_DEBUG_FS
86bc2219 9800 bool configure_crc = false;
8e7b6fee 9801 enum amdgpu_dm_pipe_crc_source cur_crc_src;
d98af272
WL
9802#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9803 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9804#endif
9805 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9806 cur_crc_src = acrtc->dm_irq_params.crc_src;
9807 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 9808#endif
585d450c
AP
9809 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9810
8fe684e9
NK
9811 if (new_crtc_state->active &&
9812 (!old_crtc_state->active ||
9813 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
9814 dc_stream_retain(dm_new_crtc_state->stream);
9815 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 9816 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 9817
24eb9374 9818#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
 9819			/*
9820 * Frontend may have changed so reapply the CRC capture
9821 * settings for the stream.
9822 */
9823 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 9824
8e7b6fee 9825 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
9826 configure_crc = true;
9827#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
9828 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9829 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9830 acrtc->dm_irq_params.crc_window.update_win = true;
9831 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9832 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9833 crc_rd_wrk->crtc = crtc;
9834 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9835 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9836 }
86bc2219 9837#endif
e2881d6d 9838 }
c920888c 9839
86bc2219 9840 if (configure_crc)
bbc49fc0
WL
9841 if (amdgpu_dm_crtc_configure_crc_source(
9842 crtc, dm_new_crtc_state, cur_crc_src))
 9843					DRM_DEBUG_DRIVER("Failed to configure crc source\n");
24eb9374 9844#endif
8fe684e9
NK
9845 }
9846 }
e7b07cee 9847
420cd472 9848 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 9849 if (new_crtc_state->async_flip)
420cd472
DF
9850 wait_for_vblank = false;
9851
e7b07cee 9852 /* update planes when needed per crtc*/
5cc6dcbd 9853 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 9854 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9855
54d76575 9856 if (dm_new_crtc_state->stream)
eb3dc897 9857 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 9858 dm, crtc, wait_for_vblank);
e7b07cee
HW
9859 }
9860
6ce8f316
NK
9861 /* Update audio instances for each connector. */
9862 amdgpu_dm_commit_audio(dev, state);
9863
7230362c
AD
9864#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9865 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9866 /* restore the backlight level */
7fd13bae
AD
9867 for (i = 0; i < dm->num_of_edps; i++) {
9868 if (dm->backlight_dev[i] &&
9869 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9870 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9871 }
7230362c 9872#endif
e7b07cee
HW
9873 /*
9874 * send vblank event on all events not handled in flip and
9875 * mark consumed event for drm_atomic_helper_commit_hw_done
9876 */
4a580877 9877 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 9878 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9879
0bc9706d
LSL
9880 if (new_crtc_state->event)
9881 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 9882
0bc9706d 9883 new_crtc_state->event = NULL;
e7b07cee 9884 }
4a580877 9885 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 9886
29c8f234
LL
9887 /* Signal HW programming completion */
9888 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
9889
9890 if (wait_for_vblank)
320a1274 9891 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
9892
9893 drm_atomic_helper_cleanup_planes(dev, state);
97028037 9894
5f6fab24
AD
9895 /* return the stolen vga memory back to VRAM */
9896 if (!adev->mman.keep_stolen_vga_memory)
9897 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9898 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9899
1f6010a9
DF
9900 /*
9901 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
9902 * so we can put the GPU into runtime suspend if we're not driving any
9903 * displays anymore
9904 */
fe2a1965
LP
9905 for (i = 0; i < crtc_disable_count; i++)
9906 pm_runtime_put_autosuspend(dev->dev);
97028037 9907 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
9908
9909 if (dc_state_temp)
9910 dc_release_state(dc_state_temp);
e7b07cee
HW
9911}
9912
9913
9914static int dm_force_atomic_commit(struct drm_connector *connector)
9915{
9916 int ret = 0;
9917 struct drm_device *ddev = connector->dev;
9918 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9919 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9920 struct drm_plane *plane = disconnected_acrtc->base.primary;
9921 struct drm_connector_state *conn_state;
9922 struct drm_crtc_state *crtc_state;
9923 struct drm_plane_state *plane_state;
9924
9925 if (!state)
9926 return -ENOMEM;
9927
9928 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9929
9930 /* Construct an atomic state to restore previous display setting */
9931
9932 /*
9933 * Attach connectors to drm_atomic_state
9934 */
9935 conn_state = drm_atomic_get_connector_state(state, connector);
9936
9937 ret = PTR_ERR_OR_ZERO(conn_state);
9938 if (ret)
2dc39051 9939 goto out;
e7b07cee
HW
9940
 9941	/* Attach crtc to drm_atomic_state */
9942 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9943
9944 ret = PTR_ERR_OR_ZERO(crtc_state);
9945 if (ret)
2dc39051 9946 goto out;
e7b07cee
HW
9947
9948 /* force a restore */
9949 crtc_state->mode_changed = true;
9950
9951 /* Attach plane to drm_atomic_state */
9952 plane_state = drm_atomic_get_plane_state(state, plane);
9953
9954 ret = PTR_ERR_OR_ZERO(plane_state);
9955 if (ret)
2dc39051 9956 goto out;
e7b07cee
HW
9957
9958 /* Call commit internally with the state we just constructed */
9959 ret = drm_atomic_commit(state);
e7b07cee 9960
2dc39051 9961out:
e7b07cee 9962 drm_atomic_state_put(state);
2dc39051
VL
9963 if (ret)
9964 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
9965
9966 return ret;
9967}
9968
9969/*
1f6010a9
DF
 9970 * This function handles all cases when a mode set does not come upon hotplug.
 9971 * This includes when a display is unplugged and then plugged back into the
 9972 * same port, and when running without usermode desktop manager support.
e7b07cee 9973 */
3ee6b26b
AD
9974void dm_restore_drm_connector_state(struct drm_device *dev,
9975 struct drm_connector *connector)
e7b07cee 9976{
c84dec2f 9977 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
9978 struct amdgpu_crtc *disconnected_acrtc;
9979 struct dm_crtc_state *acrtc_state;
9980
9981 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9982 return;
9983
9984 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
9985 if (!disconnected_acrtc)
9986 return;
e7b07cee 9987
70e8ffc5
HW
9988 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9989 if (!acrtc_state->stream)
e7b07cee
HW
9990 return;
9991
9992 /*
 9993	 * If the previous sink has not been released and is different from the
 9994	 * current one, we deduce that we cannot rely on a usermode call to turn
 9995	 * the display back on, so we do it here.
9996 */
9997 if (acrtc_state->stream->sink != aconnector->dc_sink)
9998 dm_force_atomic_commit(&aconnector->base);
9999}
10000
1f6010a9 10001/*
e7b07cee
HW
10002 * Grabs all modesetting locks to serialize against any blocking commits,
10003 * and waits for completion of all non-blocking commits.
10004 */
3ee6b26b
AD
10005static int do_aquire_global_lock(struct drm_device *dev,
10006 struct drm_atomic_state *state)
e7b07cee
HW
10007{
10008 struct drm_crtc *crtc;
10009 struct drm_crtc_commit *commit;
10010 long ret;
10011
1f6010a9
DF
10012 /*
10013	 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
10014	 * ensure that when the framework releases it, the
10015	 * extra locks we take here will get released too.
10016 */
10017 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10018 if (ret)
10019 return ret;
10020
10021 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10022 spin_lock(&crtc->commit_lock);
10023 commit = list_first_entry_or_null(&crtc->commit_list,
10024 struct drm_crtc_commit, commit_entry);
10025 if (commit)
10026 drm_crtc_commit_get(commit);
10027 spin_unlock(&crtc->commit_lock);
10028
10029 if (!commit)
10030 continue;
10031
1f6010a9
DF
10032 /*
10033 * Make sure all pending HW programming completed and
e7b07cee
HW
10034 * page flips done
10035 */
10036 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10037
10038 if (ret > 0)
10039 ret = wait_for_completion_interruptible_timeout(
10040 &commit->flip_done, 10*HZ);
10041
10042 if (ret == 0)
10043 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 10044 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
10045
10046 drm_crtc_commit_put(commit);
10047 }
10048
10049 return ret < 0 ? ret : 0;
10050}
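/*
 * Note (added for clarity): hw_done completes once the commit's
 * hardware programming has been submitted, while flip_done also
 * waits for the resulting flip to finish on a vblank. Waiting on
 * both for the latest commit of every CRTC serializes this commit
 * behind all outstanding nonblocking commits.
 */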
10051
bb47de73
NK
10052static void get_freesync_config_for_crtc(
10053 struct dm_crtc_state *new_crtc_state,
10054 struct dm_connector_state *new_con_state)
98e6436d
AK
10055{
10056 struct mod_freesync_config config = {0};
98e6436d
AK
10057 struct amdgpu_dm_connector *aconnector =
10058 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 10059 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 10060 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 10061 bool fs_vid_mode = false;
98e6436d 10062
a057ec46 10063 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
10064 vrefresh >= aconnector->min_vfreq &&
10065 vrefresh <= aconnector->max_vfreq;
bb47de73 10066
a057ec46
IB
10067 if (new_crtc_state->vrr_supported) {
10068 new_crtc_state->stream->ignore_msa_timing_param = true;
a85ba005
NC
10069 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10070
10071 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10072 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 10073 config.vsif_supported = true;
180db303 10074 config.btr = true;
98e6436d 10075
a85ba005
NC
10076 if (fs_vid_mode) {
10077 config.state = VRR_STATE_ACTIVE_FIXED;
10078 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10079 goto out;
10080 } else if (new_crtc_state->base.vrr_enabled) {
10081 config.state = VRR_STATE_ACTIVE_VARIABLE;
10082 } else {
10083 config.state = VRR_STATE_INACTIVE;
10084 }
10085 }
10086out:
bb47de73
NK
10087 new_crtc_state->freesync_config = config;
10088}
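/*
 * Illustration (not from the original source, values hypothetical):
 * for a panel whose EDID yields min_vfreq = 48 and max_vfreq = 144,
 * a 120 Hz base mode gives vrr_supported = true (48 <= 120 <= 144)
 * and a refresh range of 48000000..144000000 uHz, while a 30 Hz
 * mode falls outside the range and leaves VRR off for the CRTC.
 */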
98e6436d 10089
bb47de73
NK
10090static void reset_freesync_config_for_crtc(
10091 struct dm_crtc_state *new_crtc_state)
10092{
10093 new_crtc_state->vrr_supported = false;
98e6436d 10094
bb47de73
NK
10095 memset(&new_crtc_state->vrr_infopacket, 0,
10096 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
10097}
10098
a85ba005
NC
10099static bool
10100is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10101 struct drm_crtc_state *new_crtc_state)
10102{
10103 struct drm_display_mode old_mode, new_mode;
10104
10105 if (!old_crtc_state || !new_crtc_state)
10106 return false;
10107
10108 old_mode = old_crtc_state->mode;
10109 new_mode = new_crtc_state->mode;
10110
10111 if (old_mode.clock == new_mode.clock &&
10112 old_mode.hdisplay == new_mode.hdisplay &&
10113 old_mode.vdisplay == new_mode.vdisplay &&
10114 old_mode.htotal == new_mode.htotal &&
10115 old_mode.vtotal != new_mode.vtotal &&
10116 old_mode.hsync_start == new_mode.hsync_start &&
10117 old_mode.vsync_start != new_mode.vsync_start &&
10118 old_mode.hsync_end == new_mode.hsync_end &&
10119 old_mode.vsync_end != new_mode.vsync_end &&
10120 old_mode.hskew == new_mode.hskew &&
10121 old_mode.vscan == new_mode.vscan &&
10122 (old_mode.vsync_end - old_mode.vsync_start) ==
10123 (new_mode.vsync_end - new_mode.vsync_start))
10124 return true;
10125
10126 return false;
10127}
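/*
 * Worked example (added for clarity, values hypothetical): a
 * 1920x1080 mode with clock 148500 kHz, htotal 2200 and vtotal 1125
 * refreshes at 148500000 / (2200 * 1125) = 60 Hz. A freesync video
 * variant of it keeps every field equal except that vtotal,
 * vsync_start and vsync_end all shift by the same amount (e.g.
 * vtotal 1406 gives roughly 48 Hz), leaving the vsync pulse width
 * (vsync_end - vsync_start) unchanged. That is the pattern matched
 * above: only the vertical front porch differs between the modes.
 */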
10128
10129static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10130 uint64_t num, den, res;
10131 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10132
10133 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10134
10135 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10136 den = (unsigned long long)new_crtc_state->mode.htotal *
10137 (unsigned long long)new_crtc_state->mode.vtotal;
10138
10139 res = div_u64(num, den);
10140 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10141}
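/*
 * Worked example (added for clarity): mode.clock is in kHz, so
 * clock * 1000 is the pixel clock in Hz and the extra * 1000000
 * scales to uHz before dividing by pixels per frame. For
 * clock = 148500, htotal = 2200, vtotal = 1125:
 *
 *	148500 * 1000 * 1000000 / (2200 * 1125) = 60000000 uHz,
 *
 * i.e. a fixed refresh rate of exactly 60 Hz.
 */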
10142
4b9674e5
LL
10143static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10144 struct drm_atomic_state *state,
10145 struct drm_crtc *crtc,
10146 struct drm_crtc_state *old_crtc_state,
10147 struct drm_crtc_state *new_crtc_state,
10148 bool enable,
10149 bool *lock_and_validation_needed)
e7b07cee 10150{
eb3dc897 10151 struct dm_atomic_state *dm_state = NULL;
54d76575 10152 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 10153 struct dc_stream_state *new_stream;
62f55537 10154 int ret = 0;
d4d4a645 10155
1f6010a9
DF
10156 /*
10157 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10158 * update changed items
10159 */
4b9674e5
LL
10160 struct amdgpu_crtc *acrtc = NULL;
10161 struct amdgpu_dm_connector *aconnector = NULL;
10162 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10163 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 10164
4b9674e5 10165 new_stream = NULL;
9635b754 10166
4b9674e5
LL
10167 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10168 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10169 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 10170 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 10171
4b9674e5
LL
10172 /* TODO This hack should go away */
10173 if (aconnector && enable) {
10174 /* Make sure fake sink is created in plug-in scenario */
10175 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10176 &aconnector->base);
10177 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10178 &aconnector->base);
19f89e23 10179
4b9674e5
LL
10180 if (IS_ERR(drm_new_conn_state)) {
10181 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10182 goto fail;
10183 }
19f89e23 10184
4b9674e5
LL
10185 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10186 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 10187
02d35a67
JFZ
10188 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10189 goto skip_modeset;
10190
cbd14ae7
SW
10191 new_stream = create_validate_stream_for_sink(aconnector,
10192 &new_crtc_state->mode,
10193 dm_new_conn_state,
10194 dm_old_crtc_state->stream);
19f89e23 10195
4b9674e5
LL
10196 /*
10197		 * We can have no stream on ACTION_SET if a display
10198		 * was disconnected during S3; in this case it is not an
10199		 * error: the OS will be updated after detection and
10200		 * will do the right thing on the next atomic commit.
10201 */
19f89e23 10202
4b9674e5
LL
10203 if (!new_stream) {
10204 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10205 __func__, acrtc->base.base.id);
10206 ret = -ENOMEM;
10207 goto fail;
10208 }
e7b07cee 10209
3d4e52d0
VL
10210 /*
10211 * TODO: Check VSDB bits to decide whether this should
10212 * be enabled or not.
10213 */
10214 new_stream->triggered_crtc_reset.enabled =
10215 dm->force_timing_sync;
10216
4b9674e5 10217 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 10218
88694af9
NK
10219 ret = fill_hdr_info_packet(drm_new_conn_state,
10220 &new_stream->hdr_static_metadata);
10221 if (ret)
10222 goto fail;
10223
7e930949
NK
10224 /*
10225 * If we already removed the old stream from the context
10226 * (and set the new stream to NULL) then we can't reuse
10227 * the old stream even if the stream and scaling are unchanged.
10228 * We'll hit the BUG_ON and black screen.
10229 *
10230 * TODO: Refactor this function to allow this check to work
10231 * in all conditions.
10232 */
a85ba005
NC
10233 if (amdgpu_freesync_vid_mode &&
10234 dm_new_crtc_state->stream &&
10235 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10236 goto skip_modeset;
10237
7e930949
NK
10238 if (dm_new_crtc_state->stream &&
10239 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
10240 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10241 new_crtc_state->mode_changed = false;
10242			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10243 new_crtc_state->mode_changed);
62f55537 10244 }
4b9674e5 10245 }
b830ebc9 10246
02d35a67 10247 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
10248 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10249 goto skip_modeset;
e7b07cee 10250
4711c033 10251 DRM_DEBUG_ATOMIC(
4b9674e5
LL
10252 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10253		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
10254 "connectors_changed:%d\n",
10255 acrtc->crtc_id,
10256 new_crtc_state->enable,
10257 new_crtc_state->active,
10258 new_crtc_state->planes_changed,
10259 new_crtc_state->mode_changed,
10260 new_crtc_state->active_changed,
10261 new_crtc_state->connectors_changed);
62f55537 10262
4b9674e5
LL
10263 /* Remove stream for any changed/disabled CRTC */
10264 if (!enable) {
62f55537 10265
4b9674e5
LL
10266 if (!dm_old_crtc_state->stream)
10267 goto skip_modeset;
eb3dc897 10268
a85ba005
NC
10269 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10270 is_timing_unchanged_for_freesync(new_crtc_state,
10271 old_crtc_state)) {
10272 new_crtc_state->mode_changed = false;
10273 DRM_DEBUG_DRIVER(
10274 "Mode change not required for front porch change, "
10275				"setting mode_changed to %d\n",
10276 new_crtc_state->mode_changed);
10277
10278 set_freesync_fixed_config(dm_new_crtc_state);
10279
10280 goto skip_modeset;
10281 } else if (amdgpu_freesync_vid_mode && aconnector &&
10282 is_freesync_video_mode(&new_crtc_state->mode,
10283 aconnector)) {
e88ebd83
SC
10284 struct drm_display_mode *high_mode;
10285
10286 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10287 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10288 set_freesync_fixed_config(dm_new_crtc_state);
10289 }
a85ba005
NC
10290 }
10291
4b9674e5
LL
10292 ret = dm_atomic_get_state(state, &dm_state);
10293 if (ret)
10294 goto fail;
e7b07cee 10295
4b9674e5
LL
10296 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10297 crtc->base.id);
62f55537 10298
4b9674e5
LL
10299 /* i.e. reset mode */
10300 if (dc_remove_stream_from_ctx(
10301 dm->dc,
10302 dm_state->context,
10303 dm_old_crtc_state->stream) != DC_OK) {
10304 ret = -EINVAL;
10305 goto fail;
10306 }
62f55537 10307
4b9674e5
LL
10308 dc_stream_release(dm_old_crtc_state->stream);
10309 dm_new_crtc_state->stream = NULL;
bb47de73 10310
4b9674e5 10311 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 10312
4b9674e5 10313 *lock_and_validation_needed = true;
62f55537 10314
4b9674e5
LL
10315	} else { /* Add stream for any updated/enabled CRTC */
10316		/*
10317		 * Quick fix to prevent a NULL pointer deref on new_stream when
10318		 * added MST connectors are not found in the existing crtc_state in chained mode.
10319 * TODO: need to dig out the root cause of that
10320 */
10321 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10322 goto skip_modeset;
62f55537 10323
4b9674e5
LL
10324 if (modereset_required(new_crtc_state))
10325 goto skip_modeset;
62f55537 10326
4b9674e5
LL
10327 if (modeset_required(new_crtc_state, new_stream,
10328 dm_old_crtc_state->stream)) {
62f55537 10329
4b9674e5 10330 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 10331
4b9674e5
LL
10332 ret = dm_atomic_get_state(state, &dm_state);
10333 if (ret)
10334 goto fail;
27b3f4fc 10335
4b9674e5 10336 dm_new_crtc_state->stream = new_stream;
62f55537 10337
4b9674e5 10338 dc_stream_retain(new_stream);
1dc90497 10339
4711c033
LT
10340 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10341 crtc->base.id);
1dc90497 10342
4b9674e5
LL
10343 if (dc_add_stream_to_ctx(
10344 dm->dc,
10345 dm_state->context,
10346 dm_new_crtc_state->stream) != DC_OK) {
10347 ret = -EINVAL;
10348 goto fail;
9b690ef3
BL
10349 }
10350
4b9674e5
LL
10351 *lock_and_validation_needed = true;
10352 }
10353 }
e277adc5 10354
4b9674e5
LL
10355skip_modeset:
10356 /* Release extra reference */
10357 if (new_stream)
10358 dc_stream_release(new_stream);
e277adc5 10359
4b9674e5
LL
10360 /*
10361 * We want to do dc stream updates that do not require a
10362 * full modeset below.
10363 */
2afda735 10364 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
10365 return 0;
10366 /*
10367 * Given above conditions, the dc state cannot be NULL because:
10368	 * 1. We're in the process of enabling CRTCs (the stream has just
10369	 *    been added to the dc context, or is already in it),
10370	 * 2. The CRTC has a valid connector attached, and
10371	 * 3. It is currently active and enabled.
10372 * => The dc stream state currently exists.
10373 */
10374 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 10375
4b9674e5 10376 /* Scaling or underscan settings */
c521fc31
RL
10377 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10378 drm_atomic_crtc_needs_modeset(new_crtc_state))
4b9674e5
LL
10379 update_stream_scaling_settings(
10380 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 10381
b05e2c5e
DF
10382 /* ABM settings */
10383 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10384
4b9674e5
LL
10385 /*
10386 * Color management settings. We also update color properties
10387 * when a modeset is needed, to ensure it gets reprogrammed.
10388 */
10389 if (dm_new_crtc_state->base.color_mgmt_changed ||
10390 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 10391 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
10392 if (ret)
10393 goto fail;
62f55537 10394 }
e7b07cee 10395
4b9674e5
LL
10396 /* Update Freesync settings. */
10397 get_freesync_config_for_crtc(dm_new_crtc_state,
10398 dm_new_conn_state);
10399
62f55537 10400 return ret;
9635b754
DS
10401
10402fail:
10403 if (new_stream)
10404 dc_stream_release(new_stream);
10405 return ret;
62f55537 10406}
9b690ef3 10407
f6ff2a08
NK
10408static bool should_reset_plane(struct drm_atomic_state *state,
10409 struct drm_plane *plane,
10410 struct drm_plane_state *old_plane_state,
10411 struct drm_plane_state *new_plane_state)
10412{
10413 struct drm_plane *other;
10414 struct drm_plane_state *old_other_state, *new_other_state;
10415 struct drm_crtc_state *new_crtc_state;
10416 int i;
10417
70a1efac
NK
10418 /*
10419 * TODO: Remove this hack once the checks below are sufficient
10420 * enough to determine when we need to reset all the planes on
10421 * the stream.
10422 */
10423 if (state->allow_modeset)
10424 return true;
10425
f6ff2a08
NK
10426 /* Exit early if we know that we're adding or removing the plane. */
10427 if (old_plane_state->crtc != new_plane_state->crtc)
10428 return true;
10429
10430 /* old crtc == new_crtc == NULL, plane not in context. */
10431 if (!new_plane_state->crtc)
10432 return false;
10433
10434 new_crtc_state =
10435 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10436
10437 if (!new_crtc_state)
10438 return true;
10439
7316c4ad
NK
10440 /* CRTC Degamma changes currently require us to recreate planes. */
10441 if (new_crtc_state->color_mgmt_changed)
10442 return true;
10443
f6ff2a08
NK
10444 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10445 return true;
10446
10447 /*
10448 * If there are any new primary or overlay planes being added or
10449 * removed then the z-order can potentially change. To ensure
10450 * correct z-order and pipe acquisition the current DC architecture
10451 * requires us to remove and recreate all existing planes.
10452 *
10453 * TODO: Come up with a more elegant solution for this.
10454 */
10455 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 10456 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
10457 if (other->type == DRM_PLANE_TYPE_CURSOR)
10458 continue;
10459
10460 if (old_other_state->crtc != new_plane_state->crtc &&
10461 new_other_state->crtc != new_plane_state->crtc)
10462 continue;
10463
10464 if (old_other_state->crtc != new_other_state->crtc)
10465 return true;
10466
dc4cb30d
NK
10467 /* Src/dst size and scaling updates. */
10468 if (old_other_state->src_w != new_other_state->src_w ||
10469 old_other_state->src_h != new_other_state->src_h ||
10470 old_other_state->crtc_w != new_other_state->crtc_w ||
10471 old_other_state->crtc_h != new_other_state->crtc_h)
10472 return true;
10473
10474 /* Rotation / mirroring updates. */
10475 if (old_other_state->rotation != new_other_state->rotation)
10476 return true;
10477
10478 /* Blending updates. */
10479 if (old_other_state->pixel_blend_mode !=
10480 new_other_state->pixel_blend_mode)
10481 return true;
10482
10483 /* Alpha updates. */
10484 if (old_other_state->alpha != new_other_state->alpha)
10485 return true;
10486
10487 /* Colorspace changes. */
10488 if (old_other_state->color_range != new_other_state->color_range ||
10489 old_other_state->color_encoding != new_other_state->color_encoding)
10490 return true;
10491
9a81cc60
NK
10492 /* Framebuffer checks fall at the end. */
10493 if (!old_other_state->fb || !new_other_state->fb)
10494 continue;
10495
10496 /* Pixel format changes can require bandwidth updates. */
10497 if (old_other_state->fb->format != new_other_state->fb->format)
10498 return true;
10499
6eed95b0
BN
10500 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10501 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
10502
10503 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
10504 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10505 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
10506 return true;
10507 }
10508
10509 return false;
10510}
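/*
 * Note (added for clarity): the checks above are deliberately
 * conservative. Any scaling, rotation, blending, alpha, colorspace,
 * pixel format, tiling or DCC change on any non-cursor plane that
 * shares the CRTC forces a full remove/recreate of the planes,
 * since all of those feed into DC's bandwidth and pipe allocation
 * decisions.
 */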
10511
b0455fda
SS
10512static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10513 struct drm_plane_state *new_plane_state,
10514 struct drm_framebuffer *fb)
10515{
e72868c4
SS
10516 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10517 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 10518 unsigned int pitch;
e72868c4 10519 bool linear;
b0455fda
SS
10520
10521 if (fb->width > new_acrtc->max_cursor_width ||
10522 fb->height > new_acrtc->max_cursor_height) {
10523 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10524 new_plane_state->fb->width,
10525 new_plane_state->fb->height);
10526 return -EINVAL;
10527 }
10528 if (new_plane_state->src_w != fb->width << 16 ||
10529 new_plane_state->src_h != fb->height << 16) {
10530 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10531 return -EINVAL;
10532 }
10533
10534 /* Pitch in pixels */
10535 pitch = fb->pitches[0] / fb->format->cpp[0];
10536
10537 if (fb->width != pitch) {
10538		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10539 fb->width, pitch);
10540 return -EINVAL;
10541 }
10542
10543 switch (pitch) {
10544 case 64:
10545 case 128:
10546 case 256:
10547 /* FB pitch is supported by cursor plane */
10548 break;
10549 default:
10550 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10551 return -EINVAL;
10552 }
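/*
 * Note (added for clarity; the exact hardware constraint is
 * inferred): the cursor surface pitch is programmed from a small
 * set of fixed sizes, which is why only 64, 128 and 256 pixel
 * pitches are accepted here regardless of the FB's bit depth.
 */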
10553
e72868c4
SS
10554 /* Core DRM takes care of checking FB modifiers, so we only need to
10555 * check tiling flags when the FB doesn't have a modifier. */
10556 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10557 if (adev->family < AMDGPU_FAMILY_AI) {
10558 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10559 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10560 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10561 } else {
10562 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10563 }
10564 if (!linear) {
10565			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10566 return -EINVAL;
10567 }
10568 }
10569
b0455fda
SS
10570 return 0;
10571}
10572
9e869063
LL
10573static int dm_update_plane_state(struct dc *dc,
10574 struct drm_atomic_state *state,
10575 struct drm_plane *plane,
10576 struct drm_plane_state *old_plane_state,
10577 struct drm_plane_state *new_plane_state,
10578 bool enable,
10579 bool *lock_and_validation_needed)
62f55537 10580{
eb3dc897
NK
10581
10582 struct dm_atomic_state *dm_state = NULL;
62f55537 10583 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 10584 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 10585 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 10586 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 10587 struct amdgpu_crtc *new_acrtc;
f6ff2a08 10588 bool needs_reset;
62f55537 10589 int ret = 0;
e7b07cee 10590
9b690ef3 10591
9e869063
LL
10592 new_plane_crtc = new_plane_state->crtc;
10593 old_plane_crtc = old_plane_state->crtc;
10594 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10595 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 10596
626bf90f
SS
10597 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10598 if (!enable || !new_plane_crtc ||
10599 drm_atomic_plane_disabling(plane->state, new_plane_state))
10600 return 0;
10601
10602 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10603
5f581248
SS
10604 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10605 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10606 return -EINVAL;
10607 }
10608
24f99d2b 10609 if (new_plane_state->fb) {
b0455fda
SS
10610 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10611 new_plane_state->fb);
10612 if (ret)
10613 return ret;
24f99d2b
SS
10614 }
10615
9e869063 10616 return 0;
626bf90f 10617 }
9b690ef3 10618
f6ff2a08
NK
10619 needs_reset = should_reset_plane(state, plane, old_plane_state,
10620 new_plane_state);
10621
9e869063
LL
10622 /* Remove any changed/removed planes */
10623 if (!enable) {
f6ff2a08 10624 if (!needs_reset)
9e869063 10625 return 0;
a7b06724 10626
9e869063
LL
10627 if (!old_plane_crtc)
10628 return 0;
62f55537 10629
9e869063
LL
10630 old_crtc_state = drm_atomic_get_old_crtc_state(
10631 state, old_plane_crtc);
10632 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 10633
9e869063
LL
10634 if (!dm_old_crtc_state->stream)
10635 return 0;
62f55537 10636
9e869063
LL
10637 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10638 plane->base.id, old_plane_crtc->base.id);
9b690ef3 10639
9e869063
LL
10640 ret = dm_atomic_get_state(state, &dm_state);
10641 if (ret)
10642 return ret;
eb3dc897 10643
9e869063
LL
10644 if (!dc_remove_plane_from_context(
10645 dc,
10646 dm_old_crtc_state->stream,
10647 dm_old_plane_state->dc_state,
10648 dm_state->context)) {
62f55537 10649
c3537613 10650 return -EINVAL;
9e869063 10651 }
e7b07cee 10652
9b690ef3 10653
9e869063
LL
10654 dc_plane_state_release(dm_old_plane_state->dc_state);
10655 dm_new_plane_state->dc_state = NULL;
1dc90497 10656
9e869063 10657 *lock_and_validation_needed = true;
1dc90497 10658
9e869063
LL
10659 } else { /* Add new planes */
10660 struct dc_plane_state *dc_new_plane_state;
1dc90497 10661
9e869063
LL
10662 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10663 return 0;
e7b07cee 10664
9e869063
LL
10665 if (!new_plane_crtc)
10666 return 0;
e7b07cee 10667
9e869063
LL
10668 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10669 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 10670
9e869063
LL
10671 if (!dm_new_crtc_state->stream)
10672 return 0;
62f55537 10673
f6ff2a08 10674 if (!needs_reset)
9e869063 10675 return 0;
62f55537 10676
8c44515b
AP
10677 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10678 if (ret)
10679 return ret;
10680
9e869063 10681 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 10682
9e869063
LL
10683 dc_new_plane_state = dc_create_plane_state(dc);
10684 if (!dc_new_plane_state)
10685 return -ENOMEM;
62f55537 10686
4711c033
LT
10687 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10688 plane->base.id, new_plane_crtc->base.id);
8c45c5db 10689
695af5f9 10690 ret = fill_dc_plane_attributes(
1348969a 10691 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
10692 dc_new_plane_state,
10693 new_plane_state,
10694 new_crtc_state);
10695 if (ret) {
10696 dc_plane_state_release(dc_new_plane_state);
10697 return ret;
10698 }
62f55537 10699
9e869063
LL
10700 ret = dm_atomic_get_state(state, &dm_state);
10701 if (ret) {
10702 dc_plane_state_release(dc_new_plane_state);
10703 return ret;
10704 }
eb3dc897 10705
9e869063
LL
10706 /*
10707 * Any atomic check errors that occur after this will
10708 * not need a release. The plane state will be attached
10709 * to the stream, and therefore part of the atomic
10710 * state. It'll be released when the atomic state is
10711 * cleaned.
10712 */
10713 if (!dc_add_plane_to_context(
10714 dc,
10715 dm_new_crtc_state->stream,
10716 dc_new_plane_state,
10717 dm_state->context)) {
62f55537 10718
9e869063
LL
10719 dc_plane_state_release(dc_new_plane_state);
10720 return -EINVAL;
10721 }
8c45c5db 10722
9e869063 10723 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 10724
9e869063
LL
10725 /* Tell DC to do a full surface update every time there
10726 * is a plane change. Inefficient, but works for now.
10727 */
10728 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10729
10730 *lock_and_validation_needed = true;
62f55537 10731 }
e7b07cee
HW
10732
10733
62f55537
AG
10734 return ret;
10735}
a87fa993 10736
69cb5629
VZ
10737static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10738 int *src_w, int *src_h)
10739{
10740 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10741 case DRM_MODE_ROTATE_90:
10742 case DRM_MODE_ROTATE_270:
10743 *src_w = plane_state->src_h >> 16;
10744 *src_h = plane_state->src_w >> 16;
10745 break;
10746 case DRM_MODE_ROTATE_0:
10747 case DRM_MODE_ROTATE_180:
10748 default:
10749 *src_w = plane_state->src_w >> 16;
10750 *src_h = plane_state->src_h >> 16;
10751 break;
10752 }
10753}
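/*
 * Usage note (added for clarity): plane src_w/src_h are 16.16
 * fixed point, hence the >> 16. For a 1920x1080 plane rotated by
 * 90 degrees this returns src_w = 1080 and src_h = 1920, i.e. the
 * source size as it appears after rotation, which is what the
 * cursor scaling check below compares against crtc_w/crtc_h.
 */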
10754
12f4849a
SS
10755static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10756 struct drm_crtc *crtc,
10757 struct drm_crtc_state *new_crtc_state)
10758{
d1bfbe8a
SS
10759 struct drm_plane *cursor = crtc->cursor, *underlying;
10760 struct drm_plane_state *new_cursor_state, *new_underlying_state;
10761 int i;
10762 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
69cb5629
VZ
10763 int cursor_src_w, cursor_src_h;
10764 int underlying_src_w, underlying_src_h;
12f4849a
SS
10765
10766 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10767 * cursor per pipe but it's going to inherit the scaling and
10768	 * positioning from the underlying pipe. Check that the cursor
10769	 * plane's scaling matches the underlying planes'. */
12f4849a 10770
d1bfbe8a
SS
10771 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10772 if (!new_cursor_state || !new_cursor_state->fb) {
12f4849a
SS
10773 return 0;
10774 }
10775
69cb5629
VZ
10776 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10777 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10778 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
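/*
 * Note (added for clarity): scale factors are compared in integer
 * per-mille units to avoid floating point. E.g. an unscaled 64x64
 * cursor gives 1000, while an underlying plane downscaled from
 * 3840 to 1920 gives 500; the loop below rejects such a mismatch
 * with -EINVAL.
 */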
12f4849a 10779
d1bfbe8a
SS
10780 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10781 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10782 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10783 continue;
12f4849a 10784
d1bfbe8a
SS
10785 /* Ignore disabled planes */
10786 if (!new_underlying_state->fb)
10787 continue;
10788
69cb5629
VZ
10789 dm_get_oriented_plane_size(new_underlying_state,
10790 &underlying_src_w, &underlying_src_h);
10791 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10792 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
d1bfbe8a
SS
10793
10794 if (cursor_scale_w != underlying_scale_w ||
10795 cursor_scale_h != underlying_scale_h) {
10796 drm_dbg_atomic(crtc->dev,
10797 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10798 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10799 return -EINVAL;
10800 }
10801
10802 /* If this plane covers the whole CRTC, no need to check planes underneath */
10803 if (new_underlying_state->crtc_x <= 0 &&
10804 new_underlying_state->crtc_y <= 0 &&
10805 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10806 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10807 break;
12f4849a
SS
10808 }
10809
10810 return 0;
10811}
10812
e10517b3 10813#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
10814static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10815{
10816 struct drm_connector *connector;
10817 struct drm_connector_state *conn_state;
10818 struct amdgpu_dm_connector *aconnector = NULL;
10819 int i;
10820 for_each_new_connector_in_state(state, connector, conn_state, i) {
10821 if (conn_state->crtc != crtc)
10822 continue;
10823
10824 aconnector = to_amdgpu_dm_connector(connector);
10825 if (!aconnector->port || !aconnector->mst_port)
10826 aconnector = NULL;
10827 else
10828 break;
10829 }
10830
10831 if (!aconnector)
10832 return 0;
10833
10834 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10835}
e10517b3 10836#endif
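/*
 * Note (added for clarity): DSC bandwidth on an MST link is shared
 * by every stream behind the same topology manager, so a modeset on
 * one MST CRTC pulls its sibling MST CRTCs into the atomic state,
 * allowing their DSC configs to be recomputed together.
 */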
44be939f 10837
b8592b48
LL
10838/**
10839 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10840 * @dev: The DRM device
10841 * @state: The atomic state to commit
10842 *
10843 * Validate that the given atomic state is programmable by DC into hardware.
10844 * This involves constructing a &struct dc_state reflecting the new hardware
10845 * state we wish to commit, then querying DC to see if it is programmable. It's
10846 * important not to modify the existing DC state. Otherwise, atomic_check
10847 * may unexpectedly commit hardware changes.
10848 *
10849 * When validating the DC state, it's important that the right locks are
10850 * acquired. For full updates case which removes/adds/updates streams on one
10851 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10852 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 10853 * flip using DRM's synchronization events.
b8592b48
LL
10854 *
10855 * Note that DM adds the affected connectors for all CRTCs in state, when that
10856 * might not seem necessary. This is because DC stream creation requires the
10857 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10858 * be possible but non-trivial - a possible TODO item.
10859 *
10860 * Return: negative error code if validation failed, 0 otherwise.
10861 */
7578ecda
AD
10862static int amdgpu_dm_atomic_check(struct drm_device *dev,
10863 struct drm_atomic_state *state)
62f55537 10864{
1348969a 10865 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 10866 struct dm_atomic_state *dm_state = NULL;
62f55537 10867 struct dc *dc = adev->dm.dc;
62f55537 10868 struct drm_connector *connector;
c2cea706 10869 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 10870 struct drm_crtc *crtc;
fc9e9920 10871 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
10872 struct drm_plane *plane;
10873 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 10874 enum dc_status status;
1e88ad0a 10875 int ret, i;
62f55537 10876 bool lock_and_validation_needed = false;
886876ec 10877 struct dm_crtc_state *dm_old_crtc_state;
6513104b
HW
10878#if defined(CONFIG_DRM_AMD_DC_DCN)
10879 struct dsc_mst_fairness_vars vars[MAX_PIPES];
41724ea2
BL
10880 struct drm_dp_mst_topology_state *mst_state;
10881 struct drm_dp_mst_topology_mgr *mgr;
6513104b 10882#endif
62f55537 10883
e8a98235 10884 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 10885
62f55537 10886 ret = drm_atomic_helper_check_modeset(dev, state);
68ca1c3e
S
10887 if (ret) {
10888 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
01e28f9c 10889 goto fail;
68ca1c3e 10890 }
62f55537 10891
c5892a10
SW
10892 /* Check connector changes */
10893 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10894 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10895 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10896
10897 /* Skip connectors that are disabled or part of modeset already. */
10898 if (!old_con_state->crtc && !new_con_state->crtc)
10899 continue;
10900
10901 if (!new_con_state->crtc)
10902 continue;
10903
10904 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10905 if (IS_ERR(new_crtc_state)) {
68ca1c3e 10906 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
c5892a10
SW
10907 ret = PTR_ERR(new_crtc_state);
10908 goto fail;
10909 }
10910
10911 if (dm_old_con_state->abm_level !=
10912 dm_new_con_state->abm_level)
10913 new_crtc_state->connectors_changed = true;
10914 }
10915
e10517b3 10916#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 10917 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
10918 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10919 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10920 ret = add_affected_mst_dsc_crtcs(state, crtc);
68ca1c3e
S
10921 if (ret) {
10922 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
44be939f 10923 goto fail;
68ca1c3e 10924 }
44be939f
ML
10925 }
10926 }
10927 }
e10517b3 10928#endif
1e88ad0a 10929 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
10930 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10931
1e88ad0a 10932 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 10933 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
10934 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10935 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 10936 continue;
7bef1af3 10937
03fc4cf4 10938 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
68ca1c3e
S
10939 if (ret) {
10940 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
03fc4cf4 10941 goto fail;
68ca1c3e 10942 }
03fc4cf4 10943
1e88ad0a
S
10944 if (!new_crtc_state->enable)
10945 continue;
fc9e9920 10946
1e88ad0a 10947 ret = drm_atomic_add_affected_connectors(state, crtc);
68ca1c3e
S
10948 if (ret) {
10949 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
706bc8c5 10950 goto fail;
68ca1c3e 10951 }
fc9e9920 10952
1e88ad0a 10953 ret = drm_atomic_add_affected_planes(state, crtc);
68ca1c3e
S
10954 if (ret) {
10955 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
1e88ad0a 10956 goto fail;
68ca1c3e 10957 }
115a385c 10958
cbac53f7 10959 if (dm_old_crtc_state->dsc_force_changed)
115a385c 10960 new_crtc_state->mode_changed = true;
e7b07cee
HW
10961 }
10962
2d9e6431
NK
10963 /*
10964 * Add all primary and overlay planes on the CRTC to the state
10965 * whenever a plane is enabled to maintain correct z-ordering
10966 * and to enable fast surface updates.
10967 */
10968 drm_for_each_crtc(crtc, dev) {
10969 bool modified = false;
10970
10971 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10972 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10973 continue;
10974
10975 if (new_plane_state->crtc == crtc ||
10976 old_plane_state->crtc == crtc) {
10977 modified = true;
10978 break;
10979 }
10980 }
10981
10982 if (!modified)
10983 continue;
10984
10985 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10986 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10987 continue;
10988
10989 new_plane_state =
10990 drm_atomic_get_plane_state(state, plane);
10991
10992 if (IS_ERR(new_plane_state)) {
10993 ret = PTR_ERR(new_plane_state);
68ca1c3e 10994 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
2d9e6431
NK
10995 goto fail;
10996 }
10997 }
10998 }
10999
62f55537 11000	/* Remove existing planes if they are modified */
9e869063
LL
11001 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11002 ret = dm_update_plane_state(dc, state, plane,
11003 old_plane_state,
11004 new_plane_state,
11005 false,
11006 &lock_and_validation_needed);
68ca1c3e
S
11007 if (ret) {
11008 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 11009 goto fail;
68ca1c3e 11010 }
62f55537
AG
11011 }
11012
11013 /* Disable all crtcs which require disable */
4b9674e5
LL
11014 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11015 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11016 old_crtc_state,
11017 new_crtc_state,
11018 false,
11019 &lock_and_validation_needed);
68ca1c3e
S
11020 if (ret) {
11021 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
4b9674e5 11022 goto fail;
68ca1c3e 11023 }
62f55537
AG
11024 }
11025
11026 /* Enable all crtcs which require enable */
4b9674e5
LL
11027 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11028 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11029 old_crtc_state,
11030 new_crtc_state,
11031 true,
11032 &lock_and_validation_needed);
68ca1c3e
S
11033 if (ret) {
11034 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
4b9674e5 11035 goto fail;
68ca1c3e 11036 }
62f55537
AG
11037 }
11038
11039 /* Add new/modified planes */
9e869063
LL
11040 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11041 ret = dm_update_plane_state(dc, state, plane,
11042 old_plane_state,
11043 new_plane_state,
11044 true,
11045 &lock_and_validation_needed);
68ca1c3e
S
11046 if (ret) {
11047 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 11048 goto fail;
68ca1c3e 11049 }
62f55537
AG
11050 }
11051
b349f76e
ES
11052 /* Run this here since we want to validate the streams we created */
11053 ret = drm_atomic_helper_check_planes(dev, state);
68ca1c3e
S
11054 if (ret) {
11055 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
b349f76e 11056 goto fail;
68ca1c3e 11057 }
62f55537 11058
12f4849a
SS
11059 /* Check cursor planes scaling */
11060 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11061 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
68ca1c3e
S
11062 if (ret) {
11063 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
12f4849a 11064 goto fail;
68ca1c3e 11065 }
12f4849a
SS
11066 }
11067
43d10d30
NK
11068 if (state->legacy_cursor_update) {
11069 /*
11070 * This is a fast cursor update coming from the plane update
11071 * helper, check if it can be done asynchronously for better
11072 * performance.
11073 */
11074 state->async_update =
11075 !drm_atomic_helper_async_check(dev, state);
11076
11077 /*
11078 * Skip the remaining global validation if this is an async
11079 * update. Cursor updates can be done without affecting
11080 * state or bandwidth calcs and this avoids the performance
11081 * penalty of locking the private state object and
11082 * allocating a new dc_state.
11083 */
11084 if (state->async_update)
11085 return 0;
11086 }
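/*
 * Note (added for clarity): drm_atomic_helper_async_check() only
 * approves updates that touch a single plane and need no modeset,
 * so returning early is safe here; an async cursor update cannot
 * have changed anything that the global DC validation below
 * inspects.
 */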
11087
ebdd27e1 11088	/* Check scaling and underscan changes */
1f6010a9 11089 /* TODO Removed scaling changes validation due to inability to commit
e7b07cee
HW
11090	 * new stream into context w/o causing a full reset. Need to
11091 * decide how to handle.
11092 */
c2cea706 11093 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
11094 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11095 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11096 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
11097
11098 /* Skip any modesets/resets */
0bc9706d
LSL
11099 if (!acrtc || drm_atomic_crtc_needs_modeset(
11100 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
11101 continue;
11102
b830ebc9 11103		/* Skip anything that is not a scaling or underscan change */
54d76575 11104 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
11105 continue;
11106
11107 lock_and_validation_needed = true;
11108 }
11109
41724ea2
BL
11110#if defined(CONFIG_DRM_AMD_DC_DCN)
11111 /* set the slot info for each mst_state based on the link encoding format */
11112 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11113 struct amdgpu_dm_connector *aconnector;
11114 struct drm_connector *connector;
11115 struct drm_connector_list_iter iter;
11116 u8 link_coding_cap;
11117
11118		if (!mgr->mst_state)
11119 continue;
11120
11121 drm_connector_list_iter_begin(dev, &iter);
11122 drm_for_each_connector_iter(connector, &iter) {
11123 int id = connector->index;
11124
11125 if (id == mst_state->mgr->conn_base_id) {
11126 aconnector = to_amdgpu_dm_connector(connector);
11127 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11128 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11129
11130 break;
11131 }
11132 }
11133 drm_connector_list_iter_end(&iter);
11134
11135 }
11136#endif
f6d7c7fa
NK
11137	/*
11138 * Streams and planes are reset when there are changes that affect
11139 * bandwidth. Anything that affects bandwidth needs to go through
11140 * DC global validation to ensure that the configuration can be applied
11141 * to hardware.
11142 *
11143 * We have to currently stall out here in atomic_check for outstanding
11144 * commits to finish in this case because our IRQ handlers reference
11145 * DRM state directly - we can end up disabling interrupts too early
11146 * if we don't.
11147 *
11148 * TODO: Remove this stall and drop DM state private objects.
a87fa993 11149 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
			goto fail;
		}

		ret = do_aquire_global_lock(dev, state);
		if (ret) {
			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
			goto fail;
		}

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			/* Without this, ret would still be 0 and the failure
			 * would be reported as success to the DRM core.
			 */
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
			goto fail;
		}
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
			goto fail;
		}
		status = dc_validate_global_state(dc, dm_state->context, true);
		if (status != DC_OK) {
			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context or affect global validation, and their
		 * commit work can run in parallel with other commits that
		 * don't touch the same resource. If we have a new DC context
		 * as part of the DM atomic state from validation we need to
		 * free it and retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
						 UPDATE_TYPE_FULL :
						 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

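/*
 * Check whether the DP sink can drive variable timing without valid MSA
 * (Main Stream Attribute) parameters, i.e. whether MSA_TIMING_PAR_IGNORED
 * is set in its DP_DOWN_STREAM_PORT_COUNT DPCD register. FreeSync over DP
 * relies on the sink ignoring MSA timing.
 */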
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
		NULL,
		amdgpu_dm_connector->dc_link,
		DP_DOWN_STREAM_PORT_COUNT,
		&dpcd_data,
		sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

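/*
 * Hand one chunk of a CEA extension block to DMUB for parsing. DMUB replies
 * with either an ACK (more chunks expected), an AMD VSDB result once the
 * whole block has been seen, or a failure; on a VSDB hit the FreeSync range
 * is copied out to @vsdb.
 */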
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
		unsigned int offset,
		unsigned int total_length,
		uint8_t *data,
		unsigned int length,
		struct amdgpu_hdmi_vsdb_info *vsdb)
{
	bool res;
	union dmub_rb_cmd cmd;
	struct dmub_cmd_send_edid_cea *input;
	struct dmub_cmd_edid_cea_output *output;

	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	input = &cmd.edid_cea.data.input;

	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
	cmd.edid_cea.header.sub_type = 0;
	cmd.edid_cea.header.payload_bytes =
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
	input->total_length = total_length;
	memcpy(input->payload, data, length);

	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
	if (!res) {
		DRM_ERROR("EDID CEA parser failed\n");
		return false;
	}

	output = &cmd.edid_cea.data.output;

	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
		if (!output->ack.success) {
			DRM_ERROR("EDID CEA ack failed at offset %d\n",
					output->ack.offset);
		}
	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
		if (!output->amd_vsdb.vsdb_found)
			return false;

		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
	} else {
		DRM_WARN("Unknown EDID CEA parser results\n");
		return false;
	}

	return true;
}

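/*
 * Stream a CEA extension block to the DMCU firmware parser 8 bytes at a
 * time, checking for an ACK after each chunk and fetching the AMD VSDB
 * (FreeSync range) result after the final chunk.
 */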
static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* EDID block fully sent, expect result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
			if (res) {
				/* amd vsdb found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* not amd vsdb */
			return false;
		}

		/* check for ack */
		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
		if (!res)
			return false;
	}

	return false;
}

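/*
 * DMUB flavour of the chunked CEA parse: the per-chunk helper handles ACKs
 * and the VSDB result, so this just walks the block and reports whether a
 * FreeSync-capable VSDB was found.
 */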
static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
			return false;
	}

	return vsdb_info->freesync_supported;
}

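/* Route CEA parsing to DMUB when available, otherwise fall back to DMCU. */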
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);

	if (adev->dm.dmub_srv)
		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
	else
		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
}

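/*
 * Find the CEA extension block in the EDID and run it through the firmware
 * parser. Returns the extension index when a valid AMD VSDB is found, or
 * -ENODEV otherwise.
 */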
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}

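/*
 * amdgpu_dm_update_freesync_caps - update a connector's FreeSync state.
 *
 * Derives the supported refresh-rate range from the EDID (monitor range
 * descriptors for DP/eDP sinks, the AMD HDMI VSDB for HDMI sinks), caches
 * it on the connector, and updates the vrr_capable property to match.
 */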
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
			|| sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1
				 * then no additional timing information is
				 * provided. Default GTF, GTF Secondary curve and
				 * CVT are not supported.
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
		}
	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

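/*
 * Apply the force_timing_sync setting to every active stream: mark each
 * stream's triggered CRTC reset accordingly, then re-arm per-frame CRTC
 * master sync and ask DC to resynchronize timing across displays.
 */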
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

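/*
 * Register write helper used by DC: writes through CGS and records the
 * access via the amdgpu_dc_wreg tracepoint for debugging.
 */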
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

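/*
 * Register read helper used by DC. Reads are refused while a DMUB register
 * offload gather is in progress (the value would not be valid yet);
 * otherwise the access goes through CGS and is recorded via the
 * amdgpu_dc_rreg tracepoint.
 */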
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

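/*
 * Translate a DMUB async-to-sync access status into a return value and an
 * operation result for the caller: for AUX commands the reply length is
 * returned on success, for SET_CONFIG a zero; failures map to the matching
 * AUX_RET_ERROR_* / SET_CONFIG_* codes.
 */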
int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
	uint8_t status_type, uint32_t *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int return_status = -1;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;

	if (is_cmd_aux) {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = p_notify->aux_reply.length;
			*operation_result = p_notify->result;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
			*operation_result = AUX_RET_ERROR_TIMEOUT;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		} else {
			*operation_result = AUX_RET_ERROR_UNKNOWN;
		}
	} else {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = 0;
			*operation_result = p_notify->sc_status;
		} else {
			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
		}
	}

	return return_status;
}

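/*
 * Send an AUX or SET_CONFIG request to DMUB asynchronously, then block until
 * the completion fires or a 10 second timeout elapses. A SET_CONFIG that the
 * DC layer reports as already complete skips the wait. For successful AUX
 * reads the reply data is copied back into the caller's payload before the
 * combined status is returned.
 */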
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
	unsigned int link_index, void *cmd_payload, void *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	if (is_cmd_aux) {
		dc_process_dmub_aux_transfer_async(ctx->dc,
			link_index, (struct aux_payload *)cmd_payload);
	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
					(struct set_config_cmd_payload *)cmd_payload,
					adev->dm.dmub_notify)) {
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
					(uint32_t *)operation_result);
	}

	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timeout!");
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
				(uint32_t *)operation_result);
	}

	if (is_cmd_aux) {
		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
			struct aux_payload *payload = (struct aux_payload *)cmd_payload;

			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
				       adev->dm.dmub_notify->aux_reply.length);
			}
		}
	}

	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
			(uint32_t *)operation_result);
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on CHIP_VANGOGH.
 * If everything goes well, we may consider expanding
 * seamless boot to other ASICs.
 */
bool check_seamless_boot_capability(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VANGOGH:
		if (!adev->mman.keep_stolen_vga_memory)
			return true;
		break;
	default:
		break;
	}

	return false;
}