drm/amd/display: Reduce stack size for dml31 UseMinimumDCFCLK
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

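/*
 * Map the dongle type reported in the DC link's DPCD caps to the DRM
 * subconnector type exposed through the DisplayPort "subconnector"
 * property.
 */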
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

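/*
 * Return the current scanout position and vblank extents for a CRTC,
 * packed in register format: *position holds the vertical position in
 * the low word and horizontal position in the high word, while *vbl
 * holds vblank start (low word) and vblank end (high word).
 */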
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

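/*
 * Map an OTG (output timing generator) instance back to its amdgpu_crtc;
 * used by the IRQ handlers below to find the CRTC an interrupt belongs
 * to. Falls back to CRTC 0 when the instance is invalid (-1).
 */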
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

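/*
 * VRR helpers: a CRTC counts as "VRR active" when its freesync state is
 * actively variable or actively fixed. The _irq variant reads the
 * IRQ-safe copy kept in dm_irq_params.
 */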
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
	       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
	       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

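/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Fires after end of front-porch. Tracks the measured frame duration for
 * the refresh-rate tracepoint, and in VRR mode performs the core vblank
 * handling (plus BTR processing for pre-DCE12 ASICs) that
 * dm_crtc_high_irq() defers to this point.
 */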
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
	    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls a helper to do the processing.
 */
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev = adev->dm.ddev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets the indicator whether callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
				   dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
			dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);

}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining pending DMUB notifications
 * (dispatching them to registered callbacks, inline or via the delayed
 * HPD workqueue) and reading out DMCUB trace buffer entries.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				if (dmub_hpd_wrk->dmub_notify)
					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}


	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}

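/*
 * DRM audio component glue: lets the HD-audio driver query ELD data for a
 * given audio pin ("port") from the display driver. get_eld() finds the
 * connector bound to that pin and copies out its cached ELD.
 */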
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

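/*
 * Bring up the DMUB display microcontroller: copy the firmware's
 * instruction/constant, BSS/data and VBIOS regions into the reserved
 * framebuffer windows, program and start the hardware, wait for the
 * firmware to auto-load, and attach the resulting DMUB service to the
 * DC context.
 */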
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;
	struct dc *dc = adev->dm.dc;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->asic_type) {
	case CHIP_YELLOW_CARP:
		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
			hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
#endif
		}
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
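/*
 * Fill a dc_phy_addr_space_config from the GMC state: system aperture,
 * AGP window and GART page-table base/extent. DC consumes this via
 * dc_setup_system_context() when programming display address translation
 * on DCN APUs.
 */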
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;


	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
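/*
 * Deferred vblank enable/disable handling: maintains a count of CRTCs
 * with vblank interrupts enabled, only allows DC idle optimizations
 * (MALL) when that count drops to zero, and enters or exits PSR based
 * on the OS's vblank requirements.
 */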
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

#endif

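/*
 * HPD RX IRQ offload: short-pulse/RX interrupt handling that may require
 * sink re-detection or link-loss recovery is pushed onto a per-link
 * single-threaded workqueue, so it can sleep and take the DC lock outside
 * interrupt context.
 */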
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;


	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}

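/*
 * Quirk table for boards that misbehave with memory stutter enabled,
 * matched by PCI vendor/device/subsystem IDs and revision; see the
 * bugzilla link below. dm_should_disable_stutter() walks this list.
 */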
struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

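/*
 * Main DM init: sets up DM IRQ handling, builds dc_init_data from the
 * ASIC and feature flags, creates the DC instance, initializes DMUB,
 * freesync, color management, the optional HDCP and secure-display
 * workqueues and DMUB notification callbacks, and finally the DRM
 * display structures and vblank support.
 */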
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 1, 0):
			init_data.flags.gpu_vm_support = true;
			switch (adev->dm.dmcub_fw_version) {
			case 0: /* development */
			case 0x1: /* linux-firmware.git hash 6d9f399 */
			case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
				init_data.flags.disable_dmcu = false;
				break;
			default:
				init_data.flags.disable_dmcu = true;
			}
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			init_data.flags.gpu_vm_support = true;
			break;
		case IP_VERSION(2, 0, 3):
			init_data.flags.disable_dmcu = true;
			break;
		default:
			break;
		}
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

#ifdef CONFIG_DRM_AMD_DC_DCN
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
#endif

	init_data.flags.power_down_display_on_boot = true;

	if (check_seamless_boot_capability(adev)) {
		init_data.flags.power_down_display_on_boot = false;
		init_data.flags.allow_seamless_boot_optimization = true;
		DRM_INFO("Seamless boot condition check passed\n");
	}

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

8a791dab
HW
1482 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1483 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1484 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1485 }
1486
f99d8762
HW
1487 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1488 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3ce51649
AD
1489 if (dm_should_disable_stutter(adev->pdev))
1490 adev->dm.dc->debug.disable_stutter = true;
f99d8762 1491
8a791dab
HW
1492 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1493 adev->dm.dc->debug.disable_stutter = true;
1494
2665f63a 1495 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
8a791dab 1496 adev->dm.dc->debug.disable_dsc = true;
2665f63a
ML
1497 adev->dm.dc->debug.disable_dsc_edp = true;
1498 }
8a791dab
HW
1499
1500 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1501 adev->dm.dc->debug.disable_clock_gate = true;
1502
743b9786
NK
1503 r = dm_dmub_hw_init(adev);
1504 if (r) {
1505 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1506 goto error;
1507 }
1508
bb6785c1
NK
1509 dc_hardware_init(adev->dm.dc);
1510
8e794421
WL
1511 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1512 if (!adev->dm.hpd_rx_offload_wq) {
1513 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1514 goto error;
1515 }
1516
0b08c54b 1517#if defined(CONFIG_DRM_AMD_DC_DCN)
3ca001af 1518 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
e6cd859d
AD
1519 struct dc_phy_addr_space_config pa_config;
1520
0b08c54b 1521 mmhub_read_system_context(adev, &pa_config);
c0fb85ae 1522
0b08c54b
YZ
1523 // Call the DC init_memory func
1524 dc_setup_system_context(adev->dm.dc, &pa_config);
1525 }
1526#endif
c0fb85ae 1527
4562236b
HW
1528 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1529 if (!adev->dm.freesync_module) {
1530 DRM_ERROR(
1531 "amdgpu: failed to initialize freesync_module.\n");
1532 } else
f1ad2f5e 1533 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
4562236b
HW
1534 adev->dm.freesync_module);
1535
e277adc5
LSL
1536 amdgpu_dm_init_color_mod();
1537
ea3b4242
QZ
1538#if defined(CONFIG_DRM_AMD_DC_DCN)
1539 if (adev->dm.dc->caps.max_links > 0) {
09a5df6c
NK
1540 adev->dm.vblank_control_workqueue =
1541 create_singlethread_workqueue("dm_vblank_control_workqueue");
1542 if (!adev->dm.vblank_control_workqueue)
ea3b4242 1543 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
ea3b4242
QZ
1544 }
1545#endif
1546
52704fca 1547#ifdef CONFIG_DRM_AMD_DC_HDCP
c08182f2 1548 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
e50dc171 1549 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
52704fca 1550
96a3b32e
BL
1551 if (!adev->dm.hdcp_workqueue)
1552 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1553 else
1554 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
52704fca 1555
96a3b32e
BL
1556 dc_init_callbacks(adev->dm.dc, &init_params);
1557 }
9a65df19
WL
1558#endif
1559#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1560 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
52704fca 1561#endif
81927e28
JS
1562 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1563 init_completion(&adev->dm.dmub_aux_transfer_done);
1564 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1565 if (!adev->dm.dmub_notify) {
1566 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1567 goto error;
1568 }
e27c41d5
JS
1569
1570 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1571 if (!adev->dm.delayed_hpd_wq) {
1572 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1573 goto error;
1574 }
1575
81927e28 1576 amdgpu_dm_outbox_init(adev);
e27c41d5
JS
1577#if defined(CONFIG_DRM_AMD_DC_DCN)
1578 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1579 dmub_aux_setconfig_callback, false)) {
1580 DRM_ERROR("amdgpu: fail to register dmub aux callback");
1581 goto error;
1582 }
1583 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1584 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1585 goto error;
1586 }
c40a09e5
NK
1587 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1588 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1589 goto error;
1590 }
433e5dec 1591#endif /* CONFIG_DRM_AMD_DC_DCN */
81927e28
JS
1592 }
1593
4562236b
HW
1594 if (amdgpu_dm_initialize_drm_device(adev)) {
1595 DRM_ERROR(
1596 "amdgpu: failed to initialize sw for display support.\n");
1597 goto error;
1598 }
1599
f74367e4
AD
1600 /* create fake encoders for MST */
1601 dm_dp_create_fake_mst_encoders(adev);
1602
4562236b
HW
1603 /* TODO: Add_display_info? */
1604
1605 /* TODO use dynamic cursor width */
4a580877
LT
1606 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1607 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
4562236b 1608
4a580877 1609 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
4562236b
HW
1610 DRM_ERROR(
1611 "amdgpu: failed to initialize sw for display support.\n");
1612 goto error;
1613 }
1614
c0fb85ae 1615
f1ad2f5e 1616 DRM_DEBUG_DRIVER("KMS initialized.\n");
4562236b
HW
1617
1618 return 0;
1619error:
1620 amdgpu_dm_fini(adev);
1621
59d0f396 1622 return -EINVAL;
4562236b
HW
1623}
1624
e9669fb7
AG
1625static int amdgpu_dm_early_fini(void *handle)
1626{
1627 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1628
1629 amdgpu_dm_audio_fini(adev);
1630
1631 return 0;
1632}
1633
7578ecda 1634static void amdgpu_dm_fini(struct amdgpu_device *adev)
4562236b 1635{
f74367e4
AD
1636 int i;
1637
09a5df6c
NK
1638#if defined(CONFIG_DRM_AMD_DC_DCN)
1639 if (adev->dm.vblank_control_workqueue) {
1640 destroy_workqueue(adev->dm.vblank_control_workqueue);
1641 adev->dm.vblank_control_workqueue = NULL;
1642 }
1643#endif
1644
f74367e4
AD
1645 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1646 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1647 }
1648
4562236b 1649 amdgpu_dm_destroy_drm_device(&adev->dm);
c8bdf2b6 1650
9a65df19
WL
1651#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1652 if (adev->dm.crc_rd_wrk) {
1653 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1654 kfree(adev->dm.crc_rd_wrk);
1655 adev->dm.crc_rd_wrk = NULL;
1656 }
1657#endif
52704fca
BL
1658#ifdef CONFIG_DRM_AMD_DC_HDCP
1659 if (adev->dm.hdcp_workqueue) {
e96b1b29 1660 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
52704fca
BL
1661 adev->dm.hdcp_workqueue = NULL;
1662 }
1663
1664 if (adev->dm.dc)
1665 dc_deinit_callbacks(adev->dm.dc);
1666#endif
51ba6912 1667
3beac533 1668 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
9a71c7d3 1669
81927e28
JS
1670 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1671 kfree(adev->dm.dmub_notify);
1672 adev->dm.dmub_notify = NULL;
e27c41d5
JS
1673 destroy_workqueue(adev->dm.delayed_hpd_wq);
1674 adev->dm.delayed_hpd_wq = NULL;
81927e28
JS
1675 }
1676
743b9786
NK
1677 if (adev->dm.dmub_bo)
1678 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1679 &adev->dm.dmub_bo_gpu_addr,
1680 &adev->dm.dmub_bo_cpu_addr);
52704fca 1681
006c26a0
AG
1682 if (adev->dm.hpd_rx_offload_wq) {
1683 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1684 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1685 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1686 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1687 }
1688 }
1689
1690 kfree(adev->dm.hpd_rx_offload_wq);
1691 adev->dm.hpd_rx_offload_wq = NULL;
1692 }
1693
c8bdf2b6
ED
1694 /* DC Destroy TODO: Replace destroy DAL */
1695 if (adev->dm.dc)
1696 dc_destroy(&adev->dm.dc);
4562236b
HW
1697 /*
 1698	 * TODO: pageflip, vblank interrupt
1699 *
1700 * amdgpu_dm_irq_fini(adev);
1701 */
1702
1703 if (adev->dm.cgs_device) {
1704 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1705 adev->dm.cgs_device = NULL;
1706 }
1707 if (adev->dm.freesync_module) {
1708 mod_freesync_destroy(adev->dm.freesync_module);
1709 adev->dm.freesync_module = NULL;
1710 }
674e78ac 1711
6ce8f316 1712 mutex_destroy(&adev->dm.audio_lock);
674e78ac
NK
1713 mutex_destroy(&adev->dm.dc_lock);
1714
4562236b
HW
1715 return;
1716}
1717
a94d5569 1718static int load_dmcu_fw(struct amdgpu_device *adev)
4562236b 1719{
a7669aff 1720 const char *fw_name_dmcu = NULL;
a94d5569
DF
1721 int r;
1722 const struct dmcu_firmware_header_v1_0 *hdr;
1723
 1724	switch (adev->asic_type) {
55e56389
MR
1725#if defined(CONFIG_DRM_AMD_DC_SI)
1726 case CHIP_TAHITI:
1727 case CHIP_PITCAIRN:
1728 case CHIP_VERDE:
1729 case CHIP_OLAND:
1730#endif
a94d5569
DF
1731 case CHIP_BONAIRE:
1732 case CHIP_HAWAII:
1733 case CHIP_KAVERI:
1734 case CHIP_KABINI:
1735 case CHIP_MULLINS:
1736 case CHIP_TONGA:
1737 case CHIP_FIJI:
1738 case CHIP_CARRIZO:
1739 case CHIP_STONEY:
1740 case CHIP_POLARIS11:
1741 case CHIP_POLARIS10:
1742 case CHIP_POLARIS12:
1743 case CHIP_VEGAM:
1744 case CHIP_VEGA10:
1745 case CHIP_VEGA12:
1746 case CHIP_VEGA20:
1747 return 0;
5ea23931
RL
1748 case CHIP_NAVI12:
1749 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1750 break;
a94d5569 1751 case CHIP_RAVEN:
a7669aff
HW
1752 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1753 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1754 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1755 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1756 else
a7669aff 1757 return 0;
a94d5569
DF
1758 break;
1759 default:
1d789535 1760 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
1761 case IP_VERSION(2, 0, 2):
1762 case IP_VERSION(2, 0, 3):
1763 case IP_VERSION(2, 0, 0):
1764 case IP_VERSION(2, 1, 0):
1765 case IP_VERSION(3, 0, 0):
1766 case IP_VERSION(3, 0, 2):
1767 case IP_VERSION(3, 0, 3):
1768 case IP_VERSION(3, 0, 1):
1769 case IP_VERSION(3, 1, 2):
1770 case IP_VERSION(3, 1, 3):
1771 return 0;
1772 default:
1773 break;
1774 }
a94d5569 1775 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
59d0f396 1776 return -EINVAL;
a94d5569
DF
1777 }
1778
1779 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1780 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1781 return 0;
1782 }
1783
1784 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1785 if (r == -ENOENT) {
1786 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1787 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1788 adev->dm.fw_dmcu = NULL;
1789 return 0;
1790 }
1791 if (r) {
1792 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1793 fw_name_dmcu);
1794 return r;
1795 }
1796
1797 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1798 if (r) {
1799 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1800 fw_name_dmcu);
1801 release_firmware(adev->dm.fw_dmcu);
1802 adev->dm.fw_dmcu = NULL;
1803 return r;
1804 }
1805
1806 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1807 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1808 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1809 adev->firmware.fw_size +=
1810 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1811
1812 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1813 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1814 adev->firmware.fw_size +=
1815 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
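	/* Editorial note: the DMCU image is split into an ERAM section and an
	 * interrupt-vector section. The header's intv_size_bytes gives the
	 * vector size, and the ERAM size is the remainder of
	 * ucode_size_bytes, which is what the two ALIGN() sums above reflect.
	 */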
1816
ee6e89c0
DF
1817 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1818
a94d5569
DF
1819 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1820
4562236b
HW
1821 return 0;
1822}
1823
743b9786
NK
1824static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1825{
1826 struct amdgpu_device *adev = ctx;
1827
1828 return dm_read_reg(adev->dm.dc->ctx, address);
1829}
1830
1831static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1832 uint32_t value)
1833{
1834 struct amdgpu_device *adev = ctx;
1835
1836 return dm_write_reg(adev->dm.dc->ctx, address, value);
1837}
1838
1839static int dm_dmub_sw_init(struct amdgpu_device *adev)
1840{
1841 struct dmub_srv_create_params create_params;
8c7aea40
NK
1842 struct dmub_srv_region_params region_params;
1843 struct dmub_srv_region_info region_info;
1844 struct dmub_srv_fb_params fb_params;
1845 struct dmub_srv_fb_info *fb_info;
1846 struct dmub_srv *dmub_srv;
743b9786
NK
1847 const struct dmcub_firmware_header_v1_0 *hdr;
1848 const char *fw_name_dmub;
1849 enum dmub_asic dmub_asic;
1850 enum dmub_status status;
1851 int r;
1852
1d789535 1853 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2 1854 case IP_VERSION(2, 1, 0):
743b9786
NK
1855 dmub_asic = DMUB_ASIC_DCN21;
1856 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
71c0fd92
RL
1857 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1858 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
743b9786 1859 break;
c08182f2 1860 case IP_VERSION(3, 0, 0):
1d789535 1861 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
c08182f2
AD
1862 dmub_asic = DMUB_ASIC_DCN30;
1863 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1864 } else {
1865 dmub_asic = DMUB_ASIC_DCN30;
1866 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1867 }
79037324 1868 break;
c08182f2 1869 case IP_VERSION(3, 0, 1):
469989ca
RL
1870 dmub_asic = DMUB_ASIC_DCN301;
1871 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1872 break;
c08182f2 1873 case IP_VERSION(3, 0, 2):
2a411205
BL
1874 dmub_asic = DMUB_ASIC_DCN302;
1875 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1876 break;
c08182f2 1877 case IP_VERSION(3, 0, 3):
656fe9b6
AP
1878 dmub_asic = DMUB_ASIC_DCN303;
1879 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1880 break;
c08182f2
AD
1881 case IP_VERSION(3, 1, 2):
1882 case IP_VERSION(3, 1, 3):
3137f792 1883 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1ebcaebd
NK
1884 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1885 break;
743b9786
NK
1886
1887 default:
1888 /* ASIC doesn't support DMUB. */
1889 return 0;
1890 }
1891
743b9786
NK
1892 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1893 if (r) {
1894 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1895 return 0;
1896 }
1897
1898 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1899 if (r) {
1900 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1901 return 0;
1902 }
1903
743b9786 1904 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
72a74a18 1905 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
743b9786 1906
9a6ed547
NK
1907 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1908 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1909 AMDGPU_UCODE_ID_DMCUB;
1910 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1911 adev->dm.dmub_fw;
1912 adev->firmware.fw_size +=
1913 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
743b9786 1914
9a6ed547
NK
1915 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1916 adev->dm.dmcub_fw_version);
1917 }
1918
743b9786 1919
8c7aea40
NK
1920 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1921 dmub_srv = adev->dm.dmub_srv;
1922
1923 if (!dmub_srv) {
1924 DRM_ERROR("Failed to allocate DMUB service!\n");
1925 return -ENOMEM;
1926 }
1927
1928 memset(&create_params, 0, sizeof(create_params));
1929 create_params.user_ctx = adev;
1930 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1931 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1932 create_params.asic = dmub_asic;
1933
1934 /* Create the DMUB service. */
1935 status = dmub_srv_create(dmub_srv, &create_params);
1936 if (status != DMUB_STATUS_OK) {
1937 DRM_ERROR("Error creating DMUB service: %d\n", status);
1938 return -EINVAL;
1939 }
1940
1941 /* Calculate the size of all the regions for the DMUB service. */
1942 memset(&region_params, 0, sizeof(region_params));
1943
1944 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1945 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1946 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1947 region_params.vbios_size = adev->bios_size;
0922b899 1948 region_params.fw_bss_data = region_params.bss_data_size ?
1f0674fd
NK
1949 adev->dm.dmub_fw->data +
1950 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
0922b899 1951 le32_to_cpu(hdr->inst_const_bytes) : NULL;
a576b345
NK
1952 region_params.fw_inst_const =
1953 adev->dm.dmub_fw->data +
1954 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1955 PSP_HEADER_BYTES;
8c7aea40
NK
1956
1957 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1958 &region_info);
1959
1960 if (status != DMUB_STATUS_OK) {
1961 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1962 return -EINVAL;
1963 }
1964
1965 /*
1966 * Allocate a framebuffer based on the total size of all the regions.
1967 * TODO: Move this into GART.
1968 */
1969 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1970 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1971 &adev->dm.dmub_bo_gpu_addr,
1972 &adev->dm.dmub_bo_cpu_addr);
1973 if (r)
1974 return r;
1975
1976 /* Rebase the regions on the framebuffer address. */
1977 memset(&fb_params, 0, sizeof(fb_params));
1978 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1979 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1980 fb_params.region_info = &region_info;
1981
1982 adev->dm.dmub_fb_info =
1983 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1984 fb_info = adev->dm.dmub_fb_info;
1985
1986 if (!fb_info) {
1987 DRM_ERROR(
1988 "Failed to allocate framebuffer info for DMUB service!\n");
1989 return -ENOMEM;
1990 }
1991
1992 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1993 if (status != DMUB_STATUS_OK) {
1994 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1995 return -EINVAL;
1996 }
1997
743b9786
NK
1998 return 0;
1999}
2000
a94d5569
DF
2001static int dm_sw_init(void *handle)
2002{
2003 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
743b9786
NK
2004 int r;
2005
2006 r = dm_dmub_sw_init(adev);
2007 if (r)
2008 return r;
a94d5569
DF
2009
2010 return load_dmcu_fw(adev);
2011}
2012
4562236b
HW
2013static int dm_sw_fini(void *handle)
2014{
a94d5569
DF
2015 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2016
8c7aea40
NK
2017 kfree(adev->dm.dmub_fb_info);
2018 adev->dm.dmub_fb_info = NULL;
2019
743b9786
NK
2020 if (adev->dm.dmub_srv) {
2021 dmub_srv_destroy(adev->dm.dmub_srv);
2022 adev->dm.dmub_srv = NULL;
2023 }
2024
75e1658e
ND
2025 release_firmware(adev->dm.dmub_fw);
2026 adev->dm.dmub_fw = NULL;
743b9786 2027
75e1658e
ND
2028 release_firmware(adev->dm.fw_dmcu);
2029 adev->dm.fw_dmcu = NULL;
a94d5569 2030
4562236b
HW
2031 return 0;
2032}
2033
7abcf6b5 2034static int detect_mst_link_for_all_connectors(struct drm_device *dev)
4562236b 2035{
c84dec2f 2036 struct amdgpu_dm_connector *aconnector;
4562236b 2037 struct drm_connector *connector;
f8d2d39e 2038 struct drm_connector_list_iter iter;
7abcf6b5 2039 int ret = 0;
4562236b 2040
f8d2d39e
LP
2041 drm_connector_list_iter_begin(dev, &iter);
2042 drm_for_each_connector_iter(connector, &iter) {
b349f76e 2043 aconnector = to_amdgpu_dm_connector(connector);
30ec2b97
JFZ
2044 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2045 aconnector->mst_mgr.aux) {
f1ad2f5e 2046 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
f8d2d39e
LP
2047 aconnector,
2048 aconnector->base.base.id);
7abcf6b5
AG
2049
2050 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2051 if (ret < 0) {
2052 DRM_ERROR("DM_MST: Failed to start MST\n");
f8d2d39e
LP
2053 aconnector->dc_link->type =
2054 dc_connection_single;
2055 break;
7abcf6b5 2056 }
f8d2d39e 2057 }
4562236b 2058 }
f8d2d39e 2059 drm_connector_list_iter_end(&iter);
4562236b 2060
7abcf6b5
AG
2061 return ret;
2062}
2063
2064static int dm_late_init(void *handle)
2065{
42e67c3b 2066 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7abcf6b5 2067
bbf854dc
DF
2068 struct dmcu_iram_parameters params;
2069 unsigned int linear_lut[16];
2070 int i;
17bdb4a8 2071 struct dmcu *dmcu = NULL;
bbf854dc 2072
17bdb4a8
JFZ
2073 dmcu = adev->dm.dc->res_pool->dmcu;
2074
bbf854dc
DF
2075 for (i = 0; i < 16; i++)
2076 linear_lut[i] = 0xFFFF * i / 15;
2077
2078 params.set = 0;
75068994 2079 params.backlight_ramping_override = false;
bbf854dc
DF
2080 params.backlight_ramping_start = 0xCCCC;
2081 params.backlight_ramping_reduction = 0xCCCCCCCC;
2082 params.backlight_lut_array_size = 16;
2083 params.backlight_lut_array = linear_lut;
2084
2ad0cdf9
AK
 2085	/* Min backlight level after ABM reduction; don't allow it below 1%:
2086 * 0xFFFF x 0.01 = 0x28F
2087 */
2088 params.min_abm_backlight = 0x28F;
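	/* Editorial check of the arithmetic above: 0xFFFF = 65535, and
	 * 65535 * 0.01 = 655.35, which truncates to 655 = 0x28F.
	 */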
5cb32419 2089	/* In the case where ABM is implemented on dmcub, the
6e568e43
JW
 2090	 * dmcu object will be null.
 2091	 * ABM 2.4 and up are implemented on dmcub.
2092 */
2093 if (dmcu) {
2094 if (!dmcu_load_iram(dmcu, params))
2095 return -EINVAL;
2096 } else if (adev->dm.dc->ctx->dmub_srv) {
2097 struct dc_link *edp_links[MAX_NUM_EDP];
2098 int edp_num;
bbf854dc 2099
6e568e43
JW
2100 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2101 for (i = 0; i < edp_num; i++) {
2102 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2103 return -EINVAL;
2104 }
2105 }
bbf854dc 2106
4a580877 2107 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
4562236b
HW
2108}
2109
2110static void s3_handle_mst(struct drm_device *dev, bool suspend)
2111{
c84dec2f 2112 struct amdgpu_dm_connector *aconnector;
4562236b 2113 struct drm_connector *connector;
f8d2d39e 2114 struct drm_connector_list_iter iter;
fe7553be
LP
2115 struct drm_dp_mst_topology_mgr *mgr;
2116 int ret;
2117 bool need_hotplug = false;
4562236b 2118
f8d2d39e
LP
2119 drm_connector_list_iter_begin(dev, &iter);
2120 drm_for_each_connector_iter(connector, &iter) {
fe7553be
LP
2121 aconnector = to_amdgpu_dm_connector(connector);
2122 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2123 aconnector->mst_port)
2124 continue;
2125
2126 mgr = &aconnector->mst_mgr;
2127
2128 if (suspend) {
2129 drm_dp_mst_topology_mgr_suspend(mgr);
2130 } else {
6f85f738 2131 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
fe7553be
LP
2132 if (ret < 0) {
2133 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2134 need_hotplug = true;
2135 }
2136 }
4562236b 2137 }
f8d2d39e 2138 drm_connector_list_iter_end(&iter);
fe7553be
LP
2139
2140 if (need_hotplug)
2141 drm_kms_helper_hotplug_event(dev);
4562236b
HW
2142}
2143
9340dfd3
HW
2144static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2145{
2146 struct smu_context *smu = &adev->smu;
2147 int ret = 0;
2148
2149 if (!is_support_sw_smu(adev))
2150 return 0;
2151
 2152	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
 2153	 * on the Windows driver dc implementation.
 2154	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
 2155	 * should be passed to smu during boot up and resume from s3.
 2156	 * boot up: dc calculates dcn watermark clock settings within dc_create,
2157 * dcn20_resource_construct
2158 * then call pplib functions below to pass the settings to smu:
2159 * smu_set_watermarks_for_clock_ranges
2160 * smu_set_watermarks_table
2161 * navi10_set_watermarks_table
2162 * smu_write_watermarks_table
2163 *
2164 * For Renoir, clock settings of dcn watermark are also fixed values.
 2165	 * dc has implemented a different flow for the Windows driver:
2166 * dc_hardware_init / dc_set_power_state
2167 * dcn10_init_hw
2168 * notify_wm_ranges
2169 * set_wm_ranges
2170 * -- Linux
2171 * smu_set_watermarks_for_clock_ranges
2172 * renoir_set_watermarks_table
2173 * smu_write_watermarks_table
2174 *
2175 * For Linux,
2176 * dc_hardware_init -> amdgpu_dm_init
2177 * dc_set_power_state --> dm_resume
2178 *
 2179	 * Therefore, this function applies to navi10/12/14 but not to Renoir.
 2180	 *
 2181	 */
1d789535 2182 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
2183 case IP_VERSION(2, 0, 2):
2184 case IP_VERSION(2, 0, 0):
9340dfd3
HW
2185 break;
2186 default:
2187 return 0;
2188 }
2189
e7a95eea
EQ
2190 ret = smu_write_watermarks_table(smu);
2191 if (ret) {
2192 DRM_ERROR("Failed to update WMTABLE!\n");
2193 return ret;
9340dfd3
HW
2194 }
2195
9340dfd3
HW
2196 return 0;
2197}
2198
b8592b48
LL
2199/**
2200 * dm_hw_init() - Initialize DC device
28d687ea 2201 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
2202 *
2203 * Initialize the &struct amdgpu_display_manager device. This involves calling
2204 * the initializers of each DM component, then populating the struct with them.
2205 *
2206 * Although the function implies hardware initialization, both hardware and
2207 * software are initialized here. Splitting them out to their relevant init
2208 * hooks is a future TODO item.
2209 *
2210 * Some notable things that are initialized here:
2211 *
2212 * - Display Core, both software and hardware
2213 * - DC modules that we need (freesync and color management)
2214 * - DRM software states
2215 * - Interrupt sources and handlers
2216 * - Vblank support
2217 * - Debug FS entries, if enabled
2218 */
4562236b
HW
2219static int dm_hw_init(void *handle)
2220{
2221 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2222 /* Create DAL display manager */
2223 amdgpu_dm_init(adev);
4562236b
HW
2224 amdgpu_dm_hpd_init(adev);
2225
4562236b
HW
2226 return 0;
2227}
2228
b8592b48
LL
2229/**
2230 * dm_hw_fini() - Teardown DC device
28d687ea 2231 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
2232 *
2233 * Teardown components within &struct amdgpu_display_manager that require
2234 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2235 * were loaded. Also flush IRQ workqueues and disable them.
2236 */
4562236b
HW
2237static int dm_hw_fini(void *handle)
2238{
2239 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2240
2241 amdgpu_dm_hpd_fini(adev);
2242
2243 amdgpu_dm_irq_fini(adev);
21de3396 2244 amdgpu_dm_fini(adev);
4562236b
HW
2245 return 0;
2246}
2247
cdaae837
BL
2248
2249static int dm_enable_vblank(struct drm_crtc *crtc);
2250static void dm_disable_vblank(struct drm_crtc *crtc);
2251
2252static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2253 struct dc_state *state, bool enable)
2254{
2255 enum dc_irq_source irq_source;
2256 struct amdgpu_crtc *acrtc;
2257 int rc = -EBUSY;
2258 int i = 0;
2259
2260 for (i = 0; i < state->stream_count; i++) {
2261 acrtc = get_crtc_by_otg_inst(
2262 adev, state->stream_status[i].primary_otg_inst);
2263
2264 if (acrtc && state->stream_status[i].plane_count != 0) {
2265 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2266 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4711c033
LT
2267 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2268 acrtc->crtc_id, enable ? "en" : "dis", rc);
cdaae837
BL
2269 if (rc)
2270 DRM_WARN("Failed to %s pflip interrupts\n",
2271 enable ? "enable" : "disable");
2272
2273 if (enable) {
2274 rc = dm_enable_vblank(&acrtc->base);
2275 if (rc)
2276 DRM_WARN("Failed to enable vblank interrupts\n");
2277 } else {
2278 dm_disable_vblank(&acrtc->base);
2279 }
2280
2281 }
2282 }
2283
2284}
2285
dfd84d90 2286static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
2287{
2288 struct dc_state *context = NULL;
2289 enum dc_status res = DC_ERROR_UNEXPECTED;
2290 int i;
2291 struct dc_stream_state *del_streams[MAX_PIPES];
2292 int del_streams_count = 0;
2293
2294 memset(del_streams, 0, sizeof(del_streams));
2295
2296 context = dc_create_state(dc);
2297 if (context == NULL)
2298 goto context_alloc_fail;
2299
2300 dc_resource_state_copy_construct_current(dc, context);
2301
2302 /* First remove from context all streams */
2303 for (i = 0; i < context->stream_count; i++) {
2304 struct dc_stream_state *stream = context->streams[i];
2305
2306 del_streams[del_streams_count++] = stream;
2307 }
2308
2309 /* Remove all planes for removed streams and then remove the streams */
2310 for (i = 0; i < del_streams_count; i++) {
2311 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2312 res = DC_FAIL_DETACH_SURFACES;
2313 goto fail;
2314 }
2315
2316 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2317 if (res != DC_OK)
2318 goto fail;
2319 }
2320
cdaae837
BL
2321 res = dc_commit_state(dc, context);
2322
2323fail:
2324 dc_release_state(context);
2325
2326context_alloc_fail:
2327 return res;
2328}
2329
8e794421
WL
2330static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2331{
2332 int i;
2333
2334 if (dm->hpd_rx_offload_wq) {
2335 for (i = 0; i < dm->dc->caps.max_links; i++)
2336 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2337 }
2338}
2339
4562236b
HW
2340static int dm_suspend(void *handle)
2341{
2342 struct amdgpu_device *adev = handle;
2343 struct amdgpu_display_manager *dm = &adev->dm;
2344 int ret = 0;
4562236b 2345
53b3f8f4 2346 if (amdgpu_in_reset(adev)) {
cdaae837 2347 mutex_lock(&dm->dc_lock);
98ab5f35
BL
2348
2349#if defined(CONFIG_DRM_AMD_DC_DCN)
2350 dc_allow_idle_optimizations(adev->dm.dc, false);
2351#endif
2352
cdaae837
BL
2353 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2354
2355 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2356
2357 amdgpu_dm_commit_zero_streams(dm->dc);
2358
2359 amdgpu_dm_irq_suspend(adev);
2360
8e794421
WL
2361 hpd_rx_irq_work_suspend(dm);
2362
cdaae837
BL
2363 return ret;
2364 }
4562236b 2365
d2f0b53b 2366 WARN_ON(adev->dm.cached_state);
4a580877 2367 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 2368
4a580877 2369 s3_handle_mst(adev_to_drm(adev), true);
4562236b 2370
4562236b
HW
2371 amdgpu_dm_irq_suspend(adev);
2372
8e794421
WL
2373 hpd_rx_irq_work_suspend(dm);
2374
32f5062d 2375 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 2376
1c2075d4 2377 return 0;
4562236b
HW
2378}
2379
1daf8c63
AD
2380static struct amdgpu_dm_connector *
2381amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2382 struct drm_crtc *crtc)
4562236b
HW
2383{
2384 uint32_t i;
c2cea706 2385 struct drm_connector_state *new_con_state;
4562236b
HW
2386 struct drm_connector *connector;
2387 struct drm_crtc *crtc_from_state;
2388
c2cea706
LSL
2389 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2390 crtc_from_state = new_con_state->crtc;
4562236b
HW
2391
2392 if (crtc_from_state == crtc)
c84dec2f 2393 return to_amdgpu_dm_connector(connector);
4562236b
HW
2394 }
2395
2396 return NULL;
2397}
2398
fbbdadf2
BL
2399static void emulated_link_detect(struct dc_link *link)
2400{
2401 struct dc_sink_init_data sink_init_data = { 0 };
2402 struct display_sink_capability sink_caps = { 0 };
2403 enum dc_edid_status edid_status;
2404 struct dc_context *dc_ctx = link->ctx;
2405 struct dc_sink *sink = NULL;
2406 struct dc_sink *prev_sink = NULL;
2407
2408 link->type = dc_connection_none;
2409 prev_sink = link->local_sink;
2410
30164a16
VL
2411 if (prev_sink)
2412 dc_sink_release(prev_sink);
fbbdadf2
BL
2413
2414 switch (link->connector_signal) {
2415 case SIGNAL_TYPE_HDMI_TYPE_A: {
2416 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2417 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2418 break;
2419 }
2420
2421 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2422 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2423 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2424 break;
2425 }
2426
2427 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2428 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2429 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2430 break;
2431 }
2432
2433 case SIGNAL_TYPE_LVDS: {
2434 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2435 sink_caps.signal = SIGNAL_TYPE_LVDS;
2436 break;
2437 }
2438
2439 case SIGNAL_TYPE_EDP: {
2440 sink_caps.transaction_type =
2441 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2442 sink_caps.signal = SIGNAL_TYPE_EDP;
2443 break;
2444 }
2445
2446 case SIGNAL_TYPE_DISPLAY_PORT: {
2447 sink_caps.transaction_type =
2448 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2449 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2450 break;
2451 }
2452
2453 default:
2454 DC_ERROR("Invalid connector type! signal:%d\n",
2455 link->connector_signal);
2456 return;
2457 }
2458
2459 sink_init_data.link = link;
2460 sink_init_data.sink_signal = sink_caps.signal;
2461
2462 sink = dc_sink_create(&sink_init_data);
2463 if (!sink) {
2464 DC_ERROR("Failed to create sink!\n");
2465 return;
2466 }
2467
dcd5fb82 2468 /* dc_sink_create returns a new reference */
fbbdadf2
BL
2469 link->local_sink = sink;
2470
2471 edid_status = dm_helpers_read_local_edid(
2472 link->ctx,
2473 link,
2474 sink);
2475
2476 if (edid_status != EDID_OK)
2477 DC_ERROR("Failed to read EDID");
2478
2479}
2480
cdaae837
BL
2481static void dm_gpureset_commit_state(struct dc_state *dc_state,
2482 struct amdgpu_display_manager *dm)
2483{
2484 struct {
2485 struct dc_surface_update surface_updates[MAX_SURFACES];
2486 struct dc_plane_info plane_infos[MAX_SURFACES];
2487 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2488 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2489 struct dc_stream_update stream_update;
2490 } * bundle;
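	/*
	 * Editorial note: with four MAX_SURFACES-sized arrays of update
	 * structs, this bundle is far too large for the kernel stack, which
	 * is presumably why it is an anonymous struct allocated with
	 * kzalloc() below rather than a local variable.
	 */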
2491 int k, m;
2492
2493 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2494
2495 if (!bundle) {
2496 dm_error("Failed to allocate update bundle\n");
2497 goto cleanup;
2498 }
2499
2500 for (k = 0; k < dc_state->stream_count; k++) {
2501 bundle->stream_update.stream = dc_state->streams[k];
2502
2503 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2504 bundle->surface_updates[m].surface =
2505 dc_state->stream_status->plane_states[m];
2506 bundle->surface_updates[m].surface->force_full_update =
2507 true;
2508 }
2509 dc_commit_updates_for_stream(
2510 dm->dc, bundle->surface_updates,
2511 dc_state->stream_status->plane_count,
efc8278e 2512 dc_state->streams[k], &bundle->stream_update, dc_state);
cdaae837
BL
2513 }
2514
2515cleanup:
2516 kfree(bundle);
2517
2518 return;
2519}
2520
035f5496 2521static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
3c4d55c9
AP
2522{
2523 struct dc_stream_state *stream_state;
2524 struct amdgpu_dm_connector *aconnector = link->priv;
2525 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2526 struct dc_stream_update stream_update;
2527 bool dpms_off = true;
2528
2529 memset(&stream_update, 0, sizeof(stream_update));
2530 stream_update.dpms_off = &dpms_off;
2531
2532 mutex_lock(&adev->dm.dc_lock);
2533 stream_state = dc_stream_find_from_link(link);
2534
2535 if (stream_state == NULL) {
2536 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2537 mutex_unlock(&adev->dm.dc_lock);
2538 return;
2539 }
2540
2541 stream_update.stream = stream_state;
035f5496 2542 acrtc_state->force_dpms_off = true;
3c4d55c9 2543 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
efc8278e
AJ
2544 stream_state, &stream_update,
2545 stream_state->ctx->dc->current_state);
3c4d55c9
AP
2546 mutex_unlock(&adev->dm.dc_lock);
2547}
2548
4562236b
HW
2549static int dm_resume(void *handle)
2550{
2551 struct amdgpu_device *adev = handle;
4a580877 2552 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2553 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2554 struct amdgpu_dm_connector *aconnector;
4562236b 2555 struct drm_connector *connector;
f8d2d39e 2556 struct drm_connector_list_iter iter;
4562236b 2557 struct drm_crtc *crtc;
c2cea706 2558 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
2559 struct dm_crtc_state *dm_new_crtc_state;
2560 struct drm_plane *plane;
2561 struct drm_plane_state *new_plane_state;
2562 struct dm_plane_state *dm_new_plane_state;
113b7a01 2563 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2564 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
2565 struct dc_state *dc_state;
2566 int i, r, j;
4562236b 2567
53b3f8f4 2568 if (amdgpu_in_reset(adev)) {
cdaae837
BL
2569 dc_state = dm->cached_dc_state;
2570
6d63fcc2
NK
2571 /*
2572 * The dc->current_state is backed up into dm->cached_dc_state
2573 * before we commit 0 streams.
2574 *
2575 * DC will clear link encoder assignments on the real state
2576 * but the changes won't propagate over to the copy we made
2577 * before the 0 streams commit.
2578 *
2579 * DC expects that link encoder assignments are *not* valid
2580 * when committing a state, so as a workaround it needs to be
2581 * cleared here.
2582 */
2583 link_enc_cfg_init(dm->dc, dc_state);
2584
be1ac692
NK
2585 if (dc_enable_dmub_notifications(adev->dm.dc))
2586 amdgpu_dm_outbox_init(adev);
524a0ba6 2587
cdaae837
BL
2588 r = dm_dmub_hw_init(adev);
2589 if (r)
2590 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2591
2592 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2593 dc_resume(dm->dc);
2594
2595 amdgpu_dm_irq_resume_early(adev);
2596
2597 for (i = 0; i < dc_state->stream_count; i++) {
2598 dc_state->streams[i]->mode_changed = true;
6984fa41
NK
2599 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2600 dc_state->stream_status[i].plane_states[j]->update_flags.raw
cdaae837
BL
2601 = 0xffffffff;
2602 }
2603 }
2604
2605 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2606
cdaae837
BL
2607 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2608
2609 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2610
2611 dc_release_state(dm->cached_dc_state);
2612 dm->cached_dc_state = NULL;
2613
2614 amdgpu_dm_irq_resume_late(adev);
2615
2616 mutex_unlock(&dm->dc_lock);
2617
2618 return 0;
2619 }
113b7a01
LL
2620 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2621 dc_release_state(dm_state->context);
2622 dm_state->context = dc_create_state(dm->dc);
2623 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2624 dc_resource_state_construct(dm->dc, dm_state->context);
2625
be1ac692
NK
2626 /* Re-enable outbox interrupts for DPIA. */
2627 if (dc_enable_dmub_notifications(adev->dm.dc))
2628 amdgpu_dm_outbox_init(adev);
2629
8c7aea40
NK
2630 /* Before powering on DC we need to re-initialize DMUB. */
2631 r = dm_dmub_hw_init(adev);
2632 if (r)
2633 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2634
a80aa93d
ML
2635 /* power on hardware */
2636 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2637
4562236b
HW
2638 /* program HPD filter */
2639 dc_resume(dm->dc);
2640
4562236b
HW
2641 /*
2642 * early enable HPD Rx IRQ, should be done before set mode as short
2643 * pulse interrupts are used for MST
2644 */
2645 amdgpu_dm_irq_resume_early(adev);
2646
d20ebea8 2647	/* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
2648 s3_handle_mst(ddev, false);
2649
4562236b 2650 /* Do detection*/
f8d2d39e
LP
2651 drm_connector_list_iter_begin(ddev, &iter);
2652 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2653 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2654
2655 /*
2656 * this is the case when traversing through already created
2657 * MST connectors, should be skipped
2658 */
2659 if (aconnector->mst_port)
2660 continue;
2661
03ea364c 2662 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2663 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2664 DRM_ERROR("KMS: Failed to detect connector\n");
2665
2666 if (aconnector->base.force && new_connection_type == dc_connection_none)
2667 emulated_link_detect(aconnector->dc_link);
2668 else
2669 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2670
2671 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2672 aconnector->fake_enable = false;
2673
dcd5fb82
MF
2674 if (aconnector->dc_sink)
2675 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2676 aconnector->dc_sink = NULL;
2677 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2678 mutex_unlock(&aconnector->hpd_lock);
4562236b 2679 }
f8d2d39e 2680 drm_connector_list_iter_end(&iter);
4562236b 2681
1f6010a9 2682 /* Force mode set in atomic commit */
a80aa93d 2683 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2684 new_crtc_state->active_changed = true;
4f346e65 2685
fcb4019e
LSL
2686 /*
2687 * atomic_check is expected to create the dc states. We need to release
2688 * them here, since they were duplicated as part of the suspend
2689 * procedure.
2690 */
a80aa93d 2691 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2692 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2693 if (dm_new_crtc_state->stream) {
2694 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2695 dc_stream_release(dm_new_crtc_state->stream);
2696 dm_new_crtc_state->stream = NULL;
2697 }
2698 }
2699
a80aa93d 2700 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2701 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2702 if (dm_new_plane_state->dc_state) {
2703 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2704 dc_plane_state_release(dm_new_plane_state->dc_state);
2705 dm_new_plane_state->dc_state = NULL;
2706 }
2707 }
2708
2d1af6a1 2709 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2710
a80aa93d 2711 dm->cached_state = NULL;
0a214e2f 2712
9faa4237 2713 amdgpu_dm_irq_resume_late(adev);
4562236b 2714
9340dfd3
HW
2715 amdgpu_dm_smu_write_watermarks_table(adev);
2716
2d1af6a1 2717 return 0;
4562236b
HW
2718}
2719
b8592b48
LL
2720/**
2721 * DOC: DM Lifecycle
2722 *
2723 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2724 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2725 * the base driver's device list to be initialized and torn down accordingly.
2726 *
2727 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2728 */
2729
4562236b
HW
2730static const struct amd_ip_funcs amdgpu_dm_funcs = {
2731 .name = "dm",
2732 .early_init = dm_early_init,
7abcf6b5 2733 .late_init = dm_late_init,
4562236b
HW
2734 .sw_init = dm_sw_init,
2735 .sw_fini = dm_sw_fini,
e9669fb7 2736 .early_fini = amdgpu_dm_early_fini,
4562236b
HW
2737 .hw_init = dm_hw_init,
2738 .hw_fini = dm_hw_fini,
2739 .suspend = dm_suspend,
2740 .resume = dm_resume,
2741 .is_idle = dm_is_idle,
2742 .wait_for_idle = dm_wait_for_idle,
2743 .check_soft_reset = dm_check_soft_reset,
2744 .soft_reset = dm_soft_reset,
2745 .set_clockgating_state = dm_set_clockgating_state,
2746 .set_powergating_state = dm_set_powergating_state,
2747};
2748
2749const struct amdgpu_ip_block_version dm_ip_block =
2750{
2751 .type = AMD_IP_BLOCK_TYPE_DCE,
2752 .major = 1,
2753 .minor = 0,
2754 .rev = 0,
2755 .funcs = &amdgpu_dm_funcs,
2756};
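
/*
 * Editorial sketch (illustrative, not part of this file): the base driver
 * picks this block up during ASIC setup via its existing helper, e.g.:
 *
 *	amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * The exact call site depends on the ASIC init path.
 */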
2757
ca3268c4 2758
b8592b48
LL
2759/**
2760 * DOC: atomic
2761 *
2762 * *WIP*
2763 */
0a323b84 2764
b3663f70 2765static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2766 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2767 .get_format_info = amd_get_format_info,
366c1baa 2768 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2769 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2770 .atomic_commit = drm_atomic_helper_commit,
54f5499a
AG
2771};
2772
2773static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2774 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2775};
2776
94562810
RS
2777static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2778{
2779 u32 max_cll, min_cll, max, min, q, r;
2780 struct amdgpu_dm_backlight_caps *caps;
2781 struct amdgpu_display_manager *dm;
2782 struct drm_connector *conn_base;
2783 struct amdgpu_device *adev;
ec11fe37 2784 struct dc_link *link = NULL;
94562810
RS
2785 static const u8 pre_computed_values[] = {
2786 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2787 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
7fd13bae 2788 int i;
94562810
RS
2789
2790 if (!aconnector || !aconnector->dc_link)
2791 return;
2792
ec11fe37 2793 link = aconnector->dc_link;
2794 if (link->connector_signal != SIGNAL_TYPE_EDP)
2795 return;
2796
94562810 2797 conn_base = &aconnector->base;
1348969a 2798 adev = drm_to_adev(conn_base->dev);
94562810 2799 dm = &adev->dm;
7fd13bae
AD
2800 for (i = 0; i < dm->num_of_edps; i++) {
2801 if (link == dm->backlight_link[i])
2802 break;
2803 }
2804 if (i >= dm->num_of_edps)
2805 return;
2806 caps = &dm->backlight_caps[i];
94562810
RS
2807 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2808 caps->aux_support = false;
2809 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2810 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2811
d0ae0b64 2812 if (caps->ext_caps->bits.oled == 1 /*||
94562810 2813 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
d0ae0b64 2814 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
94562810
RS
2815 caps->aux_support = true;
2816
7a46f05e
TI
2817 if (amdgpu_backlight == 0)
2818 caps->aux_support = false;
2819 else if (amdgpu_backlight == 1)
2820 caps->aux_support = true;
2821
94562810
RS
2822 /* From the specification (CTA-861-G), for calculating the maximum
2823 * luminance we need to use:
2824 * Luminance = 50*2**(CV/32)
2825 * Where CV is a one-byte value.
 2826	 * Evaluating this expression would require floating-point precision;
 2827	 * to avoid that complexity, we take advantage of the fact that CV is
 2828	 * divided by a constant. From Euclid's division algorithm, we know that
 2829	 * CV can be written as CV = 32*q + r. Substituting CV in the
 2830	 * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
 2831	 * to pre-compute the values of 2**(r/32). For pre-computing them
 2832	 * we used the following Ruby line:
2833 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2834 * The results of the above expressions can be verified at
2835 * pre_computed_values.
2836 */
2837 q = max_cll >> 5;
2838 r = max_cll % 32;
2839 max = (1 << q) * pre_computed_values[r];
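	/* Worked example (editorial, illustrative values): for max_cll = 65
	 * (0x41), q = 65 >> 5 = 2 and r = 65 % 32 = 1, so max becomes
	 * (1 << 2) * pre_computed_values[1] = 4 * 51 = 204, approximating
	 * 50*2**(65/32) ~= 204.4.
	 */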
2840
2841 // min luminance: maxLum * (CV/255)^2 / 100
2842 q = DIV_ROUND_CLOSEST(min_cll, 255);
2843 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2844
2845 caps->aux_max_input_signal = max;
2846 caps->aux_min_input_signal = min;
2847}
2848
97e51c16
HW
2849void amdgpu_dm_update_connector_after_detect(
2850 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2851{
2852 struct drm_connector *connector = &aconnector->base;
2853 struct drm_device *dev = connector->dev;
b73a22d3 2854 struct dc_sink *sink;
4562236b
HW
2855
2856 /* MST handled by drm_mst framework */
2857 if (aconnector->mst_mgr.mst_state == true)
2858 return;
2859
4562236b 2860 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2861 if (sink)
2862 dc_sink_retain(sink);
4562236b 2863
1f6010a9
DF
2864 /*
 2865	 * An EDID-managed connector gets its first update only in the mode_valid hook;
4562236b 2866	 * the connector sink is then set to either a fake or a physical sink, depending on link status.
1f6010a9 2867 * Skip if already done during boot.
4562236b
HW
2868 */
2869 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2870 && aconnector->dc_em_sink) {
2871
1f6010a9
DF
2872 /*
 2873	 * For S3 resume with a headless setup, use dc_em_sink to fake a stream
 2874	 * because on resume connector->sink is set to NULL
4562236b
HW
2875 */
2876 mutex_lock(&dev->mode_config.mutex);
2877
2878 if (sink) {
922aa1e1 2879 if (aconnector->dc_sink) {
98e6436d 2880 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2881 /*
 2882	 * The retain and release below are used to
 2883	 * bump up the sink's refcount because the link doesn't point
 2884	 * to it anymore after disconnect; otherwise, on the next crtc-to-connector
922aa1e1
AG
 2885	 * reshuffle by UMD we would get an unwanted dc_sink release.
2886 */
dcd5fb82 2887 dc_sink_release(aconnector->dc_sink);
922aa1e1 2888 }
4562236b 2889 aconnector->dc_sink = sink;
dcd5fb82 2890 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2891 amdgpu_dm_update_freesync_caps(connector,
2892 aconnector->edid);
4562236b 2893 } else {
98e6436d 2894 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2895 if (!aconnector->dc_sink) {
4562236b 2896 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2897 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2898 }
4562236b
HW
2899 }
2900
2901 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2902
2903 if (sink)
2904 dc_sink_release(sink);
4562236b
HW
2905 return;
2906 }
2907
2908 /*
2909 * TODO: temporary guard to look for proper fix
2910 * if this sink is MST sink, we should not do anything
2911 */
dcd5fb82
MF
2912 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2913 dc_sink_release(sink);
4562236b 2914 return;
dcd5fb82 2915 }
4562236b
HW
2916
2917 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2918 /*
2919 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2920 * Do nothing!!
2921 */
f1ad2f5e 2922 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2923 aconnector->connector_id);
dcd5fb82
MF
2924 if (sink)
2925 dc_sink_release(sink);
4562236b
HW
2926 return;
2927 }
2928
f1ad2f5e 2929 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2930 aconnector->connector_id, aconnector->dc_sink, sink);
2931
2932 mutex_lock(&dev->mode_config.mutex);
2933
1f6010a9
DF
2934 /*
2935 * 1. Update status of the drm connector
2936 * 2. Send an event and let userspace tell us what to do
2937 */
4562236b 2938 if (sink) {
1f6010a9
DF
2939 /*
2940 * TODO: check if we still need the S3 mode update workaround.
2941 * If yes, put it here.
2942 */
c64b0d6b 2943 if (aconnector->dc_sink) {
98e6436d 2944 amdgpu_dm_update_freesync_caps(connector, NULL);
c64b0d6b
VL
2945 dc_sink_release(aconnector->dc_sink);
2946 }
4562236b
HW
2947
2948 aconnector->dc_sink = sink;
dcd5fb82 2949 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2950 if (sink->dc_edid.length == 0) {
4562236b 2951 aconnector->edid = NULL;
e6142dd5
AP
2952 if (aconnector->dc_link->aux_mode) {
2953 drm_dp_cec_unset_edid(
2954 &aconnector->dm_dp_aux.aux);
2955 }
900b3cb1 2956 } else {
4562236b 2957 aconnector->edid =
e6142dd5 2958 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2959
e6142dd5
AP
2960 if (aconnector->dc_link->aux_mode)
2961 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2962 aconnector->edid);
4562236b 2963 }
e6142dd5 2964
20543be9 2965 drm_connector_update_edid_property(connector, aconnector->edid);
98e6436d 2966 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2967 update_connector_ext_caps(aconnector);
4562236b 2968 } else {
e86e8947 2969 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2970 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2971 drm_connector_update_edid_property(connector, NULL);
4562236b 2972 aconnector->num_modes = 0;
dcd5fb82 2973 dc_sink_release(aconnector->dc_sink);
4562236b 2974 aconnector->dc_sink = NULL;
5326c452 2975 aconnector->edid = NULL;
0c8620d6
BL
2976#ifdef CONFIG_DRM_AMD_DC_HDCP
2977 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2978 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2979 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2980#endif
4562236b
HW
2981 }
2982
2983 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 2984
0f877894
OV
2985 update_subconnector_property(aconnector);
2986
dcd5fb82
MF
2987 if (sink)
2988 dc_sink_release(sink);
4562236b
HW
2989}
2990
e27c41d5 2991static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
4562236b 2992{
4562236b
HW
2993 struct drm_connector *connector = &aconnector->base;
2994 struct drm_device *dev = connector->dev;
fbbdadf2 2995 enum dc_connection_type new_connection_type = dc_connection_none;
1348969a 2996 struct amdgpu_device *adev = drm_to_adev(dev);
97f6c917 2997 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
035f5496 2998 struct dm_crtc_state *dm_crtc_state = NULL;
4562236b 2999
b972b4f9
HW
3000 if (adev->dm.disable_hpd_irq)
3001 return;
3002
035f5496
AP
3003 if (dm_con_state->base.state && dm_con_state->base.crtc)
3004 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3005 dm_con_state->base.state,
3006 dm_con_state->base.crtc));
1f6010a9
DF
3007 /*
 3008	 * In case of failure, or for MST, there is no need to update the connector status
 3009	 * or notify the OS, since (in the MST case) MST does this in its own context.
4562236b
HW
3010 */
3011 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 3012
0c8620d6 3013#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 3014 if (adev->dm.hdcp_workqueue) {
96a3b32e 3015 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
97f6c917
BL
3016 dm_con_state->update_hdcp = true;
3017 }
0c8620d6 3018#endif
2e0ac3d6
HW
3019 if (aconnector->fake_enable)
3020 aconnector->fake_enable = false;
3021
fbbdadf2
BL
3022 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3023 DRM_ERROR("KMS: Failed to detect connector\n");
3024
3025 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3026 emulated_link_detect(aconnector->dc_link);
3027
fbbdadf2
BL
3028 drm_modeset_lock_all(dev);
3029 dm_restore_drm_connector_state(dev, connector);
3030 drm_modeset_unlock_all(dev);
3031
3032 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3033 drm_kms_helper_hotplug_event(dev);
3034
3035 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9 3036 if (new_connection_type == dc_connection_none &&
035f5496
AP
3037 aconnector->dc_link->type == dc_connection_none &&
3038 dm_crtc_state)
3039 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
4562236b 3040
3c4d55c9 3041 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b
HW
3042
3043 drm_modeset_lock_all(dev);
3044 dm_restore_drm_connector_state(dev, connector);
3045 drm_modeset_unlock_all(dev);
3046
3047 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3048 drm_kms_helper_hotplug_event(dev);
3049 }
3050 mutex_unlock(&aconnector->hpd_lock);
3051
3052}
3053
e27c41d5
JS
3054static void handle_hpd_irq(void *param)
3055{
3056 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3057
3058 handle_hpd_irq_helper(aconnector);
3059
3060}
3061
8e794421 3062static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
4562236b
HW
3063{
3064 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3065 uint8_t dret;
3066 bool new_irq_handled = false;
3067 int dpcd_addr;
3068 int dpcd_bytes_to_read;
3069
3070 const int max_process_count = 30;
3071 int process_count = 0;
3072
3073 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3074
3075 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3076 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3077 /* DPCD 0x200 - 0x201 for downstream IRQ */
3078 dpcd_addr = DP_SINK_COUNT;
3079 } else {
3080 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3081 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3082 dpcd_addr = DP_SINK_COUNT_ESI;
3083 }
3084
3085 dret = drm_dp_dpcd_read(
3086 &aconnector->dm_dp_aux.aux,
3087 dpcd_addr,
3088 esi,
3089 dpcd_bytes_to_read);
3090
3091 while (dret == dpcd_bytes_to_read &&
3092 process_count < max_process_count) {
3093 uint8_t retry;
3094 dret = 0;
3095
3096 process_count++;
3097
f1ad2f5e 3098 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
3099 /* handle HPD short pulse irq */
3100 if (aconnector->mst_mgr.mst_state)
3101 drm_dp_mst_hpd_irq(
3102 &aconnector->mst_mgr,
3103 esi,
3104 &new_irq_handled);
4562236b
HW
3105
3106 if (new_irq_handled) {
3107 /* ACK at DPCD to notify down stream */
3108 const int ack_dpcd_bytes_to_write =
3109 dpcd_bytes_to_read - 1;
3110
3111 for (retry = 0; retry < 3; retry++) {
3112 uint8_t wret;
3113
3114 wret = drm_dp_dpcd_write(
3115 &aconnector->dm_dp_aux.aux,
3116 dpcd_addr + 1,
3117 &esi[1],
3118 ack_dpcd_bytes_to_write);
3119 if (wret == ack_dpcd_bytes_to_write)
3120 break;
3121 }
3122
1f6010a9 3123 /* check if there is new irq to be handled */
4562236b
HW
3124 dret = drm_dp_dpcd_read(
3125 &aconnector->dm_dp_aux.aux,
3126 dpcd_addr,
3127 esi,
3128 dpcd_bytes_to_read);
3129
3130 new_irq_handled = false;
d4a6e8a9 3131 } else {
4562236b 3132 break;
d4a6e8a9 3133 }
4562236b
HW
3134 }
3135
3136 if (process_count == max_process_count)
f1ad2f5e 3137 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
3138}
3139
8e794421
WL
3140static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3141 union hpd_irq_data hpd_irq_data)
3142{
3143 struct hpd_rx_irq_offload_work *offload_work =
3144 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3145
3146 if (!offload_work) {
3147 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3148 return;
3149 }
3150
3151 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3152 offload_work->data = hpd_irq_data;
3153 offload_work->offload_wq = offload_wq;
3154
3155 queue_work(offload_wq->wq, &offload_work->work);
3156 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3157}
3158
4562236b
HW
3159static void handle_hpd_rx_irq(void *param)
3160{
c84dec2f 3161 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
3162 struct drm_connector *connector = &aconnector->base;
3163 struct drm_device *dev = connector->dev;
53cbf65c 3164 struct dc_link *dc_link = aconnector->dc_link;
4562236b 3165 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 3166 bool result = false;
fbbdadf2 3167 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 3168 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 3169 union hpd_irq_data hpd_irq_data;
8e794421
WL
3170 bool link_loss = false;
3171 bool has_left_work = false;
3172 int idx = aconnector->base.index;
3173 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
2a0f9270
BL
3174
3175 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 3176
b972b4f9
HW
3177 if (adev->dm.disable_hpd_irq)
3178 return;
3179
1f6010a9
DF
3180 /*
 3181	 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
4562236b
HW
 3182	 * conflict; after an i2c helper is implemented, this mutex should be
 3183	 * retired.
3184 */
b86e7eef 3185 mutex_lock(&aconnector->hpd_lock);
4562236b 3186
8e794421
WL
3187 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3188 &link_loss, true, &has_left_work);
3083a984 3189
8e794421
WL
3190 if (!has_left_work)
3191 goto out;
3192
3193 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3194 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3195 goto out;
3196 }
3197
3198 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3199 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3200 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3201 dm_handle_mst_sideband_msg(aconnector);
3083a984
QZ
3202 goto out;
3203 }
3083a984 3204
8e794421
WL
3205 if (link_loss) {
3206 bool skip = false;
d2aa1356 3207
8e794421
WL
3208 spin_lock(&offload_wq->offload_lock);
3209 skip = offload_wq->is_handling_link_loss;
3210
3211 if (!skip)
3212 offload_wq->is_handling_link_loss = true;
3213
3214 spin_unlock(&offload_wq->offload_lock);
3215
3216 if (!skip)
3217 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3218
3219 goto out;
3220 }
3221 }
c8ea79a8 3222
3083a984 3223out:
c8ea79a8 3224 if (result && !is_mst_root_connector) {
4562236b 3225 /* Downstream Port status changed. */
fbbdadf2
BL
3226 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3227 DRM_ERROR("KMS: Failed to detect connector\n");
3228
3229 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3230 emulated_link_detect(dc_link);
3231
3232 if (aconnector->fake_enable)
3233 aconnector->fake_enable = false;
3234
3235 amdgpu_dm_update_connector_after_detect(aconnector);
3236
3237
3238 drm_modeset_lock_all(dev);
3239 dm_restore_drm_connector_state(dev, connector);
3240 drm_modeset_unlock_all(dev);
3241
3242 drm_kms_helper_hotplug_event(dev);
3243 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
3244
3245 if (aconnector->fake_enable)
3246 aconnector->fake_enable = false;
3247
4562236b
HW
3248 amdgpu_dm_update_connector_after_detect(aconnector);
3249
3250
3251 drm_modeset_lock_all(dev);
3252 dm_restore_drm_connector_state(dev, connector);
3253 drm_modeset_unlock_all(dev);
3254
3255 drm_kms_helper_hotplug_event(dev);
3256 }
3257 }
2a0f9270 3258#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
3259 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3260 if (adev->dm.hdcp_workqueue)
3261 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3262 }
2a0f9270 3263#endif
4562236b 3264
b86e7eef 3265 if (dc_link->type != dc_connection_mst_branch)
e86e8947 3266 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
b86e7eef
NC
3267
3268 mutex_unlock(&aconnector->hpd_lock);
4562236b
HW
3269}
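/*
 * Editor's sketch: the link-loss path above claims the offload work
 * under offload_lock so that only one worker handles a given link loss
 * at a time. The same idiom in isolation (hypothetical names, shown
 * for illustration only - this is not part of the driver):
 */
#if 0
static bool example_try_claim(spinlock_t *lock, bool *busy)
{
	bool claimed = false;

	spin_lock(lock);
	if (!*busy)
		*busy = claimed = true;
	spin_unlock(lock);

	/* Schedule the offload work only when this returns true. */
	return claimed;
}
#endif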
3270
3271static void register_hpd_handlers(struct amdgpu_device *adev)
3272{
4a580877 3273 struct drm_device *dev = adev_to_drm(adev);
4562236b 3274 struct drm_connector *connector;
c84dec2f 3275 struct amdgpu_dm_connector *aconnector;
4562236b
HW
3276 const struct dc_link *dc_link;
3277 struct dc_interrupt_params int_params = {0};
3278
3279 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3280 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3281
3282 list_for_each_entry(connector,
3283 &dev->mode_config.connector_list, head) {
3284
c84dec2f 3285 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
3286 dc_link = aconnector->dc_link;
3287
3288 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3289 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3290 int_params.irq_source = dc_link->irq_source_hpd;
3291
3292 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3293 handle_hpd_irq,
3294 (void *) aconnector);
3295 }
3296
3297 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3298
3299 /* Also register for DP short pulse (hpd_rx). */
3300 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3301 int_params.irq_source = dc_link->irq_source_hpd_rx;
3302
3303 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3304 handle_hpd_rx_irq,
3305 (void *) aconnector);
8e794421
WL
3306
3307 if (adev->dm.hpd_rx_offload_wq)
3308 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3309 aconnector;
4562236b
HW
3310 }
3311 }
3312}
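/*
 * Editor's sketch: amdgpu_dm_irq_register_interrupt() above stores
 * (void *) aconnector as the handler argument, so the HPD handlers
 * recover the connector by casting it back. A minimal illustration of
 * the expected handler shape (see the real handle_hpd_irq() earlier in
 * this file for the actual implementation):
 */
#if 0
static void example_hpd_handler(void *param)
{
	struct amdgpu_dm_connector *aconnector = param;

	/* ... run detection and send a hotplug event for aconnector ... */
}
#endif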
3313
55e56389
MR
3314#if defined(CONFIG_DRM_AMD_DC_SI)
3315/* Register IRQ sources and initialize IRQ callbacks */
3316static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3317{
3318 struct dc *dc = adev->dm.dc;
3319 struct common_irq_params *c_irq_params;
3320 struct dc_interrupt_params int_params = {0};
3321 int r;
3322 int i;
3323 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3324
3325 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3326 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3327
3328 /*
3329 * Actions of amdgpu_irq_add_id():
3330 * 1. Register a set() function with base driver.
3331 * Base driver will call set() function to enable/disable an
3332 * interrupt in DC hardware.
3333 * 2. Register amdgpu_dm_irq_handler().
3334 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3335 * coming from DC hardware.
3336 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3337 * for acknowledging and handling. */
3338
3339 /* Use VBLANK interrupt */
3340 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3341 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3342 if (r) {
3343 DRM_ERROR("Failed to add crtc irq id!\n");
3344 return r;
3345 }
3346
3347 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3348 int_params.irq_source =
3349 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3350
3351 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3352
3353 c_irq_params->adev = adev;
3354 c_irq_params->irq_src = int_params.irq_source;
3355
3356 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3357 dm_crtc_high_irq, c_irq_params);
3358 }
3359
3360 /* Use GRPH_PFLIP interrupt */
3361 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3362 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3363 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3364 if (r) {
3365 DRM_ERROR("Failed to add page flip irq id!\n");
3366 return r;
3367 }
3368
3369 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3370 int_params.irq_source =
3371 dc_interrupt_to_irq_source(dc, i, 0);
3372
3373 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3374
3375 c_irq_params->adev = adev;
3376 c_irq_params->irq_src = int_params.irq_source;
3377
3378 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3379 dm_pflip_high_irq, c_irq_params);
3380
3381 }
3382
3383 /* HPD */
3384 r = amdgpu_irq_add_id(adev, client_id,
3385 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3386 if (r) {
3387 DRM_ERROR("Failed to add hpd irq id!\n");
3388 return r;
3389 }
3390
3391 register_hpd_handlers(adev);
3392
3393 return 0;
3394}
3395#endif
3396
4562236b
HW
3397/* Register IRQ sources and initialize IRQ callbacks */
3398static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3399{
3400 struct dc *dc = adev->dm.dc;
3401 struct common_irq_params *c_irq_params;
3402 struct dc_interrupt_params int_params = {0};
3403 int r;
3404 int i;
1ffdeca6 3405 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 3406
c08182f2 3407 if (adev->family >= AMDGPU_FAMILY_AI)
3760f76c 3408 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
3409
3410 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3411 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3412
1f6010a9
DF
3413 /*
3414 * Actions of amdgpu_irq_add_id():
4562236b
HW
3415 * 1. Register a set() function with base driver.
3416 * Base driver will call set() function to enable/disable an
3417 * interrupt in DC hardware.
3418 * 2. Register amdgpu_dm_irq_handler().
3419 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3420 * coming from DC hardware.
3421 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3422 * for acknowledging and handling. */
3423
b57de80a 3424 /* Use VBLANK interrupt */
e9029155 3425 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 3426 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
3427 if (r) {
3428 DRM_ERROR("Failed to add crtc irq id!\n");
3429 return r;
3430 }
3431
3432 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3433 int_params.irq_source =
3d761e79 3434 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 3435
b57de80a 3436 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
3437
3438 c_irq_params->adev = adev;
3439 c_irq_params->irq_src = int_params.irq_source;
3440
3441 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3442 dm_crtc_high_irq, c_irq_params);
3443 }
3444
d2574c33
MK
3445 /* Use VUPDATE interrupt */
3446 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3447 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3448 if (r) {
3449 DRM_ERROR("Failed to add vupdate irq id!\n");
3450 return r;
3451 }
3452
3453 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3454 int_params.irq_source =
3455 dc_interrupt_to_irq_source(dc, i, 0);
3456
3457 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3458
3459 c_irq_params->adev = adev;
3460 c_irq_params->irq_src = int_params.irq_source;
3461
3462 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3463 dm_vupdate_high_irq, c_irq_params);
3464 }
3465
3d761e79 3466 /* Use GRPH_PFLIP interrupt */
4562236b
HW
3467 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3468 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 3469 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
3470 if (r) {
3471 DRM_ERROR("Failed to add page flip irq id!\n");
3472 return r;
3473 }
3474
3475 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3476 int_params.irq_source =
3477 dc_interrupt_to_irq_source(dc, i, 0);
3478
3479 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3480
3481 c_irq_params->adev = adev;
3482 c_irq_params->irq_src = int_params.irq_source;
3483
3484 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3485 dm_pflip_high_irq, c_irq_params);
3486
3487 }
3488
3489 /* HPD */
2c8ad2d5
AD
3490 r = amdgpu_irq_add_id(adev, client_id,
3491 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
3492 if (r) {
3493 DRM_ERROR("Failed to add hpd irq id!\n");
3494 return r;
3495 }
3496
3497 register_hpd_handlers(adev);
3498
3499 return 0;
3500}
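/*
 * Editor's note on the index arithmetic used by the handlers above:
 * each srcid is translated to a DC irq_source, and the per-source
 * bookkeeping slot is its offset from the first source of that kind.
 * For example, assuming the first VBLANK source maps as follows:
 *
 *	i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0
 *	int_params.irq_source              = DC_IRQ_SOURCE_VBLANK1
 *	irq_source - DC_IRQ_SOURCE_VBLANK1 = 0
 *
 * so crtc 0 uses vblank_params[0], crtc 1 uses vblank_params[1], etc.
 */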
3501
b86a1aa3 3502#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3503/* Register IRQ sources and initialize IRQ callbacks */
3504static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3505{
3506 struct dc *dc = adev->dm.dc;
3507 struct common_irq_params *c_irq_params;
3508 struct dc_interrupt_params int_params = {0};
3509 int r;
3510 int i;
660d5406
WL
3511#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3512 static const unsigned int vrtl_int_srcid[] = {
3513 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3514 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3515 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3516 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3517 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3518 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3519 };
3520#endif
ff5ef992
AD
3521
3522 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3523 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3524
1f6010a9
DF
3525 /*
3526 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
3527 * 1. Register a set() function with base driver.
3528 * Base driver will call set() function to enable/disable an
3529 * interrupt in DC hardware.
3530 * 2. Register amdgpu_dm_irq_handler().
3531 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3532 * coming from DC hardware.
3533 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3534 * for acknowledging and handling.
1f6010a9 3535 */
ff5ef992
AD
3536
3537 /* Use VSTARTUP interrupt */
3538 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3539 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3540 i++) {
3760f76c 3541 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
3542
3543 if (r) {
3544 DRM_ERROR("Failed to add crtc irq id!\n");
3545 return r;
3546 }
3547
3548 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3549 int_params.irq_source =
3550 dc_interrupt_to_irq_source(dc, i, 0);
3551
3552 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3553
3554 c_irq_params->adev = adev;
3555 c_irq_params->irq_src = int_params.irq_source;
3556
2346ef47
NK
3557 amdgpu_dm_irq_register_interrupt(
3558 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3559 }
3560
86bc2219
WL
3561 /* Use otg vertical line interrupt */
3562#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
660d5406
WL
3563 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3564 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3565 vrtl_int_srcid[i], &adev->vline0_irq);
86bc2219
WL
3566
3567 if (r) {
3568 DRM_ERROR("Failed to add vline0 irq id!\n");
3569 return r;
3570 }
3571
3572 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3573 int_params.irq_source =
660d5406
WL
3574 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3575
3576 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3577 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3578 break;
3579 }
86bc2219
WL
3580
3581 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3582 - DC_IRQ_SOURCE_DC1_VLINE0];
3583
3584 c_irq_params->adev = adev;
3585 c_irq_params->irq_src = int_params.irq_source;
3586
3587 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3588 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3589 }
3590#endif
3591
2346ef47
NK
3592 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3593 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3594 * to trigger at end of each vblank, regardless of state of the lock,
3595 * matching DCE behaviour.
3596 */
3597 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3598 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3599 i++) {
3600 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3601
3602 if (r) {
3603 DRM_ERROR("Failed to add vupdate irq id!\n");
3604 return r;
3605 }
3606
3607 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3608 int_params.irq_source =
3609 dc_interrupt_to_irq_source(dc, i, 0);
3610
3611 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3612
3613 c_irq_params->adev = adev;
3614 c_irq_params->irq_src = int_params.irq_source;
3615
ff5ef992 3616 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 3617 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
3618 }
3619
ff5ef992
AD
3620 /* Use GRPH_PFLIP interrupt */
3621 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3622 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3623 i++) {
3760f76c 3624 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
3625 if (r) {
3626 DRM_ERROR("Failed to add page flip irq id!\n");
3627 return r;
3628 }
3629
3630 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3631 int_params.irq_source =
3632 dc_interrupt_to_irq_source(dc, i, 0);
3633
3634 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3635
3636 c_irq_params->adev = adev;
3637 c_irq_params->irq_src = int_params.irq_source;
3638
3639 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3640 dm_pflip_high_irq, c_irq_params);
3641
3642 }
3643
81927e28
JS
3644 /* HPD */
3645 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3646 &adev->hpd_irq);
3647 if (r) {
3648 DRM_ERROR("Failed to add hpd irq id!\n");
3649 return r;
3650 }
a08f16cf 3651
81927e28 3652 register_hpd_handlers(adev);
a08f16cf 3653
81927e28
JS
3654 return 0;
3655}
3656/* Register Outbox IRQ sources and initialize IRQ callbacks */
3657static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3658{
3659 struct dc *dc = adev->dm.dc;
3660 struct common_irq_params *c_irq_params;
3661 struct dc_interrupt_params int_params = {0};
3662 int r, i;
3663
3664 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3665 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3666
3667 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3668 &adev->dmub_outbox_irq);
3669 if (r) {
3670 DRM_ERROR("Failed to add outbox irq id!\n");
3671 return r;
3672 }
3673
3674 if (dc->ctx->dmub_srv) {
3675 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3676 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
a08f16cf 3677 int_params.irq_source =
81927e28 3678 dc_interrupt_to_irq_source(dc, i, 0);
a08f16cf 3679
81927e28 3680 c_irq_params = &adev->dm.dmub_outbox_params[0];
a08f16cf
LHM
3681
3682 c_irq_params->adev = adev;
3683 c_irq_params->irq_src = int_params.irq_source;
3684
3685 amdgpu_dm_irq_register_interrupt(adev, &int_params,
81927e28 3686 dm_dmub_outbox1_low_irq, c_irq_params);
ff5ef992
AD
3687 }
3688
ff5ef992
AD
3689 return 0;
3690}
3691#endif
3692
eb3dc897
NK
3693/*
3694 * Acquires the lock for the atomic state object and returns
3695 * the new atomic state.
3696 *
3697 * This should only be called during atomic check.
3698 */
3699static int dm_atomic_get_state(struct drm_atomic_state *state,
3700 struct dm_atomic_state **dm_state)
3701{
3702 struct drm_device *dev = state->dev;
1348969a 3703 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3704 struct amdgpu_display_manager *dm = &adev->dm;
3705 struct drm_private_state *priv_state;
eb3dc897
NK
3706
3707 if (*dm_state)
3708 return 0;
3709
eb3dc897
NK
3710 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3711 if (IS_ERR(priv_state))
3712 return PTR_ERR(priv_state);
3713
3714 *dm_state = to_dm_atomic_state(priv_state);
3715
3716 return 0;
3717}
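/*
 * Editor's sketch of the intended caller pattern: pass a NULL-initialized
 * pointer and call as often as needed during atomic check; only the first
 * call actually takes the private-object lock (illustration only):
 */
#if 0
static int example_check_step(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = NULL;
	int ret;

	ret = dm_atomic_get_state(state, &dm_state);	/* locks + fetches */
	if (ret)
		return ret;

	return dm_atomic_get_state(state, &dm_state);	/* no-op, *dm_state set */
}
#endif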
3718
dfd84d90 3719static struct dm_atomic_state *
eb3dc897
NK
3720dm_atomic_get_new_state(struct drm_atomic_state *state)
3721{
3722 struct drm_device *dev = state->dev;
1348969a 3723 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3724 struct amdgpu_display_manager *dm = &adev->dm;
3725 struct drm_private_obj *obj;
3726 struct drm_private_state *new_obj_state;
3727 int i;
3728
3729 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3730 if (obj->funcs == dm->atomic_obj.funcs)
3731 return to_dm_atomic_state(new_obj_state);
3732 }
3733
3734 return NULL;
3735}
3736
eb3dc897
NK
3737static struct drm_private_state *
3738dm_atomic_duplicate_state(struct drm_private_obj *obj)
3739{
3740 struct dm_atomic_state *old_state, *new_state;
3741
3742 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3743 if (!new_state)
3744 return NULL;
3745
3746 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3747
813d20dc
AW
3748 old_state = to_dm_atomic_state(obj->state);
3749
3750 if (old_state && old_state->context)
3751 new_state->context = dc_copy_state(old_state->context);
3752
eb3dc897
NK
3753 if (!new_state->context) {
3754 kfree(new_state);
3755 return NULL;
3756 }
3757
eb3dc897
NK
3758 return &new_state->base;
3759}
3760
3761static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3762 struct drm_private_state *state)
3763{
3764 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3765
3766 if (dm_state && dm_state->context)
3767 dc_release_state(dm_state->context);
3768
3769 kfree(dm_state);
3770}
3771
3772static struct drm_private_state_funcs dm_atomic_state_funcs = {
3773 .atomic_duplicate_state = dm_atomic_duplicate_state,
3774 .atomic_destroy_state = dm_atomic_destroy_state,
3775};
3776
4562236b
HW
3777static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3778{
eb3dc897 3779 struct dm_atomic_state *state;
4562236b
HW
3780 int r;
3781
3782 adev->mode_info.mode_config_initialized = true;
3783
4a580877
LT
3784 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3785 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3786
4a580877
LT
3787 adev_to_drm(adev)->mode_config.max_width = 16384;
3788 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3789
4a580877
LT
3790 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3791 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3792 /* indicates support for immediate flip */
4a580877 3793 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3794
4a580877 3795 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3796
eb3dc897
NK
3797 state = kzalloc(sizeof(*state), GFP_KERNEL);
3798 if (!state)
3799 return -ENOMEM;
3800
813d20dc 3801 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3802 if (!state->context) {
3803 kfree(state);
3804 return -ENOMEM;
3805 }
3806
3807 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3808
4a580877 3809 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3810 &adev->dm.atomic_obj,
eb3dc897
NK
3811 &state->base,
3812 &dm_atomic_state_funcs);
3813
3dc9b1ce 3814 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3815 if (r) {
3816 dc_release_state(state->context);
3817 kfree(state);
4562236b 3818 return r;
b67a468a 3819 }
4562236b 3820
6ce8f316 3821 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3822 if (r) {
3823 dc_release_state(state->context);
3824 kfree(state);
6ce8f316 3825 return r;
b67a468a 3826 }
6ce8f316 3827
4562236b
HW
3828 return 0;
3829}
3830
206bbafe
DF
3831#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3832#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3833#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3834
4562236b
HW
3835#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3836 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3837
7fd13bae
AD
3838static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3839 int bl_idx)
206bbafe
DF
3840{
3841#if defined(CONFIG_ACPI)
3842 struct amdgpu_dm_backlight_caps caps;
3843
58965855
FS
3844 memset(&caps, 0, sizeof(caps));
3845
7fd13bae 3846 if (dm->backlight_caps[bl_idx].caps_valid)
206bbafe
DF
3847 return;
3848
f9b7f370 3849 amdgpu_acpi_get_backlight_caps(&caps);
206bbafe 3850 if (caps.caps_valid) {
7fd13bae 3851 dm->backlight_caps[bl_idx].caps_valid = true;
94562810
RS
3852 if (caps.aux_support)
3853 return;
7fd13bae
AD
3854 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3855 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
206bbafe 3856 } else {
7fd13bae 3857 dm->backlight_caps[bl_idx].min_input_signal =
206bbafe 3858 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
7fd13bae 3859 dm->backlight_caps[bl_idx].max_input_signal =
206bbafe
DF
3860 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3861 }
3862#else
7fd13bae 3863 if (dm->backlight_caps[bl_idx].aux_support)
94562810
RS
3864 return;
3865
7fd13bae
AD
3866 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3867 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3868#endif
3869}
3870
69d9f427
AM
3871static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3872 unsigned *min, unsigned *max)
94562810 3873{
94562810 3874 if (!caps)
69d9f427 3875 return 0;
94562810 3876
69d9f427
AM
3877 if (caps->aux_support) {
3878 // Firmware limits are in nits, DC API wants millinits.
3879 *max = 1000 * caps->aux_max_input_signal;
3880 *min = 1000 * caps->aux_min_input_signal;
94562810 3881 } else {
69d9f427
AM
3882 // Firmware limits are 8-bit, PWM control is 16-bit.
3883 *max = 0x101 * caps->max_input_signal;
3884 *min = 0x101 * caps->min_input_signal;
94562810 3885 }
69d9f427
AM
3886 return 1;
3887}
94562810 3888
69d9f427
AM
3889static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3890 uint32_t brightness)
3891{
3892 unsigned min, max;
94562810 3893
69d9f427
AM
3894 if (!get_brightness_range(caps, &min, &max))
3895 return brightness;
3896
3897 // Rescale 0..255 to min..max
3898 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3899 AMDGPU_MAX_BL_LEVEL);
3900}
3901
3902static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3903 uint32_t brightness)
3904{
3905 unsigned min, max;
3906
3907 if (!get_brightness_range(caps, &min, &max))
3908 return brightness;
3909
3910 if (brightness < min)
3911 return 0;
3912 // Rescale min..max to 0..255
3913 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3914 max - min);
94562810
RS
3915}
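/*
 * Editor's worked example for the two conversions above, assuming the
 * PWM (non-AUX) path with the default caps min_input_signal = 12 and
 * max_input_signal = 255:
 *
 *	min = 0x101 * 12  = 3084
 *	max = 0x101 * 255 = 65535
 *
 * A user brightness of 128 (out of AMDGPU_MAX_BL_LEVEL = 255) maps to
 *	3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432
 * and converting 34432 back yields 128, so the round trip is stable
 * up to rounding.
 */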
3916
3d6c9164 3917static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
7fd13bae 3918 int bl_idx,
3d6c9164 3919 u32 user_brightness)
4562236b 3920{
206bbafe 3921 struct amdgpu_dm_backlight_caps caps;
7fd13bae
AD
3922 struct dc_link *link;
3923 u32 brightness;
94562810 3924 bool rc;
4562236b 3925
7fd13bae
AD
3926 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3927 caps = dm->backlight_caps[bl_idx];
94562810 3928
7fd13bae 3929 dm->brightness[bl_idx] = user_brightness;
1f579254
AD
3930 /* update scratch register */
3931 if (bl_idx == 0)
3932 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
7fd13bae
AD
3933 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3934 link = (struct dc_link *)dm->backlight_link[bl_idx];
94562810 3935
3d6c9164 3936 /* Change brightness based on AUX property */
118b4627 3937 if (caps.aux_support) {
7fd13bae
AD
3938 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3939 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3940 if (!rc)
3941 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
118b4627 3942 } else {
7fd13bae
AD
3943 rc = dc_link_set_backlight_level(link, brightness, 0);
3944 if (!rc)
3945 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
118b4627 3946 }
94562810
RS
3947
3948 return rc ? 0 : 1;
4562236b
HW
3949}
3950
3d6c9164 3951static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4562236b 3952{
620a0d27 3953 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 3954 int i;
3d6c9164 3955
7fd13bae
AD
3956 for (i = 0; i < dm->num_of_edps; i++) {
3957 if (bd == dm->backlight_dev[i])
3958 break;
3959 }
3960 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3961 i = 0;
3962 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3d6c9164
AD
3963
3964 return 0;
3965}
3966
7fd13bae
AD
3967static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3968 int bl_idx)
3d6c9164 3969{
0ad3e64e 3970 struct amdgpu_dm_backlight_caps caps;
7fd13bae 3971 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
0ad3e64e 3972
7fd13bae
AD
3973 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3974 caps = dm->backlight_caps[bl_idx];
620a0d27 3975
0ad3e64e 3976 if (caps.aux_support) {
0ad3e64e
AD
3977 u32 avg, peak;
3978 bool rc;
3979
3980 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3981 if (!rc)
7fd13bae 3982 return dm->brightness[bl_idx];
0ad3e64e
AD
3983 return convert_brightness_to_user(&caps, avg);
3984 } else {
7fd13bae 3985 int ret = dc_link_get_backlight_level(link);
0ad3e64e
AD
3986
3987 if (ret == DC_ERROR_UNEXPECTED)
7fd13bae 3988 return dm->brightness[bl_idx];
0ad3e64e
AD
3989 return convert_brightness_to_user(&caps, ret);
3990 }
4562236b
HW
3991}
3992
3d6c9164
AD
3993static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3994{
3995 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 3996 int i;
3d6c9164 3997
7fd13bae
AD
3998 for (i = 0; i < dm->num_of_edps; i++) {
3999 if (bd == dm->backlight_dev[i])
4000 break;
4001 }
4002 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4003 i = 0;
4004 return amdgpu_dm_backlight_get_level(dm, i);
3d6c9164
AD
4005}
4006
4562236b 4007static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 4008 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
4009 .get_brightness = amdgpu_dm_backlight_get_brightness,
4010 .update_status = amdgpu_dm_backlight_update_status,
4011};
4012
7578ecda
AD
4013static void
4014amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
4015{
4016 char bl_name[16];
4017 struct backlight_properties props = { 0 };
4018
7fd13bae
AD
4019 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4020 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
206bbafe 4021
4562236b 4022 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 4023 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
4024 props.type = BACKLIGHT_RAW;
4025
4026 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
7fd13bae 4027 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4562236b 4028
7fd13bae
AD
4029 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4030 adev_to_drm(dm->adev)->dev,
4031 dm,
4032 &amdgpu_dm_backlight_ops,
4033 &props);
4562236b 4034
7fd13bae 4035 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4562236b
HW
4036 DRM_ERROR("DM: Backlight registration failed!\n");
4037 else
f1ad2f5e 4038 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b 4039}
4562236b
HW
4040#endif
4041
df534fff 4042static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 4043 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
4044 enum drm_plane_type plane_type,
4045 const struct dc_plane_cap *plane_cap)
df534fff 4046{
f180b4bc 4047 struct drm_plane *plane;
df534fff
S
4048 unsigned long possible_crtcs;
4049 int ret = 0;
4050
f180b4bc 4051 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
4052 if (!plane) {
4053 DRM_ERROR("KMS: Failed to allocate plane\n");
4054 return -ENOMEM;
4055 }
b2fddb13 4056 plane->type = plane_type;
df534fff
S
4057
4058 /*
b2fddb13
NK
4059 * HACK: IGT tests expect that the primary plane for a CRTC
4060	 * can only have one possible CRTC. Only expose support for
4061	 * any CRTC to planes that won't be used as a primary plane
4062	 * for a CRTC - i.e. overlay or underlay planes.
df534fff
S
4063 */
4064 possible_crtcs = 1 << plane_id;
4065 if (plane_id >= dm->dc->caps.max_streams)
4066 possible_crtcs = 0xff;
4067
cc1fec57 4068 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
4069
4070 if (ret) {
4071 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 4072 kfree(plane);
df534fff
S
4073 return ret;
4074 }
4075
54087768
NK
4076 if (mode_info)
4077 mode_info->planes[plane_id] = plane;
4078
df534fff
S
4079 return ret;
4080}
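/*
 * Editor's worked example of the possible_crtcs mask above, assuming
 * dm->dc->caps.max_streams == 4:
 *
 *	plane_id 0..3 (primary planes): possible_crtcs = 1 << plane_id
 *	plane_id >= 4 (overlay planes): possible_crtcs = 0xff (any CRTC)
 */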
4081
89fc8d4e
HW
4082
4083static void register_backlight_device(struct amdgpu_display_manager *dm,
4084 struct dc_link *link)
4085{
4086#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4087 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4088
4089 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4090 link->type != dc_connection_none) {
1f6010a9
DF
4091 /*
4092	 * Even if registration fails, we should continue with
89fc8d4e
HW
4093	 * DM initialization because not having a backlight control
4094	 * is better than a black screen.
4095 */
7fd13bae 4096 if (!dm->backlight_dev[dm->num_of_edps])
118b4627 4097 amdgpu_dm_register_backlight_device(dm);
89fc8d4e 4098
7fd13bae 4099 if (dm->backlight_dev[dm->num_of_edps]) {
118b4627
ML
4100 dm->backlight_link[dm->num_of_edps] = link;
4101 dm->num_of_edps++;
4102 }
89fc8d4e
HW
4103 }
4104#endif
4105}
4106
4107
1f6010a9
DF
4108/*
4109 * In this architecture, the association
4562236b
HW
4110 * connector -> encoder -> crtc
4111 * is not really required. The crtc and connector will hold the
4112 * display_index as an abstraction to use with the DAL component.
4113 *
4114 * Returns 0 on success
4115 */
7578ecda 4116static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
4117{
4118 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 4119 int32_t i;
c84dec2f 4120 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 4121 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 4122 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 4123 uint32_t link_cnt;
cc1fec57 4124 int32_t primary_planes;
fbbdadf2 4125 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 4126 const struct dc_plane_cap *plane;
9470620e 4127 bool psr_feature_enabled = false;
4562236b 4128
d58159de
AD
4129 dm->display_indexes_num = dm->dc->caps.max_streams;
4130 /* Update the actual used number of crtc */
4131 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4132
4562236b 4133 link_cnt = dm->dc->caps.max_links;
4562236b
HW
4134 if (amdgpu_dm_mode_config_init(dm->adev)) {
4135 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 4136 return -EINVAL;
4562236b
HW
4137 }
4138
b2fddb13
NK
4139 /* There is one primary plane per CRTC */
4140 primary_planes = dm->dc->caps.max_streams;
54087768 4141 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 4142
b2fddb13
NK
4143 /*
4144 * Initialize primary planes, implicit planes for legacy IOCTLS.
4145 * Order is reversed to match iteration order in atomic check.
4146 */
4147 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
4148 plane = &dm->dc->caps.planes[i];
4149
b2fddb13 4150 if (initialize_plane(dm, mode_info, i,
cc1fec57 4151 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 4152 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 4153 goto fail;
d4e13b0d 4154 }
df534fff 4155 }
92f3ac40 4156
0d579c7e
NK
4157 /*
4158 * Initialize overlay planes, index starting after primary planes.
4159 * These planes have a higher DRM index than the primary planes since
4160 * they should be considered as having a higher z-order.
4161 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
4162 *
4163 * Only support DCN for now, and only expose one so we don't encourage
4164 * userspace to use up all the pipes.
0d579c7e 4165 */
cc1fec57
NK
4166 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4167 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4168
4169 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4170 continue;
4171
4172 if (!plane->blends_with_above || !plane->blends_with_below)
4173 continue;
4174
ea36ad34 4175 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
4176 continue;
4177
54087768 4178 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 4179 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 4180 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 4181 goto fail;
d4e13b0d 4182 }
cc1fec57
NK
4183
4184 /* Only create one overlay plane. */
4185 break;
d4e13b0d 4186 }
4562236b 4187
d4e13b0d 4188 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 4189 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 4190 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 4191 goto fail;
4562236b 4192 }
4562236b 4193
50610b74 4194#if defined(CONFIG_DRM_AMD_DC_DCN)
81927e28 4195 /* Use Outbox interrupt */
1d789535 4196 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4197 case IP_VERSION(3, 0, 0):
4198 case IP_VERSION(3, 1, 2):
4199 case IP_VERSION(3, 1, 3):
4200 case IP_VERSION(2, 1, 0):
81927e28
JS
4201 if (register_outbox_irq_handlers(dm->adev)) {
4202 DRM_ERROR("DM: Failed to initialize IRQ\n");
4203 goto fail;
4204 }
4205 break;
4206 default:
c08182f2 4207 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
1d789535 4208 adev->ip_versions[DCE_HWIP][0]);
81927e28 4209 }
9470620e
NK
4210
4211 /* Determine whether to enable PSR support by default. */
4212 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4213 switch (adev->ip_versions[DCE_HWIP][0]) {
4214 case IP_VERSION(3, 1, 2):
4215 case IP_VERSION(3, 1, 3):
4216 psr_feature_enabled = true;
4217 break;
4218 default:
4219 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4220 break;
4221 }
4222 }
50610b74 4223#endif
81927e28 4224
4562236b
HW
4225 /* loops over all connectors on the board */
4226 for (i = 0; i < link_cnt; i++) {
89fc8d4e 4227 struct dc_link *link = NULL;
4562236b
HW
4228
4229 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4230 DRM_ERROR(
4231 "KMS: Cannot support more than %d display indexes\n",
4232 AMDGPU_DM_MAX_DISPLAY_INDEX);
4233 continue;
4234 }
4235
4236 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4237 if (!aconnector)
cd8a2ae8 4238 goto fail;
4562236b
HW
4239
4240 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 4241 if (!aencoder)
cd8a2ae8 4242 goto fail;
4562236b
HW
4243
4244 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4245 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 4246 goto fail;
4562236b
HW
4247 }
4248
4249 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4250 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 4251 goto fail;
4562236b
HW
4252 }
4253
89fc8d4e
HW
4254 link = dc_get_link_at_index(dm->dc, i);
4255
fbbdadf2
BL
4256 if (!dc_link_detect_sink(link, &new_connection_type))
4257 DRM_ERROR("KMS: Failed to detect connector\n");
4258
4259 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4260 emulated_link_detect(link);
4261 amdgpu_dm_update_connector_after_detect(aconnector);
4262
4263 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 4264 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 4265 register_backlight_device(dm, link);
b295ce39
RL
4266 if (dm->num_of_edps)
4267 update_connector_ext_caps(aconnector);
9470620e 4268 if (psr_feature_enabled)
397a9bc5 4269 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
4270 }
4271
4272
4562236b
HW
4273 }
4274
70897848
NK
4275 /*
4276 * Disable vblank IRQs aggressively for power-saving.
4277 *
4278 * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
4279 * is also supported.
4280 */
4281 adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
4282
4562236b
HW
4283 /* Software is initialized. Now we can register interrupt handlers. */
4284 switch (adev->asic_type) {
55e56389
MR
4285#if defined(CONFIG_DRM_AMD_DC_SI)
4286 case CHIP_TAHITI:
4287 case CHIP_PITCAIRN:
4288 case CHIP_VERDE:
4289 case CHIP_OLAND:
4290 if (dce60_register_irq_handlers(dm->adev)) {
4291 DRM_ERROR("DM: Failed to initialize IRQ\n");
4292 goto fail;
4293 }
4294 break;
4295#endif
4562236b
HW
4296 case CHIP_BONAIRE:
4297 case CHIP_HAWAII:
cd4b356f
AD
4298 case CHIP_KAVERI:
4299 case CHIP_KABINI:
4300 case CHIP_MULLINS:
4562236b
HW
4301 case CHIP_TONGA:
4302 case CHIP_FIJI:
4303 case CHIP_CARRIZO:
4304 case CHIP_STONEY:
4305 case CHIP_POLARIS11:
4306 case CHIP_POLARIS10:
b264d345 4307 case CHIP_POLARIS12:
7737de91 4308 case CHIP_VEGAM:
2c8ad2d5 4309 case CHIP_VEGA10:
2325ff30 4310 case CHIP_VEGA12:
1fe6bf2f 4311 case CHIP_VEGA20:
4562236b
HW
4312 if (dce110_register_irq_handlers(dm->adev)) {
4313 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 4314 goto fail;
4562236b
HW
4315 }
4316 break;
4317 default:
c08182f2 4318#if defined(CONFIG_DRM_AMD_DC_DCN)
1d789535 4319 switch (adev->ip_versions[DCE_HWIP][0]) {
559f591d
AD
4320 case IP_VERSION(1, 0, 0):
4321 case IP_VERSION(1, 0, 1):
c08182f2
AD
4322 case IP_VERSION(2, 0, 2):
4323 case IP_VERSION(2, 0, 3):
4324 case IP_VERSION(2, 0, 0):
4325 case IP_VERSION(2, 1, 0):
4326 case IP_VERSION(3, 0, 0):
4327 case IP_VERSION(3, 0, 2):
4328 case IP_VERSION(3, 0, 3):
4329 case IP_VERSION(3, 0, 1):
4330 case IP_VERSION(3, 1, 2):
4331 case IP_VERSION(3, 1, 3):
4332 if (dcn10_register_irq_handlers(dm->adev)) {
4333 DRM_ERROR("DM: Failed to initialize IRQ\n");
4334 goto fail;
4335 }
4336 break;
4337 default:
2cbc6f42 4338 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
1d789535 4339 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4340 goto fail;
c08182f2
AD
4341 }
4342#endif
2cbc6f42 4343 break;
4562236b
HW
4344 }
4345
4562236b 4346 return 0;
cd8a2ae8 4347fail:
4562236b 4348 kfree(aencoder);
4562236b 4349 kfree(aconnector);
54087768 4350
59d0f396 4351 return -EINVAL;
4562236b
HW
4352}
4353
7578ecda 4354static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b 4355{
eb3dc897 4356 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
4357 return;
4358}
4359
4360/******************************************************************************
4361 * amdgpu_display_funcs functions
4362 *****************************************************************************/
4363
1f6010a9 4364/*
4562236b
HW
4365 * dm_bandwidth_update - program display watermarks
4366 *
4367 * @adev: amdgpu_device pointer
4368 *
4369 * Calculate and program the display watermarks and line buffer allocation.
4370 */
4371static void dm_bandwidth_update(struct amdgpu_device *adev)
4372{
49c07a99 4373 /* TODO: implement later */
4562236b
HW
4374}
4375
39cc5be2 4376static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
4377 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4378 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
4379 .backlight_set_level = NULL, /* never called for DC */
4380 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
4381 .hpd_sense = NULL,/* called unconditionally */
4382 .hpd_set_polarity = NULL, /* called unconditionally */
4383 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4384 .page_flip_get_scanoutpos =
4385 dm_crtc_get_scanoutpos,/* called unconditionally */
4386 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4387 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4388};
4389
4390#if defined(CONFIG_DEBUG_KERNEL_DC)
4391
3ee6b26b
AD
4392static ssize_t s3_debug_store(struct device *device,
4393 struct device_attribute *attr,
4394 const char *buf,
4395 size_t count)
4562236b
HW
4396{
4397 int ret;
4398 int s3_state;
ef1de361 4399 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 4400 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
4401
4402 ret = kstrtoint(buf, 0, &s3_state);
4403
4404 if (ret == 0) {
4405 if (s3_state) {
4406 dm_resume(adev);
4a580877 4407 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
4408 } else
4409 dm_suspend(adev);
4410 }
4411
4412 return ret == 0 ? count : 0;
4413}
4414
4415DEVICE_ATTR_WO(s3_debug);
4416
4417#endif
4418
4419static int dm_early_init(void *handle)
4420{
4421 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4422
4562236b 4423 switch (adev->asic_type) {
55e56389
MR
4424#if defined(CONFIG_DRM_AMD_DC_SI)
4425 case CHIP_TAHITI:
4426 case CHIP_PITCAIRN:
4427 case CHIP_VERDE:
4428 adev->mode_info.num_crtc = 6;
4429 adev->mode_info.num_hpd = 6;
4430 adev->mode_info.num_dig = 6;
4431 break;
4432 case CHIP_OLAND:
4433 adev->mode_info.num_crtc = 2;
4434 adev->mode_info.num_hpd = 2;
4435 adev->mode_info.num_dig = 2;
4436 break;
4437#endif
4562236b
HW
4438 case CHIP_BONAIRE:
4439 case CHIP_HAWAII:
4440 adev->mode_info.num_crtc = 6;
4441 adev->mode_info.num_hpd = 6;
4442 adev->mode_info.num_dig = 6;
4562236b 4443 break;
cd4b356f
AD
4444 case CHIP_KAVERI:
4445 adev->mode_info.num_crtc = 4;
4446 adev->mode_info.num_hpd = 6;
4447 adev->mode_info.num_dig = 7;
cd4b356f
AD
4448 break;
4449 case CHIP_KABINI:
4450 case CHIP_MULLINS:
4451 adev->mode_info.num_crtc = 2;
4452 adev->mode_info.num_hpd = 6;
4453 adev->mode_info.num_dig = 6;
cd4b356f 4454 break;
4562236b
HW
4455 case CHIP_FIJI:
4456 case CHIP_TONGA:
4457 adev->mode_info.num_crtc = 6;
4458 adev->mode_info.num_hpd = 6;
4459 adev->mode_info.num_dig = 7;
4562236b
HW
4460 break;
4461 case CHIP_CARRIZO:
4462 adev->mode_info.num_crtc = 3;
4463 adev->mode_info.num_hpd = 6;
4464 adev->mode_info.num_dig = 9;
4562236b
HW
4465 break;
4466 case CHIP_STONEY:
4467 adev->mode_info.num_crtc = 2;
4468 adev->mode_info.num_hpd = 6;
4469 adev->mode_info.num_dig = 9;
4562236b
HW
4470 break;
4471 case CHIP_POLARIS11:
b264d345 4472 case CHIP_POLARIS12:
4562236b
HW
4473 adev->mode_info.num_crtc = 5;
4474 adev->mode_info.num_hpd = 5;
4475 adev->mode_info.num_dig = 5;
4562236b
HW
4476 break;
4477 case CHIP_POLARIS10:
7737de91 4478 case CHIP_VEGAM:
4562236b
HW
4479 adev->mode_info.num_crtc = 6;
4480 adev->mode_info.num_hpd = 6;
4481 adev->mode_info.num_dig = 6;
4562236b 4482 break;
2c8ad2d5 4483 case CHIP_VEGA10:
2325ff30 4484 case CHIP_VEGA12:
1fe6bf2f 4485 case CHIP_VEGA20:
2c8ad2d5
AD
4486 adev->mode_info.num_crtc = 6;
4487 adev->mode_info.num_hpd = 6;
4488 adev->mode_info.num_dig = 6;
4489 break;
4562236b 4490 default:
c08182f2 4491#if defined(CONFIG_DRM_AMD_DC_DCN)
1d789535 4492 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4493 case IP_VERSION(2, 0, 2):
4494 case IP_VERSION(3, 0, 0):
4495 adev->mode_info.num_crtc = 6;
4496 adev->mode_info.num_hpd = 6;
4497 adev->mode_info.num_dig = 6;
4498 break;
4499 case IP_VERSION(2, 0, 0):
4500 case IP_VERSION(3, 0, 2):
4501 adev->mode_info.num_crtc = 5;
4502 adev->mode_info.num_hpd = 5;
4503 adev->mode_info.num_dig = 5;
4504 break;
4505 case IP_VERSION(2, 0, 3):
4506 case IP_VERSION(3, 0, 3):
4507 adev->mode_info.num_crtc = 2;
4508 adev->mode_info.num_hpd = 2;
4509 adev->mode_info.num_dig = 2;
4510 break;
559f591d
AD
4511 case IP_VERSION(1, 0, 0):
4512 case IP_VERSION(1, 0, 1):
c08182f2
AD
4513 case IP_VERSION(3, 0, 1):
4514 case IP_VERSION(2, 1, 0):
4515 case IP_VERSION(3, 1, 2):
4516 case IP_VERSION(3, 1, 3):
4517 adev->mode_info.num_crtc = 4;
4518 adev->mode_info.num_hpd = 4;
4519 adev->mode_info.num_dig = 4;
4520 break;
4521 default:
2cbc6f42 4522 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
1d789535 4523 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4524 return -EINVAL;
c08182f2
AD
4525 }
4526#endif
2cbc6f42 4527 break;
4562236b
HW
4528 }
4529
c8dd5715
MD
4530 amdgpu_dm_set_irq_funcs(adev);
4531
39cc5be2
AD
4532 if (adev->mode_info.funcs == NULL)
4533 adev->mode_info.funcs = &dm_display_funcs;
4534
1f6010a9
DF
4535 /*
4536 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 4537 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
4538 * amdgpu_device_init()
4539 */
4562236b
HW
4540#if defined(CONFIG_DEBUG_KERNEL_DC)
4541 device_create_file(
4a580877 4542 adev_to_drm(adev)->dev,
4562236b
HW
4543 &dev_attr_s3_debug);
4544#endif
4545
4546 return 0;
4547}
4548
9b690ef3 4549static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
4550 struct dc_stream_state *new_stream,
4551 struct dc_stream_state *old_stream)
9b690ef3 4552{
2afda735 4553 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4554}
4555
4556static bool modereset_required(struct drm_crtc_state *crtc_state)
4557{
2afda735 4558 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4559}
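/*
 * Editor's note: the two helpers above split drm_atomic_crtc_needs_modeset()
 * by the target CRTC state:
 *
 *	active && needs_modeset   -> modeset_required()   (program a new stream)
 *	!active && needs_modeset  -> modereset_required() (tear the stream down)
 *
 * A CRTC state that does not need a modeset triggers neither path.
 */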
4560
7578ecda 4561static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
4562{
4563 drm_encoder_cleanup(encoder);
4564 kfree(encoder);
4565}
4566
4567static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4568 .destroy = amdgpu_dm_encoder_destroy,
4569};
4570
e7b07cee 4571
6300b3bd
MK
4572static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4573 struct drm_framebuffer *fb,
4574 int *min_downscale, int *max_upscale)
4575{
4576 struct amdgpu_device *adev = drm_to_adev(dev);
4577 struct dc *dc = adev->dm.dc;
4578 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4579 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4580
4581 switch (fb->format->format) {
4582 case DRM_FORMAT_P010:
4583 case DRM_FORMAT_NV12:
4584 case DRM_FORMAT_NV21:
4585 *max_upscale = plane_cap->max_upscale_factor.nv12;
4586 *min_downscale = plane_cap->max_downscale_factor.nv12;
4587 break;
4588
4589 case DRM_FORMAT_XRGB16161616F:
4590 case DRM_FORMAT_ARGB16161616F:
4591 case DRM_FORMAT_XBGR16161616F:
4592 case DRM_FORMAT_ABGR16161616F:
4593 *max_upscale = plane_cap->max_upscale_factor.fp16;
4594 *min_downscale = plane_cap->max_downscale_factor.fp16;
4595 break;
4596
4597 default:
4598 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4599 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4600 break;
4601 }
4602
4603 /*
4604	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4605 * scaling factor of 1.0 == 1000 units.
4606 */
4607 if (*max_upscale == 1)
4608 *max_upscale = 1000;
4609
4610 if (*min_downscale == 1)
4611 *min_downscale = 1000;
4612}
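/*
 * Editor's note on units: scaling factors are expressed in 1/1000ths,
 * so 1000 == 1.0x. For example, plane caps of max_downscale_factor 250
 * and max_upscale_factor 16000 allow anything from 0.25x downscale to
 * 16x upscale, and the factor-of-1 special case above normalizes
 * "no scaling allowed" to exactly 1.0x.
 */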
4613
4614
4375d625
S
4615static int fill_dc_scaling_info(struct amdgpu_device *adev,
4616 const struct drm_plane_state *state,
695af5f9 4617 struct dc_scaling_info *scaling_info)
e7b07cee 4618{
6300b3bd 4619 int scale_w, scale_h, min_downscale, max_upscale;
e7b07cee 4620
695af5f9 4621 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 4622
695af5f9
NK
4623 /* Source is fixed 16.16 but we ignore mantissa for now... */
4624 scaling_info->src_rect.x = state->src_x >> 16;
4625 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 4626
d89f6048
HW
4627 /*
4628 * For reasons we don't (yet) fully understand a non-zero
4629 * src_y coordinate into an NV12 buffer can cause a
4375d625
S
4630 * system hang on DCN1x.
4631 * To avoid hangs (and maybe be overly cautious)
d89f6048
HW
4632 * let's reject both non-zero src_x and src_y.
4633 *
4634 * We currently know of only one use-case to reproduce a
4635 * scenario with non-zero src_x and src_y for NV12, which
4636 * is to gesture the YouTube Android app into full screen
4637 * on ChromeOS.
4638 */
4375d625
S
4639 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4640 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4641 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4642 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
d89f6048
HW
4643 return -EINVAL;
4644
695af5f9
NK
4645 scaling_info->src_rect.width = state->src_w >> 16;
4646 if (scaling_info->src_rect.width == 0)
4647 return -EINVAL;
4648
4649 scaling_info->src_rect.height = state->src_h >> 16;
4650 if (scaling_info->src_rect.height == 0)
4651 return -EINVAL;
4652
4653 scaling_info->dst_rect.x = state->crtc_x;
4654 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
4655
4656 if (state->crtc_w == 0)
695af5f9 4657 return -EINVAL;
e7b07cee 4658
695af5f9 4659 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
4660
4661 if (state->crtc_h == 0)
695af5f9 4662 return -EINVAL;
e7b07cee 4663
695af5f9 4664 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 4665
695af5f9
NK
4666 /* DRM doesn't specify clipping on destination output. */
4667 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 4668
6300b3bd
MK
4669 /* Validate scaling per-format with DC plane caps */
4670 if (state->plane && state->plane->dev && state->fb) {
4671 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4672 &min_downscale, &max_upscale);
4673 } else {
4674 min_downscale = 250;
4675 max_upscale = 16000;
4676 }
4677
6491f0c0
NK
4678 scale_w = scaling_info->dst_rect.width * 1000 /
4679 scaling_info->src_rect.width;
e7b07cee 4680
6300b3bd 4681 if (scale_w < min_downscale || scale_w > max_upscale)
6491f0c0
NK
4682 return -EINVAL;
4683
4684 scale_h = scaling_info->dst_rect.height * 1000 /
4685 scaling_info->src_rect.height;
4686
6300b3bd 4687 if (scale_h < min_downscale || scale_h > max_upscale)
6491f0c0
NK
4688 return -EINVAL;
4689
695af5f9
NK
4690 /*
4691 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4692 * assume reasonable defaults based on the format.
4693 */
e7b07cee 4694
695af5f9 4695 return 0;
4562236b 4696}
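/*
 * Editor's worked example of the per-axis validation above: a 1920x1080
 * source shown in a 960x540 destination gives
 *
 *	scale_w = 960 * 1000 / 1920 = 500	(0.5x)
 *	scale_h = 540 * 1000 / 1080 = 500	(0.5x)
 *
 * which passes with the fallback limits (250..16000) but would be
 * rejected by a plane whose min_downscale is above 500.
 */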
695af5f9 4697
a3241991
BN
4698static void
4699fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4700 uint64_t tiling_flags)
e7b07cee 4701{
a3241991
BN
4702 /* Fill GFX8 params */
4703 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4704 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 4705
a3241991
BN
4706 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4707 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4708 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4709 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4710 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 4711
a3241991
BN
4712 /* XXX fix me for VI */
4713 tiling_info->gfx8.num_banks = num_banks;
4714 tiling_info->gfx8.array_mode =
4715 DC_ARRAY_2D_TILED_THIN1;
4716 tiling_info->gfx8.tile_split = tile_split;
4717 tiling_info->gfx8.bank_width = bankw;
4718 tiling_info->gfx8.bank_height = bankh;
4719 tiling_info->gfx8.tile_aspect = mtaspect;
4720 tiling_info->gfx8.tile_mode =
4721 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4722 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4723 == DC_ARRAY_1D_TILED_THIN1) {
4724 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
4725 }
4726
a3241991
BN
4727 tiling_info->gfx8.pipe_config =
4728 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
4729}
4730
a3241991
BN
4731static void
4732fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4733 union dc_tiling_info *tiling_info)
4734{
4735 tiling_info->gfx9.num_pipes =
4736 adev->gfx.config.gb_addr_config_fields.num_pipes;
4737 tiling_info->gfx9.num_banks =
4738 adev->gfx.config.gb_addr_config_fields.num_banks;
4739 tiling_info->gfx9.pipe_interleave =
4740 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4741 tiling_info->gfx9.num_shader_engines =
4742 adev->gfx.config.gb_addr_config_fields.num_se;
4743 tiling_info->gfx9.max_compressed_frags =
4744 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4745 tiling_info->gfx9.num_rb_per_se =
4746 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4747 tiling_info->gfx9.shaderEnable = 1;
1d789535 4748 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
a3241991 4749 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
7df7e505
NK
4750}
4751
695af5f9 4752static int
a3241991
BN
4753validate_dcc(struct amdgpu_device *adev,
4754 const enum surface_pixel_format format,
4755 const enum dc_rotation_angle rotation,
4756 const union dc_tiling_info *tiling_info,
4757 const struct dc_plane_dcc_param *dcc,
4758 const struct dc_plane_address *address,
4759 const struct plane_size *plane_size)
7df7e505
NK
4760{
4761 struct dc *dc = adev->dm.dc;
8daa1218
NC
4762 struct dc_dcc_surface_param input;
4763 struct dc_surface_dcc_cap output;
7df7e505 4764
8daa1218
NC
4765 memset(&input, 0, sizeof(input));
4766 memset(&output, 0, sizeof(output));
4767
a3241991 4768 if (!dcc->enable)
87b7ebc2
RS
4769 return 0;
4770
a3241991
BN
4771 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4772 !dc->cap_funcs.get_dcc_compression_cap)
09e5665a 4773 return -EINVAL;
7df7e505 4774
695af5f9 4775 input.format = format;
12e2b2d4
DL
4776 input.surface_size.width = plane_size->surface_size.width;
4777 input.surface_size.height = plane_size->surface_size.height;
695af5f9 4778 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 4779
695af5f9 4780 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 4781 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 4782 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
4783 input.scan = SCAN_DIRECTION_VERTICAL;
4784
4785 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 4786 return -EINVAL;
7df7e505
NK
4787
4788 if (!output.capable)
09e5665a 4789 return -EINVAL;
7df7e505 4790
a3241991
BN
4791 if (dcc->independent_64b_blks == 0 &&
4792 output.grph.rgb.independent_64b_blks != 0)
09e5665a 4793 return -EINVAL;
7df7e505 4794
a3241991
BN
4795 return 0;
4796}
4797
37384b3f
BN
4798static bool
4799modifier_has_dcc(uint64_t modifier)
4800{
4801 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4802}
4803
4804static unsigned
4805modifier_gfx9_swizzle_mode(uint64_t modifier)
4806{
4807 if (modifier == DRM_FORMAT_MOD_LINEAR)
4808 return 0;
4809
4810 return AMD_FMT_MOD_GET(TILE, modifier);
4811}
4812
dfbbfe3c
BN
4813static const struct drm_format_info *
4814amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4815{
816853f9 4816 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
4817}
4818
37384b3f
BN
4819static void
4820fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4821 union dc_tiling_info *tiling_info,
4822 uint64_t modifier)
4823{
4824 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4825 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4826 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4827 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4828
4829 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4830
4831 if (!IS_AMD_FMT_MOD(modifier))
4832 return;
4833
4834 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4835 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4836
4837 if (adev->family >= AMDGPU_FAMILY_NV) {
4838 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4839 } else {
4840 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4841
4842 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4843 }
4844}
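/*
 * Editor's worked example of the modifier decode above: for an AMD
 * format modifier with PIPE_XOR_BITS == 6 and PACKERS == 2,
 *
 *	pipes_log2         = min(4u, 6)       = 4  -> num_pipes = 16
 *	num_shader_engines = 1u << (6 - 4)    = 4
 *	num_pkrs (NV+)     = 1u << 2          = 4
 *
 * Values are illustrative only; real modifiers come from the lists
 * built by the add_gfx*_modifiers() helpers below.
 */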

enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};

static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);
	int i;

	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow these modifiers:
	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR ||
	    modifier == DRM_FORMAT_MOD_INVALID) {
		return true;
	}

	/* Check that the modifier is on the list of the plane's supported modifiers. */
	for (i = 0; i < plane->modifier_count; i++) {
		if (modifier == plane->modifiers[i])
			break;
	}
	if (i == plane->modifier_count)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
		/*
		 * We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes.
		 */
		if (info->num_planes > 1)
			return false;
	}

	return true;
}

static void
add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}
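
/*
 * Note (added commentary): this is a simple doubling dynamic array. On
 * allocation failure add_modifier() frees the array and sets *mods to
 * NULL, so every subsequent call becomes a no-op and get_plane_modifiers()
 * can report -ENOMEM once at the end instead of at each append.
 */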

static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);

	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				    AMD_FMT_MOD_SET(RB, rb) |
				    AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			    AMD_FMT_MOD_SET(RB, rb) |
			    AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}

static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static int
get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;

	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);

	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
	case AMDGPU_FAMILY_YC:
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	}

	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	if (!*mods)
		return -ENOMEM;

	return 0;
}
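
/*
 * Sketch of a typical consumer (hypothetical call site, for illustration
 * only): DRM core copies the modifier array during plane init, so the
 * caller frees its own copy afterwards.
 *
 *	uint64_t *mods = NULL;
 *
 *	if (!get_plane_modifiers(adev, plane->type, &mods))
 *		drm_universal_plane_init(adev_to_drm(adev), plane, ...,
 *					 formats, num_formats, mods, ...);
 *	kfree(mods);
 */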

static int
fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
					  const struct amdgpu_framebuffer *afb,
					  const enum surface_pixel_format format,
					  const enum dc_rotation_angle rotation,
					  const struct plane_size *plane_size,
					  union dc_tiling_info *tiling_info,
					  struct dc_plane_dcc_param *dcc,
					  struct dc_plane_address *address,
					  const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret = 0;

	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];
		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = independent_64b_blks;
		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
			if (independent_64b_blks && independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
			else if (independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_128b;
			else if (independent_64b_blks && !independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		} else {
			if (independent_64b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		}

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);

	return ret;
}

static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}

static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}
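
/*
 * Worked example (illustrative): DRM stores plane alpha as a 16-bit value,
 * so a half-transparent overlay with plane_state->alpha == 0x8000 satisfies
 * alpha < 0xffff and yields *global_alpha_value = 0x8000 >> 8 = 0x80.
 */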

static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	case DRM_FORMAT_XRGB16161616:
	case DRM_FORMAT_ARGB16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
		break;
	case DRM_FORMAT_XBGR16161616:
	case DRM_FORMAT_ABGR16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %p4cc\n",
			&fb->format->format);
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  afb->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  afb->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
			    src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
					    dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
					     dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
		      dst.x, dst.y, dst.width, dst.height);
}

static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}
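
/*
 * Worked example (illustrative): an EDID reporting 10 bpc combined with a
 * userspace request of requested_bpc = 11 caps to min(10, 11) = 10, which
 * is already even, so the result is COLOR_DEPTH_101010. A request of 9
 * would first cap to 9, then round down to 8 and map to COLOR_DEPTH_888.
 */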

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030khz is the separation point between HDTV and SDTV
		 * according to HDMI spec, we use YCbCr709 and YCbCr601
		 * respectively
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}
	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
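
/*
 * Worked example (illustrative): a 594 MHz 4K@60 HDMI timing gives
 * normalized_clk = 594000 kHz at 8 bpc. At 12 bpc that becomes
 * 594000 * 36 / 24 = 891000 kHz and at 10 bpc 594000 * 30 / 24 = 742500
 * kHz; against a 600000 kHz max_tmds_clock both exceed the limit, so the
 * loop walks down and settles on COLOR_DEPTH_888.
 */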

static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
	    && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
		 && aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
		 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	if (is_freesync_video_mode(mode_in, aconnector)) {
		timing_out->h_addressable = mode_in->hdisplay;
		timing_out->h_total = mode_in->htotal;
		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
		timing_out->v_total = mode_in->vtotal;
		timing_out->v_addressable = mode_in->vdisplay;
		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
		timing_out->pix_clk_100hz = mode_in->clock * 10;
	} else {
		timing_out->h_addressable = mode_in->crtc_hdisplay;
		timing_out->h_total = mode_in->crtc_htotal;
		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
		timing_out->v_total = mode_in->crtc_vtotal;
		timing_out->v_addressable = mode_in->crtc_vdisplay;
		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	}

	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}

static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	struct dc_stream_state *master = NULL;

	if (stream->triggered_crtc_reset.enabled) {
		master = stream->triggered_crtc_reset.event_source;
		stream->triggered_crtc_reset.event =
			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}
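
/*
 * Worked example (illustrative): a 1080p stream with pix_clk_100hz =
 * 1485000 (148.5 MHz), h_total = 2200 and v_total = 1125 gives
 * refresh_rate = (1485000 * 100) / (2200 * 1125) = 60, so it would be
 * elected master over any slower stream that also has
 * triggered_crtc_reset enabled.
 */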

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count ; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
			    struct dc_sink *sink, struct dc_stream_state *stream,
			    struct dsc_dec_dpcd_caps *dsc_caps)
{
	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
				    sink->sink_signal == SIGNAL_TYPE_EDP)) {
		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
		    sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
					      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
					      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
					      dsc_caps);
	}
}

static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
				     struct dc_sink *sink, struct dc_stream_state *stream,
				     struct dsc_dec_dpcd_caps *dsc_caps,
				     uint32_t max_dsc_target_bpp_limit_override)
{
	const struct dc_link_settings *verified_link_cap = NULL;
	uint32_t link_bw_in_kbps;
	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
	struct dc *dc = sink->ctx->dc;
	struct dc_dsc_bw_range bw_range = {0};
	struct dc_dsc_config dsc_cfg = {0};

	verified_link_cap = dc_link_get_link_cap(stream->link);
	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
	edp_min_bpp_x16 = 8 * 16;
	edp_max_bpp_x16 = 8 * 16;

	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;

	if (edp_max_bpp_x16 < edp_min_bpp_x16)
		edp_min_bpp_x16 = edp_max_bpp_x16;

	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
					   dc->debug.dsc_min_slice_height_override,
					   edp_min_bpp_x16, edp_max_bpp_x16,
					   dsc_caps,
					   &stream->timing,
					   &bw_range)) {

		if (bw_range.max_kbps < link_bw_in_kbps) {
			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
						  dsc_caps,
						  dc->debug.dsc_min_slice_height_override,
						  max_dsc_target_bpp_limit_override,
						  0,
						  &stream->timing,
						  &dsc_cfg)) {
				stream->timing.dsc_cfg = dsc_cfg;
				stream->timing.flags.DSC = 1;
				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
			}
			return;
		}
	}

	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
				  dsc_caps,
				  dc->debug.dsc_min_slice_height_override,
				  max_dsc_target_bpp_limit_override,
				  link_bw_in_kbps,
				  &stream->timing,
				  &dsc_cfg)) {
		stream->timing.dsc_cfg = dsc_cfg;
		stream->timing.flags.DSC = 1;
	}
}

static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
					struct dc_sink *sink, struct dc_stream_state *stream,
					struct dsc_dec_dpcd_caps *dsc_caps)
{
	struct drm_connector *drm_connector = &aconnector->base;
	uint32_t link_bandwidth_kbps;
	uint32_t max_dsc_target_bpp_limit_override = 0;
	struct dc *dc = sink->ctx->dc;
	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
	uint32_t dsc_max_supported_bw_in_kbps;

	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
						     dc_link_get_link_cap(aconnector->dc_link));

	if (stream->link && stream->link->local_sink)
		max_dsc_target_bpp_limit_override =
			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;

	/* Set DSC policy according to dsc_clock_en */
	dc_dsc_policy_set_enable_dsc_when_not_needed(
		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {

		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);

	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  max_dsc_target_bpp_limit_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg)) {
				stream->timing.flags.DSC = 1;
				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
						 __func__, drm_connector->name);
			}
		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
			max_supported_bw_in_kbps = link_bandwidth_kbps;
			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;

			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
			    max_supported_bw_in_kbps > 0 &&
			    dsc_max_supported_bw_in_kbps > 0)
				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
							  dsc_caps,
							  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
							  max_dsc_target_bpp_limit_override,
							  dsc_max_supported_bw_in_kbps,
							  &stream->timing,
							  &stream->timing.dsc_cfg)) {
					stream->timing.flags.DSC = 1;
					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
							 __func__, drm_connector->name);
				}
		}
	}

	/* Overwrite the stream flag if DSC is enabled through debugfs */
	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
		stream->timing.flags.DSC = 1;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

/**
 * DOC: FreeSync Video
 *
 * When a userspace application wants to play a video, the content follows a
 * standard format definition that usually specifies the FPS for that format.
 * The below list illustrates some video formats and their expected FPS,
 * respectively:
 *
 * - TV/NTSC (23.976 FPS)
 * - Cinema (24 FPS)
 * - TV/PAL (25 FPS)
 * - TV/NTSC (29.97 FPS)
 * - TV/NTSC (30 FPS)
 * - Cinema HFR (48 FPS)
 * - TV/PAL (50 FPS)
 * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96, 120 FPS)
 *
 * The list of standard video formats is not huge and can be added to the
 * connector's modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch will happen seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If the userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid blinking during the
 * transition. For example, the video player can change the modesetting from
 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
 * causing any display blink. This same concept can be applied to a mode
 * setting change.
 */
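
/*
 * Worked example (illustrative numbers): with a fixed 148.5 MHz pixel clock
 * and htotal = 2200, a base mode with vtotal = 1125 refreshes at
 * 148500000 / (2200 * 1125) = 60 Hz. Stretching only the vertical front
 * porch so that vtotal = 1406 yields 148500000 / (2200 * 1406) ~= 48 Hz,
 * which is how a FreeSync video mode reaches a cinema rate without
 * reprogramming the pixel clock.
 */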
a85ba005
NC
6244static struct drm_display_mode *
6245get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6246 bool use_probed_modes)
6247{
6248 struct drm_display_mode *m, *m_pref = NULL;
6249 u16 current_refresh, highest_refresh;
6250 struct list_head *list_head = use_probed_modes ?
6251 &aconnector->base.probed_modes :
6252 &aconnector->base.modes;
6253
6254 if (aconnector->freesync_vid_base.clock != 0)
6255 return &aconnector->freesync_vid_base;
6256
6257 /* Find the preferred mode */
6258 list_for_each_entry (m, list_head, head) {
6259 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6260 m_pref = m;
6261 break;
6262 }
6263 }
6264
6265 if (!m_pref) {
6266 /* Probably an EDID with no preferred mode. Fallback to first entry */
6267 m_pref = list_first_entry_or_null(
6268 &aconnector->base.modes, struct drm_display_mode, head);
6269 if (!m_pref) {
6270 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6271 return NULL;
6272 }
6273 }
6274
6275 highest_refresh = drm_mode_vrefresh(m_pref);
6276
6277 /*
6278 * Find the mode with highest refresh rate with same resolution.
6279 * For some monitors, preferred mode is not the mode with highest
6280 * supported refresh rate.
6281 */
6282 list_for_each_entry (m, list_head, head) {
6283 current_refresh = drm_mode_vrefresh(m);
6284
6285 if (m->hdisplay == m_pref->hdisplay &&
6286 m->vdisplay == m_pref->vdisplay &&
6287 highest_refresh < current_refresh) {
6288 highest_refresh = current_refresh;
6289 m_pref = m;
6290 }
6291 }
6292
6293 aconnector->freesync_vid_base = *m_pref;
6294 return m_pref;
6295}
6296
fe8858bb 6297static bool is_freesync_video_mode(const struct drm_display_mode *mode,
a85ba005
NC
6298 struct amdgpu_dm_connector *aconnector)
6299{
6300 struct drm_display_mode *high_mode;
6301 int timing_diff;
6302
6303 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6304 if (!high_mode || !mode)
6305 return false;
6306
6307 timing_diff = high_mode->vtotal - mode->vtotal;
6308
6309 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6310 high_mode->hdisplay != mode->hdisplay ||
6311 high_mode->vdisplay != mode->vdisplay ||
6312 high_mode->hsync_start != mode->hsync_start ||
6313 high_mode->hsync_end != mode->hsync_end ||
6314 high_mode->htotal != mode->htotal ||
6315 high_mode->hskew != mode->hskew ||
6316 high_mode->vscan != mode->vscan ||
6317 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6318 high_mode->vsync_end - mode->vsync_end != timing_diff)
6319 return false;
6320 else
6321 return true;
6322}
6323
3ee6b26b
AD
6324static struct dc_stream_state *
6325create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6326 const struct drm_display_mode *drm_mode,
b333730d 6327 const struct dm_connector_state *dm_state,
cbd14ae7
SW
6328 const struct dc_stream_state *old_stream,
6329 int requested_bpc)
e7b07cee
HW
6330{
6331 struct drm_display_mode *preferred_mode = NULL;
391ef035 6332 struct drm_connector *drm_connector;
42ba01fc
NK
6333 const struct drm_connector_state *con_state =
6334 dm_state ? &dm_state->base : NULL;
0971c40e 6335 struct dc_stream_state *stream = NULL;
e7b07cee 6336 struct drm_display_mode mode = *drm_mode;
a85ba005
NC
6337 struct drm_display_mode saved_mode;
6338 struct drm_display_mode *freesync_mode = NULL;
e7b07cee 6339 bool native_mode_found = false;
b0781603
NK
6340 bool recalculate_timing = false;
6341 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
b333730d 6342 int mode_refresh;
58124bf8 6343 int preferred_refresh = 0;
defeb878 6344#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 6345 struct dsc_dec_dpcd_caps dsc_caps;
7c431455 6346#endif
aed15309 6347 struct dc_sink *sink = NULL;
a85ba005
NC
6348
6349 memset(&saved_mode, 0, sizeof(saved_mode));
6350
b830ebc9 6351 if (aconnector == NULL) {
e7b07cee 6352 DRM_ERROR("aconnector is NULL!\n");
64245fa7 6353 return stream;
e7b07cee
HW
6354 }
6355
e7b07cee 6356 drm_connector = &aconnector->base;
2e0ac3d6 6357
f4ac176e 6358 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
6359 sink = create_fake_sink(aconnector);
6360 if (!sink)
6361 return stream;
aed15309
ML
6362 } else {
6363 sink = aconnector->dc_sink;
dcd5fb82 6364 dc_sink_retain(sink);
f4ac176e 6365 }
2e0ac3d6 6366
aed15309 6367 stream = dc_create_stream_for_sink(sink);
4562236b 6368
b830ebc9 6369 if (stream == NULL) {
e7b07cee 6370 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 6371 goto finish;
e7b07cee
HW
6372 }
6373
ceb3dbb4
JL
6374 stream->dm_stream_context = aconnector;
6375
4a36fcba
WL
6376 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6377 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6378
e7b07cee
HW
6379 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6380 /* Search for preferred mode */
6381 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6382 native_mode_found = true;
6383 break;
6384 }
6385 }
6386 if (!native_mode_found)
6387 preferred_mode = list_first_entry_or_null(
6388 &aconnector->base.modes,
6389 struct drm_display_mode,
6390 head);
6391
b333730d
BL
6392 mode_refresh = drm_mode_vrefresh(&mode);
6393
b830ebc9 6394 if (preferred_mode == NULL) {
1f6010a9
DF
6395 /*
6396 * This may not be an error, the use case is when we have no
e7b07cee
HW
6397 * usermode calls to reset and set mode upon hotplug. In this
6398 * case, we call set mode ourselves to restore the previous mode
6399 * and the modelist may not be filled in in time.
6400 */
f1ad2f5e 6401 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee 6402 } else {
b0781603 6403 recalculate_timing = amdgpu_freesync_vid_mode &&
a85ba005
NC
6404 is_freesync_video_mode(&mode, aconnector);
6405 if (recalculate_timing) {
6406 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6407 saved_mode = mode;
6408 mode = *freesync_mode;
6409 } else {
6410 decide_crtc_timing_for_drm_display_mode(
b0781603 6411 &mode, preferred_mode, scale);
a85ba005 6412
b0781603
NK
6413 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6414 }
e7b07cee
HW
6415 }
6416
a85ba005
NC
6417 if (recalculate_timing)
6418 drm_mode_set_crtcinfo(&saved_mode, 0);
fe8858bb 6419 else if (!dm_state)
f783577c
JFZ
6420 drm_mode_set_crtcinfo(&mode, 0);
6421
a85ba005 6422 /*
b333730d
BL
6423 * If scaling is enabled and refresh rate didn't change
6424 * we copy the vic and polarities of the old timings
6425 */
b0781603 6426 if (!scale || mode_refresh != preferred_refresh)
a85ba005
NC
6427 fill_stream_properties_from_drm_display_mode(
6428 stream, &mode, &aconnector->base, con_state, NULL,
6429 requested_bpc);
b333730d 6430 else
a85ba005
NC
6431 fill_stream_properties_from_drm_display_mode(
6432 stream, &mode, &aconnector->base, con_state, old_stream,
6433 requested_bpc);
b333730d 6434
defeb878 6435#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6436 /* SST DSC determination policy */
6437 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6438 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6439 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
39a4eb85
WL
6440#endif
6441
e7b07cee
HW
6442 update_stream_scaling_settings(&mode, dm_state, stream);
6443
6444 fill_audio_info(
6445 &stream->audio_info,
6446 drm_connector,
aed15309 6447 sink);
e7b07cee 6448
ceb3dbb4 6449 update_stream_signal(stream, sink);
9182b4cb 6450
d832fc3b 6451 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
6452 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6453
8a488f5d
RL
6454 if (stream->link->psr_settings.psr_feature_enabled) {
6455 /*
6456 * Decide whether the stream supports the VSC SDP colorimetry
6457 * capability before building the VSC info packet.
6458 */
6459 stream->use_vsc_sdp_for_colorimetry = false;
6460 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6461 stream->use_vsc_sdp_for_colorimetry =
6462 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6463 } else {
6464 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6465 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 6466 }
8a488f5d 6467 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
1a365683
RL
6468 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6469
8c322309 6470 }
aed15309 6471finish:
dcd5fb82 6472 dc_sink_release(sink);
9e3efe3e 6473
e7b07cee
HW
6474 return stream;
6475}
6476
7578ecda 6477static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
6478{
6479 drm_crtc_cleanup(crtc);
6480 kfree(crtc);
6481}
6482
6483static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 6484 struct drm_crtc_state *state)
e7b07cee
HW
6485{
6486 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6487
6488 /* TODO: destroy dc_stream objects after the stream object is flattened */
6489 if (cur->stream)
6490 dc_stream_release(cur->stream);
6491
6492
6493 __drm_atomic_helper_crtc_destroy_state(state);
6494
6495
6496 kfree(state);
6497}
6498
6499static void dm_crtc_reset_state(struct drm_crtc *crtc)
6500{
6501 struct dm_crtc_state *state;
6502
6503 if (crtc->state)
6504 dm_crtc_destroy_state(crtc, crtc->state);
6505
6506 state = kzalloc(sizeof(*state), GFP_KERNEL);
6507 if (WARN_ON(!state))
6508 return;
6509
1f8a52ec 6510 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
6511}
6512
6513static struct drm_crtc_state *
6514dm_crtc_duplicate_state(struct drm_crtc *crtc)
6515{
6516 struct dm_crtc_state *state, *cur;
6517
6518 cur = to_dm_crtc_state(crtc->state);
6519
6520 if (WARN_ON(!crtc->state))
6521 return NULL;
6522
2004f45e 6523 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
6524 if (!state)
6525 return NULL;
e7b07cee
HW
6526
6527 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6528
6529 if (cur->stream) {
6530 state->stream = cur->stream;
6531 dc_stream_retain(state->stream);
6532 }
6533
d6ef9b41 6534 state->active_planes = cur->active_planes;
98e6436d 6535 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 6536 state->abm_level = cur->abm_level;
bb47de73
NK
6537 state->vrr_supported = cur->vrr_supported;
6538 state->freesync_config = cur->freesync_config;
cf020d49
NK
6539 state->cm_has_degamma = cur->cm_has_degamma;
6540 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
035f5496 6541 state->force_dpms_off = cur->force_dpms_off;
e7b07cee
HW
6542 /* TODO: duplicate dc_stream after the stream object is flattened */
6543
6544 return &state->base;
6545}
6546
86bc2219 6547#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
e69231c4 6548static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
86bc2219
WL
6549{
6550 crtc_debugfs_init(crtc);
6551
6552 return 0;
6553}
6554#endif
6555
d2574c33
MK
6556static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6557{
6558 enum dc_irq_source irq_source;
6559 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6560 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
6561 int rc;
6562
6563 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6564
6565 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6566
4711c033
LT
6567 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6568 acrtc->crtc_id, enable ? "en" : "dis", rc);
d2574c33
MK
6569 return rc;
6570}
589d2739
HW
6571
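/*
 * dc interrupt sources are laid out per OTG instance: IRQ_TYPE_VUPDATE
 * above (and IRQ_TYPE_VBLANK below) act as a base that is offset by
 * acrtc->otg_inst to address the CRTC's own timing generator interrupt.
 */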
6572static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6573{
6574 enum dc_irq_source irq_source;
6575 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6576 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 6577 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
ea3b4242 6578#if defined(CONFIG_DRM_AMD_DC_DCN)
71338cb4 6579 struct amdgpu_display_manager *dm = &adev->dm;
09a5df6c 6580 struct vblank_control_work *work;
ea3b4242 6581#endif
d2574c33
MK
6582 int rc = 0;
6583
6584 if (enable) {
6585 /* vblank irq on -> Only need vupdate irq in vrr mode */
6586 if (amdgpu_dm_vrr_active(acrtc_state))
6587 rc = dm_set_vupdate_irq(crtc, true);
6588 } else {
6589 /* vblank irq off -> vupdate irq off */
6590 rc = dm_set_vupdate_irq(crtc, false);
6591 }
6592
6593 if (rc)
6594 return rc;
589d2739
HW
6595
6596 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
6597
6598 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6599 return -EBUSY;
6600
98ab5f35
BL
6601 if (amdgpu_in_reset(adev))
6602 return 0;
6603
4928b480 6604#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
6605 if (dm->vblank_control_workqueue) {
6606 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6607 if (!work)
6608 return -ENOMEM;
09a5df6c 6609
06dd1888
NK
6610 INIT_WORK(&work->work, vblank_control_worker);
6611 work->dm = dm;
6612 work->acrtc = acrtc;
6613 work->enable = enable;
09a5df6c 6614
06dd1888
NK
6615 if (acrtc_state->stream) {
6616 dc_stream_retain(acrtc_state->stream);
6617 work->stream = acrtc_state->stream;
6618 }
58aa1c50 6619
06dd1888
NK
6620 queue_work(dm->vblank_control_workqueue, &work->work);
6621 }
4928b480 6622#endif
71338cb4 6623
71338cb4 6624 return 0;
589d2739
HW
6625}
6626
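/*
 * Note: the vblank control work above is allocated with GFP_ATOMIC and
 * queued rather than run inline because this path can be entered from
 * atomic context; the worker then handles the parts that may need to
 * sleep.
 */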
6627static int dm_enable_vblank(struct drm_crtc *crtc)
6628{
6629 return dm_set_vblank(crtc, true);
6630}
6631
6632static void dm_disable_vblank(struct drm_crtc *crtc)
6633{
6634 dm_set_vblank(crtc, false);
6635}
6636
e7b07cee
HW
6637/* Only the options currently available to the driver are implemented */
6638static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6639 .reset = dm_crtc_reset_state,
6640 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
6641 .set_config = drm_atomic_helper_set_config,
6642 .page_flip = drm_atomic_helper_page_flip,
6643 .atomic_duplicate_state = dm_crtc_duplicate_state,
6644 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 6645 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 6646 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 6647 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 6648 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
6649 .enable_vblank = dm_enable_vblank,
6650 .disable_vblank = dm_disable_vblank,
e3eff4b5 6651 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
86bc2219
WL
6652#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6653 .late_register = amdgpu_dm_crtc_late_register,
6654#endif
e7b07cee
HW
6655};
6656
6657static enum drm_connector_status
6658amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6659{
6660 bool connected;
c84dec2f 6661 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6662
1f6010a9
DF
6663 /*
6664 * Notes:
e7b07cee
HW
6665 * 1. This interface is NOT called in context of HPD irq.
6666 * 2. This interface *is called* in context of user-mode ioctl, which
1f6010a9
DF
6667 * makes it a bad place for *any* MST-related activity.
6668 */
e7b07cee 6669
8580d60b
HW
6670 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6671 !aconnector->fake_enable)
e7b07cee
HW
6672 connected = (aconnector->dc_sink != NULL);
6673 else
6674 connected = (aconnector->base.force == DRM_FORCE_ON);
6675
0f877894
OV
6676 update_subconnector_property(aconnector);
6677
e7b07cee
HW
6678 return (connected ? connector_status_connected :
6679 connector_status_disconnected);
6680}
6681
3ee6b26b
AD
6682int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6683 struct drm_connector_state *connector_state,
6684 struct drm_property *property,
6685 uint64_t val)
e7b07cee
HW
6686{
6687 struct drm_device *dev = connector->dev;
1348969a 6688 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6689 struct dm_connector_state *dm_old_state =
6690 to_dm_connector_state(connector->state);
6691 struct dm_connector_state *dm_new_state =
6692 to_dm_connector_state(connector_state);
6693
6694 int ret = -EINVAL;
6695
6696 if (property == dev->mode_config.scaling_mode_property) {
6697 enum amdgpu_rmx_type rmx_type;
6698
6699 switch (val) {
6700 case DRM_MODE_SCALE_CENTER:
6701 rmx_type = RMX_CENTER;
6702 break;
6703 case DRM_MODE_SCALE_ASPECT:
6704 rmx_type = RMX_ASPECT;
6705 break;
6706 case DRM_MODE_SCALE_FULLSCREEN:
6707 rmx_type = RMX_FULL;
6708 break;
6709 case DRM_MODE_SCALE_NONE:
6710 default:
6711 rmx_type = RMX_OFF;
6712 break;
6713 }
6714
6715 if (dm_old_state->scaling == rmx_type)
6716 return 0;
6717
6718 dm_new_state->scaling = rmx_type;
6719 ret = 0;
6720 } else if (property == adev->mode_info.underscan_hborder_property) {
6721 dm_new_state->underscan_hborder = val;
6722 ret = 0;
6723 } else if (property == adev->mode_info.underscan_vborder_property) {
6724 dm_new_state->underscan_vborder = val;
6725 ret = 0;
6726 } else if (property == adev->mode_info.underscan_property) {
6727 dm_new_state->underscan_enable = val;
6728 ret = 0;
c1ee92f9
DF
6729 } else if (property == adev->mode_info.abm_level_property) {
6730 dm_new_state->abm_level = val;
6731 ret = 0;
e7b07cee
HW
6732 }
6733
6734 return ret;
6735}
6736
3ee6b26b
AD
6737int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6738 const struct drm_connector_state *state,
6739 struct drm_property *property,
6740 uint64_t *val)
e7b07cee
HW
6741{
6742 struct drm_device *dev = connector->dev;
1348969a 6743 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6744 struct dm_connector_state *dm_state =
6745 to_dm_connector_state(state);
6746 int ret = -EINVAL;
6747
6748 if (property == dev->mode_config.scaling_mode_property) {
6749 switch (dm_state->scaling) {
6750 case RMX_CENTER:
6751 *val = DRM_MODE_SCALE_CENTER;
6752 break;
6753 case RMX_ASPECT:
6754 *val = DRM_MODE_SCALE_ASPECT;
6755 break;
6756 case RMX_FULL:
6757 *val = DRM_MODE_SCALE_FULLSCREEN;
6758 break;
6759 case RMX_OFF:
6760 default:
6761 *val = DRM_MODE_SCALE_NONE;
6762 break;
6763 }
6764 ret = 0;
6765 } else if (property == adev->mode_info.underscan_hborder_property) {
6766 *val = dm_state->underscan_hborder;
6767 ret = 0;
6768 } else if (property == adev->mode_info.underscan_vborder_property) {
6769 *val = dm_state->underscan_vborder;
6770 ret = 0;
6771 } else if (property == adev->mode_info.underscan_property) {
6772 *val = dm_state->underscan_enable;
6773 ret = 0;
c1ee92f9
DF
6774 } else if (property == adev->mode_info.abm_level_property) {
6775 *val = dm_state->abm_level;
6776 ret = 0;
e7b07cee 6777 }
c1ee92f9 6778
e7b07cee
HW
6779 return ret;
6780}
6781
526c654a
ED
6782static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6783{
6784 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6785
6786 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6787}
6788
7578ecda 6789static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 6790{
c84dec2f 6791 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6792 const struct dc_link *link = aconnector->dc_link;
1348969a 6793 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 6794 struct amdgpu_display_manager *dm = &adev->dm;
7fd13bae 6795 int i;
ada8ce15 6796
5dff80bd
AG
6797 /*
6798 * Call only if mst_mgr was initialized before, since it's not done
6799 * for all connector types.
6800 */
6801 if (aconnector->mst_mgr.dev)
6802 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6803
e7b07cee
HW
6804#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6805 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
7fd13bae
AD
6806 for (i = 0; i < dm->num_of_edps; i++) {
6807 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6808 backlight_device_unregister(dm->backlight_dev[i]);
6809 dm->backlight_dev[i] = NULL;
6810 }
e7b07cee
HW
6811 }
6812#endif
dcd5fb82
MF
6813
6814 if (aconnector->dc_em_sink)
6815 dc_sink_release(aconnector->dc_em_sink);
6816 aconnector->dc_em_sink = NULL;
6817 if (aconnector->dc_sink)
6818 dc_sink_release(aconnector->dc_sink);
6819 aconnector->dc_sink = NULL;
6820
e86e8947 6821 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
6822 drm_connector_unregister(connector);
6823 drm_connector_cleanup(connector);
526c654a
ED
6824 if (aconnector->i2c) {
6825 i2c_del_adapter(&aconnector->i2c->base);
6826 kfree(aconnector->i2c);
6827 }
7daec99f 6828 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 6829
e7b07cee
HW
6830 kfree(connector);
6831}
6832
6833void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6834{
6835 struct dm_connector_state *state =
6836 to_dm_connector_state(connector->state);
6837
df099b9b
LSL
6838 if (connector->state)
6839 __drm_atomic_helper_connector_destroy_state(connector->state);
6840
e7b07cee
HW
6841 kfree(state);
6842
6843 state = kzalloc(sizeof(*state), GFP_KERNEL);
6844
6845 if (state) {
6846 state->scaling = RMX_OFF;
6847 state->underscan_enable = false;
6848 state->underscan_hborder = 0;
6849 state->underscan_vborder = 0;
01933ba4 6850 state->base.max_requested_bpc = 8;
3261e013
ML
6851 state->vcpi_slots = 0;
6852 state->pbn = 0;
c3e50f89
NK
6853 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6854 state->abm_level = amdgpu_dm_abm_level;
6855
df099b9b 6856 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
6857 }
6858}
6859
3ee6b26b
AD
6860struct drm_connector_state *
6861amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
6862{
6863 struct dm_connector_state *state =
6864 to_dm_connector_state(connector->state);
6865
6866 struct dm_connector_state *new_state =
6867 kmemdup(state, sizeof(*state), GFP_KERNEL);
6868
98e6436d
AK
6869 if (!new_state)
6870 return NULL;
e7b07cee 6871
98e6436d
AK
6872 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6873
6874 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6875 new_state->abm_level = state->abm_level;
922454c2
NK
6876 new_state->scaling = state->scaling;
6877 new_state->underscan_enable = state->underscan_enable;
6878 new_state->underscan_hborder = state->underscan_hborder;
6879 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
6880 new_state->vcpi_slots = state->vcpi_slots;
6881 new_state->pbn = state->pbn;
98e6436d 6882 return &new_state->base;
e7b07cee
HW
6883}
6884
14f04fa4
AD
6885static int
6886amdgpu_dm_connector_late_register(struct drm_connector *connector)
6887{
6888 struct amdgpu_dm_connector *amdgpu_dm_connector =
6889 to_amdgpu_dm_connector(connector);
00a8037e 6890 int r;
14f04fa4 6891
00a8037e
AD
6892 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6893 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6894 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6895 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6896 if (r)
6897 return r;
6898 }
6899
6900#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
6901 connector_debugfs_init(amdgpu_dm_connector);
6902#endif
6903
6904 return 0;
6905}
6906
e7b07cee
HW
6907static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6908 .reset = amdgpu_dm_connector_funcs_reset,
6909 .detect = amdgpu_dm_connector_detect,
6910 .fill_modes = drm_helper_probe_single_connector_modes,
6911 .destroy = amdgpu_dm_connector_destroy,
6912 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6913 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6914 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6915 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6916 .late_register = amdgpu_dm_connector_late_register,
526c654a 6917 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
6918};
6919
e7b07cee
HW
6920static int get_modes(struct drm_connector *connector)
6921{
6922 return amdgpu_dm_connector_get_modes(connector);
6923}
6924
c84dec2f 6925static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6926{
6927 struct dc_sink_init_data init_params = {
6928 .link = aconnector->dc_link,
6929 .sink_signal = SIGNAL_TYPE_VIRTUAL
6930 };
70e8ffc5 6931 struct edid *edid;
e7b07cee 6932
a89ff457 6933 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
6934 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6935 aconnector->base.name);
6936
6937 aconnector->base.force = DRM_FORCE_OFF;
6938 aconnector->base.override_edid = false;
6939 return;
6940 }
6941
70e8ffc5
HW
6942 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6943
e7b07cee
HW
6944 aconnector->edid = edid;
6945
6946 aconnector->dc_em_sink = dc_link_add_remote_sink(
6947 aconnector->dc_link,
6948 (uint8_t *)edid,
6949 (edid->extensions + 1) * EDID_LENGTH,
6950 &init_params);
6951
dcd5fb82 6952 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
6953 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6954 aconnector->dc_link->local_sink :
6955 aconnector->dc_em_sink;
dcd5fb82
MF
6956 dc_sink_retain(aconnector->dc_sink);
6957 }
e7b07cee
HW
6958}
6959
c84dec2f 6960static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6961{
6962 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6963
1f6010a9
DF
6964 /*
6965 * In case of headless boot with force on for a DP managed connector,
e7b07cee
HW
6966 * those settings have to be != 0 to get an initial modeset.
6967 */
6968 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6969 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6970 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6971 }
6972
6973
6974 aconnector->base.override_edid = true;
6975 create_eml_sink(aconnector);
6976}
6977
cbd14ae7
SW
6978static struct dc_stream_state *
6979create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6980 const struct drm_display_mode *drm_mode,
6981 const struct dm_connector_state *dm_state,
6982 const struct dc_stream_state *old_stream)
6983{
6984 struct drm_connector *connector = &aconnector->base;
1348969a 6985 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 6986 struct dc_stream_state *stream;
4b7da34b
SW
6987 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6988 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
6989 enum dc_status dc_result = DC_OK;
6990
6991 do {
6992 stream = create_stream_for_sink(aconnector, drm_mode,
6993 dm_state, old_stream,
6994 requested_bpc);
6995 if (stream == NULL) {
6996 DRM_ERROR("Failed to create stream for sink!\n");
6997 break;
6998 }
6999
7000 dc_result = dc_validate_stream(adev->dm.dc, stream);
7001
7002 if (dc_result != DC_OK) {
74a16675 7003 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
7004 drm_mode->hdisplay,
7005 drm_mode->vdisplay,
7006 drm_mode->clock,
74a16675
RS
7007 dc_result,
7008 dc_status_to_str(dc_result));
cbd14ae7
SW
7009
7010 dc_stream_release(stream);
7011 stream = NULL;
7012 requested_bpc -= 2; /* lower bpc to retry validation */
7013 }
7014
7015 } while (stream == NULL && requested_bpc >= 6);
7016
68eb3ae3
WS
7017 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7018 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7019
7020 aconnector->force_yuv420_output = true;
7021 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7022 dm_state, old_stream);
7023 aconnector->force_yuv420_output = false;
7024 }
7025
cbd14ae7
SW
7026 return stream;
7027}
7028
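/*
 * Validation fallback order used above, e.g. for a sink asking for
 * 10 bpc on a link that cannot carry it: try 10 bpc, then 8, then 6
 * (requested_bpc -= 2 per failed attempt); if DC still reports
 * DC_FAIL_ENC_VALIDATE, the whole ladder is retried once more with
 * YCbCr 4:2:0 output forced.
 */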
ba9ca088 7029enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 7030 struct drm_display_mode *mode)
e7b07cee
HW
7031{
7032 int result = MODE_ERROR;
7033 struct dc_sink *dc_sink;
e7b07cee 7034 /* TODO: Unhardcode stream count */
0971c40e 7035 struct dc_stream_state *stream;
c84dec2f 7036 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
7037
7038 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7039 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7040 return result;
7041
1f6010a9
DF
7042 /*
7043 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
7044 * EDID mgmt
7045 */
7046 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7047 !aconnector->dc_em_sink)
7048 handle_edid_mgmt(aconnector);
7049
c84dec2f 7050 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 7051
ad975f44
VL
7052 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7053 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
7054 DRM_ERROR("dc_sink is NULL!\n");
7055 goto fail;
7056 }
7057
cbd14ae7
SW
7058 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7059 if (stream) {
7060 dc_stream_release(stream);
e7b07cee 7061 result = MODE_OK;
cbd14ae7 7062 }
e7b07cee
HW
7063
7064fail:
7065 /* TODO: error handling */
7066 return result;
7067}
7068
88694af9
NK
7069static int fill_hdr_info_packet(const struct drm_connector_state *state,
7070 struct dc_info_packet *out)
7071{
7072 struct hdmi_drm_infoframe frame;
7073 unsigned char buf[30]; /* 26 + 4 */
7074 ssize_t len;
7075 int ret, i;
7076
7077 memset(out, 0, sizeof(*out));
7078
7079 if (!state->hdr_output_metadata)
7080 return 0;
7081
7082 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7083 if (ret)
7084 return ret;
7085
7086 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7087 if (len < 0)
7088 return (int)len;
7089
7090 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7091 if (len != 30)
7092 return -EINVAL;
7093
7094 /* Prepare the infopacket for DC. */
7095 switch (state->connector->connector_type) {
7096 case DRM_MODE_CONNECTOR_HDMIA:
7097 out->hb0 = 0x87; /* type */
7098 out->hb1 = 0x01; /* version */
7099 out->hb2 = 0x1A; /* length */
7100 out->sb[0] = buf[3]; /* checksum */
7101 i = 1;
7102 break;
7103
7104 case DRM_MODE_CONNECTOR_DisplayPort:
7105 case DRM_MODE_CONNECTOR_eDP:
7106 out->hb0 = 0x00; /* sdp id, zero */
7107 out->hb1 = 0x87; /* type */
7108 out->hb2 = 0x1D; /* payload len - 1 */
7109 out->hb3 = (0x13 << 2); /* sdp version */
7110 out->sb[0] = 0x01; /* version */
7111 out->sb[1] = 0x1A; /* length */
7112 i = 2;
7113 break;
7114
7115 default:
7116 return -EINVAL;
7117 }
7118
7119 memcpy(&out->sb[i], &buf[4], 26);
7120 out->valid = true;
7121
7122 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7123 sizeof(out->sb), false);
7124
7125 return 0;
7126}
7127
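/*
 * Layout of the packed infoframe in buf[] above: buf[0..2] carry the
 * HDMI infoframe header (type 0x87, version, length 26), buf[3] is the
 * checksum and buf[4..29] are the 26 bytes of static HDR metadata.
 * HDMI keeps the checksum as sb[0], while DP/eDP repack the payload
 * into an SDP and drop the HDMI checksum byte.
 */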
88694af9
NK
7128static int
7129amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 7130 struct drm_atomic_state *state)
88694af9 7131{
51e857af
SP
7132 struct drm_connector_state *new_con_state =
7133 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
7134 struct drm_connector_state *old_con_state =
7135 drm_atomic_get_old_connector_state(state, conn);
7136 struct drm_crtc *crtc = new_con_state->crtc;
7137 struct drm_crtc_state *new_crtc_state;
7138 int ret;
7139
e8a98235
RS
7140 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7141
88694af9
NK
7142 if (!crtc)
7143 return 0;
7144
72921cdf 7145 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
7146 struct dc_info_packet hdr_infopacket;
7147
7148 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7149 if (ret)
7150 return ret;
7151
7152 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7153 if (IS_ERR(new_crtc_state))
7154 return PTR_ERR(new_crtc_state);
7155
7156 /*
7157 * DC considers the stream backends changed if the
7158 * static metadata changes. Forcing the modeset also
7159 * gives a simple way for userspace to switch from
b232d4ed
NK
7160 * 8bpc to 10bpc when setting the metadata to enter
7161 * or exit HDR.
7162 *
7163 * Changing the static metadata after it's been
7164 * set is permissible, however. So only force a
7165 * modeset if we're entering or exiting HDR.
88694af9 7166 */
b232d4ed
NK
7167 new_crtc_state->mode_changed =
7168 !old_con_state->hdr_output_metadata ||
7169 !new_con_state->hdr_output_metadata;
88694af9
NK
7170 }
7171
7172 return 0;
7173}
7174
e7b07cee
HW
7175static const struct drm_connector_helper_funcs
7176amdgpu_dm_connector_helper_funcs = {
7177 /*
1f6010a9 7178 * If hotplugging a second, bigger display in FB console mode, higher-resolution
b830ebc9 7179 * modes will be filtered out by drm_mode_validate_size(), and those modes
1f6010a9 7180 * are missing after the user starts lightdm. So we need to renew the modes
b830ebc9
HW
7181 * list in the get_modes callback, not just return the modes count
7182 */
e7b07cee
HW
7183 .get_modes = get_modes,
7184 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 7185 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
7186};
7187
7188static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7189{
7190}
7191
d6ef9b41 7192static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
7193{
7194 struct drm_atomic_state *state = new_crtc_state->state;
7195 struct drm_plane *plane;
7196 int num_active = 0;
7197
7198 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7199 struct drm_plane_state *new_plane_state;
7200
7201 /* Cursor planes are "fake". */
7202 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7203 continue;
7204
7205 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7206
7207 if (!new_plane_state) {
7208 /*
7209 * The plane is enabled on the CRTC and hasn't changed
7210 * state. This means that it previously passed
7211 * validation and is therefore enabled.
7212 */
7213 num_active += 1;
7214 continue;
7215 }
7216
7217 /* We need a framebuffer to be considered enabled. */
7218 num_active += (new_plane_state->fb != NULL);
7219 }
7220
d6ef9b41
NK
7221 return num_active;
7222}
7223
8fe684e9
NK
7224static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7225 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
7226{
7227 struct dm_crtc_state *dm_new_crtc_state =
7228 to_dm_crtc_state(new_crtc_state);
7229
7230 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
7231
7232 if (!dm_new_crtc_state->stream)
7233 return;
7234
7235 dm_new_crtc_state->active_planes =
7236 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
7237}
7238
3ee6b26b 7239static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 7240 struct drm_atomic_state *state)
e7b07cee 7241{
29b77ad7
MR
7242 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7243 crtc);
1348969a 7244 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 7245 struct dc *dc = adev->dm.dc;
29b77ad7 7246 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
7247 int ret = -EINVAL;
7248
5b8c5969 7249 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 7250
29b77ad7 7251 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 7252
bcd74374
ND
7253 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7254 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
e7b07cee
HW
7255 return ret;
7256 }
7257
bc92c065 7258 /*
b836a274
MD
7259 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7260 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7261 * planes are disabled, which is not supported by the hardware. And there is legacy
7262 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 7263 */
29b77ad7 7264 if (crtc_state->enable &&
ea9522f5
SS
7265 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7266 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 7267 return -EINVAL;
ea9522f5 7268 }
c14a005c 7269
b836a274
MD
7270 /* In some use cases, like reset, no stream is attached */
7271 if (!dm_crtc_state->stream)
7272 return 0;
7273
62c933f9 7274 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
7275 return 0;
7276
ea9522f5 7277 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
7278 return ret;
7279}
7280
3ee6b26b
AD
7281static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7282 const struct drm_display_mode *mode,
7283 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
7284{
7285 return true;
7286}
7287
7288static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7289 .disable = dm_crtc_helper_disable,
7290 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
7291 .mode_fixup = dm_crtc_helper_mode_fixup,
7292 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
7293};
7294
7295static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7296{
7297
7298}
7299
3261e013
ML
7300static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7301{
7302 switch (display_color_depth) {
7303 case COLOR_DEPTH_666:
7304 return 6;
7305 case COLOR_DEPTH_888:
7306 return 8;
7307 case COLOR_DEPTH_101010:
7308 return 10;
7309 case COLOR_DEPTH_121212:
7310 return 12;
7311 case COLOR_DEPTH_141414:
7312 return 14;
7313 case COLOR_DEPTH_161616:
7314 return 16;
7315 default:
7316 break;
7317 }
7318 return 0;
7319}
7320
3ee6b26b
AD
7321static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7322 struct drm_crtc_state *crtc_state,
7323 struct drm_connector_state *conn_state)
e7b07cee 7324{
3261e013
ML
7325 struct drm_atomic_state *state = crtc_state->state;
7326 struct drm_connector *connector = conn_state->connector;
7327 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7328 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7329 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7330 struct drm_dp_mst_topology_mgr *mst_mgr;
7331 struct drm_dp_mst_port *mst_port;
7332 enum dc_color_depth color_depth;
7333 int clock, bpp = 0;
1bc22f20 7334 bool is_y420 = false;
3261e013
ML
7335
7336 if (!aconnector->port || !aconnector->dc_sink)
7337 return 0;
7338
7339 mst_port = aconnector->port;
7340 mst_mgr = &aconnector->mst_port->mst_mgr;
7341
7342 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7343 return 0;
7344
7345 if (!state->duplicated) {
cbd14ae7 7346 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
7347 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7348 aconnector->force_yuv420_output;
cbd14ae7
SW
7349 color_depth = convert_color_depth_from_display_info(connector,
7350 is_y420,
7351 max_bpc);
3261e013
ML
7352 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7353 clock = adjusted_mode->clock;
dc48529f 7354 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
7355 }
7356 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7357 mst_mgr,
7358 mst_port,
1c6c1cb5 7359 dm_new_connector_state->pbn,
03ca9600 7360 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
7361 if (dm_new_connector_state->vcpi_slots < 0) {
7362 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7363 return dm_new_connector_state->vcpi_slots;
7364 }
e7b07cee
HW
7365 return 0;
7366}
7367
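/*
 * Worked PBN example for the math above (mode values assumed for
 * illustration): a 154000 kHz mode at 30 bpp needs 154000 * 30 / 8 =
 * 577500 kB/s. One PBN unit is 54/64 MB/s, so 577500 * 64 / 54 / 1000
 * is roughly 684.4; with the ~0.6% margin drm_dp_calc_pbn_mode() adds,
 * that rounds up to 689 PBN.
 */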
7368const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7369 .disable = dm_encoder_helper_disable,
7370 .atomic_check = dm_encoder_helper_atomic_check
7371};
7372
d9fe1a4c 7373#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74 7374static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6513104b
HW
7375 struct dc_state *dc_state,
7376 struct dsc_mst_fairness_vars *vars)
29b9ba74
ML
7377{
7378 struct dc_stream_state *stream = NULL;
7379 struct drm_connector *connector;
5760dcb9 7380 struct drm_connector_state *new_con_state;
29b9ba74
ML
7381 struct amdgpu_dm_connector *aconnector;
7382 struct dm_connector_state *dm_conn_state;
a550bb16
HW
7383 int i, j;
7384 int vcpi, pbn_div, pbn, slot_num = 0;
29b9ba74 7385
5760dcb9 7386 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
7387
7388 aconnector = to_amdgpu_dm_connector(connector);
7389
7390 if (!aconnector->port)
7391 continue;
7392
7393 if (!new_con_state || !new_con_state->crtc)
7394 continue;
7395
7396 dm_conn_state = to_dm_connector_state(new_con_state);
7397
7398 for (j = 0; j < dc_state->stream_count; j++) {
7399 stream = dc_state->streams[j];
7400 if (!stream)
7401 continue;
7402
7403 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7404 break;
7405
7406 stream = NULL;
7407 }
7408
7409 if (!stream)
7410 continue;
7411
29b9ba74 7412 pbn_div = dm_mst_get_pbn_divider(stream->link);
6513104b
HW
7413 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7414 for (j = 0; j < dc_state->stream_count; j++) {
7415 if (vars[j].aconnector == aconnector) {
7416 pbn = vars[j].pbn;
7417 break;
7418 }
7419 }
7420
a550bb16
HW
7421 if (j == dc_state->stream_count)
7422 continue;
7423
7424 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7425
7426 if (stream->timing.flags.DSC != 1) {
7427 dm_conn_state->pbn = pbn;
7428 dm_conn_state->vcpi_slots = slot_num;
7429
7430 drm_dp_mst_atomic_enable_dsc(state,
7431 aconnector->port,
7432 dm_conn_state->pbn,
7433 0,
7434 false);
7435 continue;
7436 }
7437
29b9ba74
ML
7438 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7439 aconnector->port,
7440 pbn, pbn_div,
7441 true);
7442 if (vcpi < 0)
7443 return vcpi;
7444
7445 dm_conn_state->pbn = pbn;
7446 dm_conn_state->vcpi_slots = vcpi;
7447 }
7448 return 0;
7449}
d9fe1a4c 7450#endif
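/*
 * In the loop above each stream's PBN, computed earlier by
 * compute_mst_dsc_configs_for_state(), is converted to a VCPI slot
 * count with slot_num = DIV_ROUND_UP(pbn, pbn_div); non-DSC streams
 * keep that count directly, while DSC streams let the DRM MST core
 * allocate it via drm_dp_mst_atomic_enable_dsc().
 */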
29b9ba74 7451
e7b07cee
HW
7452static void dm_drm_plane_reset(struct drm_plane *plane)
7453{
7454 struct dm_plane_state *amdgpu_state = NULL;
7455
7456 if (plane->state)
7457 plane->funcs->atomic_destroy_state(plane, plane->state);
7458
7459 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 7460 WARN_ON(amdgpu_state == NULL);
1f6010a9 7461
7ddaef96
NK
7462 if (amdgpu_state)
7463 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
7464}
7465
7466static struct drm_plane_state *
7467dm_drm_plane_duplicate_state(struct drm_plane *plane)
7468{
7469 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7470
7471 old_dm_plane_state = to_dm_plane_state(plane->state);
7472 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7473 if (!dm_plane_state)
7474 return NULL;
7475
7476 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7477
3be5262e
HW
7478 if (old_dm_plane_state->dc_state) {
7479 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7480 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
7481 }
7482
7483 return &dm_plane_state->base;
7484}
7485
dfd84d90 7486static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 7487 struct drm_plane_state *state)
e7b07cee
HW
7488{
7489 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7490
3be5262e
HW
7491 if (dm_plane_state->dc_state)
7492 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 7493
0627bbd3 7494 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
7495}
7496
7497static const struct drm_plane_funcs dm_plane_funcs = {
7498 .update_plane = drm_atomic_helper_update_plane,
7499 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 7500 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
7501 .reset = dm_drm_plane_reset,
7502 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7503 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 7504 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
7505};
7506
3ee6b26b
AD
7507static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7508 struct drm_plane_state *new_state)
e7b07cee
HW
7509{
7510 struct amdgpu_framebuffer *afb;
7511 struct drm_gem_object *obj;
5d43be0c 7512 struct amdgpu_device *adev;
e7b07cee 7513 struct amdgpu_bo *rbo;
e7b07cee 7514 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
7515 struct list_head list;
7516 struct ttm_validate_buffer tv;
7517 struct ww_acquire_ctx ticket;
5d43be0c
CK
7518 uint32_t domain;
7519 int r;
e7b07cee
HW
7520
7521 if (!new_state->fb) {
4711c033 7522 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
7523 return 0;
7524 }
7525
7526 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 7527 obj = new_state->fb->obj[0];
e7b07cee 7528 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 7529 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
7530 INIT_LIST_HEAD(&list);
7531
7532 tv.bo = &rbo->tbo;
7533 tv.num_shared = 1;
7534 list_add(&tv.head, &list);
7535
9165fb87 7536 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
7537 if (r) {
7538 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 7539 return r;
0f257b09 7540 }
e7b07cee 7541
5d43be0c 7542 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 7543 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
7544 else
7545 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 7546
7b7c6c81 7547 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 7548 if (unlikely(r != 0)) {
30b7c614
HW
7549 if (r != -ERESTARTSYS)
7550 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 7551 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
7552 return r;
7553 }
7554
bb812f1e
JZ
7555 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7556 if (unlikely(r != 0)) {
7557 amdgpu_bo_unpin(rbo);
0f257b09 7558 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7559 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
7560 return r;
7561 }
7df7e505 7562
0f257b09 7563 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7564
7b7c6c81 7565 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
7566
7567 amdgpu_bo_ref(rbo);
7568
cf322b49
NK
7569 /**
7570 * We don't do surface updates on planes that have been newly created,
7571 * but we also don't have the afb->address during atomic check.
7572 *
7573 * Fill in buffer attributes depending on the address here, but only on
7574 * newly created planes since they're not being used by DC yet and this
7575 * won't modify global state.
7576 */
7577 dm_plane_state_old = to_dm_plane_state(plane->state);
7578 dm_plane_state_new = to_dm_plane_state(new_state);
7579
3be5262e 7580 if (dm_plane_state_new->dc_state &&
cf322b49
NK
7581 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7582 struct dc_plane_state *plane_state =
7583 dm_plane_state_new->dc_state;
7584 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 7585
320932bf 7586 fill_plane_buffer_attributes(
695af5f9 7587 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 7588 afb->tiling_flags,
cf322b49
NK
7589 &plane_state->tiling_info, &plane_state->plane_size,
7590 &plane_state->dcc, &plane_state->address,
6eed95b0 7591 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
7592 }
7593
e7b07cee
HW
7594 return 0;
7595}
7596
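/*
 * Pinning policy in prepare_fb above: cursor BOs are always pinned to
 * VRAM, other planes may scan out of any domain reported by
 * amdgpu_display_supported_domains(). The amdgpu_ttm_alloc_gart() call
 * then makes sure a GTT-placed buffer has a valid GPU address before it
 * is cached in afb->address.
 */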
3ee6b26b
AD
7597static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7598 struct drm_plane_state *old_state)
e7b07cee
HW
7599{
7600 struct amdgpu_bo *rbo;
e7b07cee
HW
7601 int r;
7602
7603 if (!old_state->fb)
7604 return;
7605
e68d14dd 7606 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
7607 r = amdgpu_bo_reserve(rbo, false);
7608 if (unlikely(r)) {
7609 DRM_ERROR("failed to reserve rbo before unpin\n");
7610 return;
b830ebc9
HW
7611 }
7612
7613 amdgpu_bo_unpin(rbo);
7614 amdgpu_bo_unreserve(rbo);
7615 amdgpu_bo_unref(&rbo);
e7b07cee
HW
7616}
7617
8c44515b
AP
7618static int dm_plane_helper_check_state(struct drm_plane_state *state,
7619 struct drm_crtc_state *new_crtc_state)
7620{
6300b3bd
MK
7621 struct drm_framebuffer *fb = state->fb;
7622 int min_downscale, max_upscale;
7623 int min_scale = 0;
7624 int max_scale = INT_MAX;
7625
40d916a2 7626 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 7627 if (fb && state->crtc) {
40d916a2
NC
7628 /* Validate viewport to cover the case when only the position changes */
7629 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7630 int viewport_width = state->crtc_w;
7631 int viewport_height = state->crtc_h;
7632
7633 if (state->crtc_x < 0)
7634 viewport_width += state->crtc_x;
7635 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7636 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7637
7638 if (state->crtc_y < 0)
7639 viewport_height += state->crtc_y;
7640 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7641 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7642
4abdb72b
NC
7643 if (viewport_width < 0 || viewport_height < 0) {
7644 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7645 return -EINVAL;
7646 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7647 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 7648 return -EINVAL;
4abdb72b
NC
7649 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7650 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 7651 return -EINVAL;
4abdb72b
NC
7652 }
7653
40d916a2
NC
7654 }
7655
7656 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
7657 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7658 &min_downscale, &max_upscale);
7659 /*
7660 * Convert to drm convention: 16.16 fixed point, instead of dc's
7661 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7662 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7663 */
7664 min_scale = (1000 << 16) / max_upscale;
7665 max_scale = (1000 << 16) / min_downscale;
7666 }
8c44515b 7667
8c44515b 7668 return drm_atomic_helper_check_plane_state(
6300b3bd 7669 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
7670}
7671
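/*
 * Example of the fixed-point conversion above (caps values assumed):
 * a DC max_upscale of 16000 (16x, since 1.0 == 1000) gives
 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in DRM 16.16
 * fixed point; a min_downscale of 250 (1/4) gives
 * max_scale = (1000 << 16) / 250 = 262144, i.e. 4.0.
 */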
7578ecda 7672static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 7673 struct drm_atomic_state *state)
cbd19488 7674{
7c11b99a
MR
7675 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7676 plane);
1348969a 7677 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 7678 struct dc *dc = adev->dm.dc;
78171832 7679 struct dm_plane_state *dm_plane_state;
695af5f9 7680 struct dc_scaling_info scaling_info;
8c44515b 7681 struct drm_crtc_state *new_crtc_state;
695af5f9 7682 int ret;
78171832 7683
ba5c1649 7684 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 7685
ba5c1649 7686 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 7687
3be5262e 7688 if (!dm_plane_state->dc_state)
9a3329b1 7689 return 0;
cbd19488 7690
8c44515b 7691 new_crtc_state =
dec92020 7692 drm_atomic_get_new_crtc_state(state,
ba5c1649 7693 new_plane_state->crtc);
8c44515b
AP
7694 if (!new_crtc_state)
7695 return -EINVAL;
7696
ba5c1649 7697 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
7698 if (ret)
7699 return ret;
7700
4375d625 7701 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
695af5f9
NK
7702 if (ret)
7703 return ret;
a05bcff1 7704
62c933f9 7705 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
7706 return 0;
7707
7708 return -EINVAL;
7709}
7710
674e78ac 7711static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7712 struct drm_atomic_state *state)
674e78ac
NK
7713{
7714 /* Only support async updates on cursor planes. */
7715 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7716 return -EINVAL;
7717
7718 return 0;
7719}
7720
7721static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7722 struct drm_atomic_state *state)
674e78ac 7723{
5ddb0bd4
MR
7724 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7725 plane);
674e78ac 7726 struct drm_plane_state *old_state =
5ddb0bd4 7727 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7728
e8a98235
RS
7729 trace_amdgpu_dm_atomic_update_cursor(new_state);
7730
332af874 7731 swap(plane->state->fb, new_state->fb);
674e78ac
NK
7732
7733 plane->state->src_x = new_state->src_x;
7734 plane->state->src_y = new_state->src_y;
7735 plane->state->src_w = new_state->src_w;
7736 plane->state->src_h = new_state->src_h;
7737 plane->state->crtc_x = new_state->crtc_x;
7738 plane->state->crtc_y = new_state->crtc_y;
7739 plane->state->crtc_w = new_state->crtc_w;
7740 plane->state->crtc_h = new_state->crtc_h;
7741
7742 handle_cursor_update(plane, old_state);
7743}
7744
e7b07cee
HW
7745static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7746 .prepare_fb = dm_plane_helper_prepare_fb,
7747 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7748 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
7749 .atomic_async_check = dm_plane_atomic_async_check,
7750 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
7751};
7752
7753/*
7754 * TODO: these are currently initialized to rgb formats only.
7755 * For future use cases we should either initialize them dynamically based on
7756 * plane capabilities, or initialize this array to all formats, so internal drm
1f6010a9 7757 * check will succeed, and let DC implement the proper checks
e7b07cee 7758 */
d90371b0 7759static const uint32_t rgb_formats[] = {
e7b07cee
HW
7760 DRM_FORMAT_XRGB8888,
7761 DRM_FORMAT_ARGB8888,
7762 DRM_FORMAT_RGBA8888,
7763 DRM_FORMAT_XRGB2101010,
7764 DRM_FORMAT_XBGR2101010,
7765 DRM_FORMAT_ARGB2101010,
7766 DRM_FORMAT_ABGR2101010,
58020403
MK
7767 DRM_FORMAT_XRGB16161616,
7768 DRM_FORMAT_XBGR16161616,
7769 DRM_FORMAT_ARGB16161616,
7770 DRM_FORMAT_ABGR16161616,
bcd47f60
MR
7771 DRM_FORMAT_XBGR8888,
7772 DRM_FORMAT_ABGR8888,
46dd9ff7 7773 DRM_FORMAT_RGB565,
e7b07cee
HW
7774};
7775
0d579c7e
NK
7776static const uint32_t overlay_formats[] = {
7777 DRM_FORMAT_XRGB8888,
7778 DRM_FORMAT_ARGB8888,
7779 DRM_FORMAT_RGBA8888,
7780 DRM_FORMAT_XBGR8888,
7781 DRM_FORMAT_ABGR8888,
7267a1a9 7782 DRM_FORMAT_RGB565
e7b07cee
HW
7783};
7784
7785static const u32 cursor_formats[] = {
7786 DRM_FORMAT_ARGB8888
7787};
7788
37c6a93b
NK
7789static int get_plane_formats(const struct drm_plane *plane,
7790 const struct dc_plane_cap *plane_cap,
7791 uint32_t *formats, int max_formats)
e7b07cee 7792{
37c6a93b
NK
7793 int i, num_formats = 0;
7794
7795 /*
7796 * TODO: Query support for each group of formats directly from
7797 * DC plane caps. This will require adding more formats to the
7798 * caps list.
7799 */
e7b07cee 7800
f180b4bc 7801 switch (plane->type) {
e7b07cee 7802 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
7803 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7804 if (num_formats >= max_formats)
7805 break;
7806
7807 formats[num_formats++] = rgb_formats[i];
7808 }
7809
ea36ad34 7810 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 7811 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
7812 if (plane_cap && plane_cap->pixel_format_support.p010)
7813 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
7814 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7815 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7816 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
7817 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7818 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 7819 }
e7b07cee 7820 break;
37c6a93b 7821
e7b07cee 7822 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
7823 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7824 if (num_formats >= max_formats)
7825 break;
7826
7827 formats[num_formats++] = overlay_formats[i];
7828 }
e7b07cee 7829 break;
37c6a93b 7830
e7b07cee 7831 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
7832 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7833 if (num_formats >= max_formats)
7834 break;
7835
7836 formats[num_formats++] = cursor_formats[i];
7837 }
e7b07cee
HW
7838 break;
7839 }
7840
37c6a93b
NK
7841 return num_formats;
7842}
7843
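/*
 * Primary planes advertise the RGB formats unconditionally; NV12, P010
 * and the FP16 variants are only appended when the DC plane caps report
 * support, so the resulting format list differs per ASIC and per plane.
 */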
7844static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7845 struct drm_plane *plane,
7846 unsigned long possible_crtcs,
7847 const struct dc_plane_cap *plane_cap)
7848{
7849 uint32_t formats[32];
7850 int num_formats;
7851 int res = -EPERM;
ecc874a6 7852 unsigned int supported_rotations;
faa37f54 7853 uint64_t *modifiers = NULL;
37c6a93b
NK
7854
7855 num_formats = get_plane_formats(plane, plane_cap, formats,
7856 ARRAY_SIZE(formats));
7857
faa37f54
BN
7858 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7859 if (res)
7860 return res;
7861
4a580877 7862 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7863 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
7864 modifiers, plane->type, NULL);
7865 kfree(modifiers);
37c6a93b
NK
7866 if (res)
7867 return res;
7868
cc1fec57
NK
7869 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7870 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
7871 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7872 BIT(DRM_MODE_BLEND_PREMULTI);
7873
7874 drm_plane_create_alpha_property(plane);
7875 drm_plane_create_blend_mode_property(plane, blend_caps);
7876 }
7877
fc8e5230 7878 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
7879 plane_cap &&
7880 (plane_cap->pixel_format_support.nv12 ||
7881 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
7882 /* This only affects YUV formats. */
7883 drm_plane_create_color_properties(
7884 plane,
7885 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
7886 BIT(DRM_COLOR_YCBCR_BT709) |
7887 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
7888 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7889 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7890 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7891 }
7892
ecc874a6
PLG
7893 supported_rotations =
7894 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7895 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7896
1347385f
SS
7897 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7898 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
7899 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7900 supported_rotations);
ecc874a6 7901
f180b4bc 7902 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7903
96719c54 7904 /* Create (reset) the plane state */
f180b4bc
HW
7905 if (plane->funcs->reset)
7906 plane->funcs->reset(plane);
96719c54 7907
37c6a93b 7908 return 0;
e7b07cee
HW
7909}
7910
7578ecda
AD
7911static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7912 struct drm_plane *plane,
7913 uint32_t crtc_index)
e7b07cee
HW
7914{
7915 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7916 struct drm_plane *cursor_plane;
e7b07cee
HW
7917
7918 int res = -ENOMEM;
7919
7920 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7921 if (!cursor_plane)
7922 goto fail;
7923
f180b4bc 7924 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7925 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
7926
7927 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7928 if (!acrtc)
7929 goto fail;
7930
7931 res = drm_crtc_init_with_planes(
7932 dm->ddev,
7933 &acrtc->base,
7934 plane,
f180b4bc 7935 cursor_plane,
e7b07cee
HW
7936 &amdgpu_dm_crtc_funcs, NULL);
7937
7938 if (res)
7939 goto fail;
7940
7941 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7942
96719c54
HW
7943 /* Create (reset) the plane state */
7944 if (acrtc->base.funcs->reset)
7945 acrtc->base.funcs->reset(&acrtc->base);
7946
e7b07cee
HW
7947 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7948 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7949
7950 acrtc->crtc_id = crtc_index;
7951 acrtc->base.enabled = false;
c37e2d29 7952 acrtc->otg_inst = -1;
e7b07cee
HW
7953
7954 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
7955 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7956 true, MAX_COLOR_LUT_ENTRIES);
086247a4 7957 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 7958
e7b07cee
HW
7959 return 0;
7960
7961fail:
b830ebc9
HW
7962 kfree(acrtc);
7963 kfree(cursor_plane);
e7b07cee
HW
7964 return res;
7965}
7966
7967
7968static int to_drm_connector_type(enum signal_type st)
7969{
7970 switch (st) {
7971 case SIGNAL_TYPE_HDMI_TYPE_A:
7972 return DRM_MODE_CONNECTOR_HDMIA;
7973 case SIGNAL_TYPE_EDP:
7974 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
7975 case SIGNAL_TYPE_LVDS:
7976 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
7977 case SIGNAL_TYPE_RGB:
7978 return DRM_MODE_CONNECTOR_VGA;
7979 case SIGNAL_TYPE_DISPLAY_PORT:
7980 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7981 return DRM_MODE_CONNECTOR_DisplayPort;
7982 case SIGNAL_TYPE_DVI_DUAL_LINK:
7983 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7984 return DRM_MODE_CONNECTOR_DVID;
7985 case SIGNAL_TYPE_VIRTUAL:
7986 return DRM_MODE_CONNECTOR_VIRTUAL;
7987
7988 default:
7989 return DRM_MODE_CONNECTOR_Unknown;
7990 }
7991}
7992
2b4c1c05
DV
7993static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7994{
62afb4ad
JRS
7995 struct drm_encoder *encoder;
7996
7997 /* There is only one encoder per connector */
7998 drm_connector_for_each_possible_encoder(connector, encoder)
7999 return encoder;
8000
8001 return NULL;
2b4c1c05
DV
8002}
8003
e7b07cee
HW
8004static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8005{
e7b07cee
HW
8006 struct drm_encoder *encoder;
8007 struct amdgpu_encoder *amdgpu_encoder;
8008
2b4c1c05 8009 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
8010
8011 if (encoder == NULL)
8012 return;
8013
8014 amdgpu_encoder = to_amdgpu_encoder(encoder);
8015
8016 amdgpu_encoder->native_mode.clock = 0;
8017
8018 if (!list_empty(&connector->probed_modes)) {
8019 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 8020
e7b07cee 8021 list_for_each_entry(preferred_mode,
b830ebc9
HW
8022 &connector->probed_modes,
8023 head) {
8024 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8025 amdgpu_encoder->native_mode = *preferred_mode;
8026
e7b07cee
HW
8027 break;
8028 }
8029
8030 }
8031}
8032
3ee6b26b
AD
8033static struct drm_display_mode *
8034amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8035 char *name,
8036 int hdisplay, int vdisplay)
e7b07cee
HW
8037{
8038 struct drm_device *dev = encoder->dev;
8039 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8040 struct drm_display_mode *mode = NULL;
8041 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8042
8043 mode = drm_mode_duplicate(dev, native_mode);
8044
b830ebc9 8045 if (mode == NULL)
e7b07cee
HW
8046 return NULL;
8047
8048 mode->hdisplay = hdisplay;
8049 mode->vdisplay = vdisplay;
8050 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 8051 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
8052
8053 return mode;
8054
8055}
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}
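
/*
 * Internal panels (eDP/LVDS) can be mounted rotated; ask DRM to apply any
 * known panel-orientation quirk for this native resolution.
 */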
static void amdgpu_set_panel_orientation(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	const struct drm_display_mode *native_mode;

	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
		return;

	encoder = amdgpu_dm_connector_to_encoder(connector);
	if (!encoder)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	native_mode = &amdgpu_encoder->native_mode;
	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
		return;

	drm_connector_set_panel_orientation_with_quirk(connector,
						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
						       native_mode->hdisplay,
						       native_mode->vdisplay);
}
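
/*
 * Populate probed_modes from a freshly parsed EDID, then restore the
 * FreeSync capabilities that drm_add_edid_modes() resets and apply any
 * panel-orientation quirk.
 */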
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can report
		 * more than one preferred mode. Modes later in the probed
		 * list can have a higher, preferred resolution: for
		 * example, 3840x2160 in the base EDID preferred timing and
		 * 4096x2160 in a later DID extension block.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);

		/* Freesync capabilities are reset by calling
		 * drm_add_edid_modes() and need to be
		 * restored here.
		 */
		amdgpu_dm_update_freesync_caps(connector, edid);

		amdgpu_set_panel_orientation(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
			      struct drm_display_mode *mode)
{
	struct drm_display_mode *m;

	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
		if (drm_mode_equal(m, mode))
			return true;
	}

	return false;
}
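
/*
 * Synthesize fixed-refresh variants of the highest-refresh probed mode by
 * stretching its vertical total, one per standard rate that fits in the
 * panel's FreeSync range. Worked example: a 60 Hz mode with vtotal 1125
 * retargeted to 48 Hz needs vtotal 1125 * 60 / 48 ~= 1406.
 */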
static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
	const struct drm_display_mode *m;
	struct drm_display_mode *new_mode;
	uint i;
	uint32_t new_modes_count = 0;

	/* Standard FPS values
	 *
	 * 23.976       - TV/NTSC
	 * 24           - Cinema
	 * 25           - TV/PAL
	 * 29.97        - TV/NTSC
	 * 30           - TV/NTSC
	 * 48           - Cinema HFR
	 * 50           - TV/PAL
	 * 60           - Commonly used
	 * 48,72,96,120 - Multiples of 24
	 */
	static const uint32_t common_rates[] = {
		23976, 24000, 25000, 29970, 30000,
		48000, 50000, 60000, 72000, 96000, 120000
	};

	/*
	 * Find mode with highest refresh rate with the same resolution
	 * as the preferred mode. Some monitors report a preferred mode
	 * with lower resolution than the highest refresh rate supported.
	 */

	m = get_highest_refresh_rate_mode(aconnector, true);
	if (!m)
		return 0;

	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
		uint64_t target_vtotal, target_vtotal_diff;
		uint64_t num, den;

		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
			continue;

		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
		    common_rates[i] > aconnector->max_vfreq * 1000)
			continue;

		num = (unsigned long long)m->clock * 1000 * 1000;
		den = common_rates[i] * (unsigned long long)m->htotal;
		target_vtotal = div_u64(num, den);
		target_vtotal_diff = target_vtotal - m->vtotal;

		/* Check for illegal modes */
		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
		    m->vtotal + target_vtotal_diff < m->vsync_end)
			continue;

		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
		if (!new_mode)
			goto out;

		new_mode->vtotal += (u16)target_vtotal_diff;
		new_mode->vsync_start += (u16)target_vtotal_diff;
		new_mode->vsync_end += (u16)target_vtotal_diff;
		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
		new_mode->type |= DRM_MODE_TYPE_DRIVER;

		if (!is_duplicate_mode(aconnector, new_mode)) {
			drm_mode_probed_add(&aconnector->base, new_mode);
			new_modes_count += 1;
		} else {
			drm_mode_destroy(aconnector->base.dev, new_mode);
		}
	}
out:
	return new_modes_count;
}

static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
						   struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);

	if (!(amdgpu_freesync_vid_mode && edid))
		return;

	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
		amdgpu_dm_connector->num_modes +=
			add_fs_modes(amdgpu_dm_connector);
}
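
/*
 * .get_modes hook: fill the connector's mode list from the EDID plus the
 * common and FreeSync tables, or fall back to a bare 640x480 when no
 * valid EDID is available.
 */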
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
		amdgpu_dm_connector_add_freesync_modes(connector, edid);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		link->link_enc = dp_get_link_enc(link);
		ASSERT(link->link_enc);
		if (link->link_enc)
			aconnector->base.ycbcr_420_allowed =
				link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
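
/*
 * i2c_algorithm .master_xfer hook: translate an i2c_msg array into a DC
 * i2c_command and submit it on this connector's DDC channel. Returns the
 * number of messages transferred, or -EIO on failure.
 */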
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
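
/*
 * Allocate and populate the software i2c adapter fronting a DC DDC line;
 * registration with the i2c core happens later, in
 * amdgpu_dm_connector_init().
 */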
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	if (i2c->ddc_service->ddc_pin)
		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}

/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}
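
/*
 * Build an encoder's possible_crtcs bitmask: one bit per CRTC, so
 * num_crtc = 4 yields 0xf (CRTCs 0-3); anything past six is clamped
 * to 0x3f.
 */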
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_get(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
	} else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_put(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * This reads the current state for the IRQ and force-reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector,
					    struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this
	 *
	 * Handles: ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since the old state will always be 0 (UNDESIRED)
	 * and the restored state will be ENABLED
	 *
	 * Handles: UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Stream removed and re-enabled
	 *
	 * Can sometimes overlap with the HPD case,
	 * thus set update_hdcp to false to avoid
	 * setting HDCP multiple times.
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (!(old_state->crtc && old_state->crtc->enabled) &&
	    state->crtc && state->crtc->enabled &&
	    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/* Hot-plug, headless s3, dpms
	 *
	 * Only start HDCP if the display is connected/enabled.
	 * update_hdcp flag will be set to false until the next
	 * HPD comes in.
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles: UNDESIRED -> UNDESIRED
	 *          DESIRED -> DESIRED
	 *          ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles: UNDESIRED -> DESIRED
	 *          DESIRED -> UNDESIRED
	 *          ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles: DESIRED -> ENABLED
	 */
	return false;
}

#endif

static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
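
/*
 * Convert the cursor plane state into a dc_cursor_position. Negative
 * coordinates are clamped to zero with the overhang folded into the
 * hotspot, so the cursor can slide off the top-left edge of the screen.
 */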
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}
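
/*
 * Program (or disable) the hardware cursor for one plane under the DC
 * lock. A position with enable == false turns the cursor off entirely.
 */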
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position = {0};
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
		      __func__,
		      amdgpu_crtc->crtc_id,
		      plane->state->crtc_w,
		      plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part = lower_32_bits(address);
	attributes.width = plane->state->crtc_w;
	attributes.height = plane->state->crtc_h;
	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
		     acrtc->crtc_id);
}
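
/*
 * Recompute the VRR parameters and infopacket for a stream around a flip,
 * and mirror the results into both the DRM CRTC state and the IRQ-handler
 * copy under the event lock.
 */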
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;
	bool pack_sdp_v1_3 = false;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket,
		pack_sdp_v1_3);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		/*
		 * if freesync compatible mode was set, config.state will be set
		 * in atomic check
		 */
		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
		} else {
			config.state = new_crtc_state->base.vrr_enabled ?
						VRR_STATE_ACTIVE_VARIABLE :
						VRR_STATE_INACTIVE;
		}
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}

static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_old_plane_in_state(state, plane, old_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}
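
/*
 * Core of the commit path for one CRTC: build a dc_surface_update bundle
 * for every plane, throttle against the target vblank, arm the pageflip
 * event, and hand the whole bundle to DC in a single
 * dc_commit_updates_for_stream() call.
 */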
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(dm->adev, new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
					  msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			      amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/*
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0 &&
		    !acrtc_state->force_dpms_off) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		/*
		 * If PSR or idle optimizations are enabled then flush out
		 * any pending work before hardware programming.
		 */
		if (dm->vblank_control_workqueue)
			flush_workqueue(dm->vblank_control_workqueue);
#endif

		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		/*
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),
						  acrtc_attach);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);

		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
			struct amdgpu_dm_connector *aconn =
				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;

			if (aconn->psr_skip_count > 0)
				aconn->psr_skip_count--;

			/* Allow PSR when skip count is 0. */
			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
		} else {
			acrtc_attach->dm_irq_params.allow_psr_entry = false;
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}
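
/*
 * Tell the audio side about streams that disappeared or appeared in this
 * commit so ELD notifications track each connector's audio instance.
 */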
9398
6ce8f316
NK
9399static void amdgpu_dm_commit_audio(struct drm_device *dev,
9400 struct drm_atomic_state *state)
9401{
1348969a 9402 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
9403 struct amdgpu_dm_connector *aconnector;
9404 struct drm_connector *connector;
9405 struct drm_connector_state *old_con_state, *new_con_state;
9406 struct drm_crtc_state *new_crtc_state;
9407 struct dm_crtc_state *new_dm_crtc_state;
9408 const struct dc_stream_status *status;
9409 int i, inst;
9410
9411 /* Notify device removals. */
9412 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9413 if (old_con_state->crtc != new_con_state->crtc) {
9414 /* CRTC changes require notification. */
9415 goto notify;
9416 }
9417
9418 if (!new_con_state->crtc)
9419 continue;
9420
9421 new_crtc_state = drm_atomic_get_new_crtc_state(
9422 state, new_con_state->crtc);
9423
9424 if (!new_crtc_state)
9425 continue;
9426
9427 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9428 continue;
9429
9430 notify:
9431 aconnector = to_amdgpu_dm_connector(connector);
9432
9433 mutex_lock(&adev->dm.audio_lock);
9434 inst = aconnector->audio_inst;
9435 aconnector->audio_inst = -1;
9436 mutex_unlock(&adev->dm.audio_lock);
9437
9438 amdgpu_dm_audio_eld_notify(adev, inst);
9439 }
9440
9441 /* Notify audio device additions. */
9442 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9443 if (!new_con_state->crtc)
9444 continue;
9445
9446 new_crtc_state = drm_atomic_get_new_crtc_state(
9447 state, new_con_state->crtc);
9448
9449 if (!new_crtc_state)
9450 continue;
9451
9452 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9453 continue;
9454
9455 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9456 if (!new_dm_crtc_state->stream)
9457 continue;
9458
9459 status = dc_stream_get_status(new_dm_crtc_state->stream);
9460 if (!status)
9461 continue;
9462
9463 aconnector = to_amdgpu_dm_connector(connector);
9464
9465 mutex_lock(&adev->dm.audio_lock);
9466 inst = status->audio_inst;
9467 aconnector->audio_inst = inst;
9468 mutex_unlock(&adev->dm.audio_lock);
9469
9470 amdgpu_dm_audio_eld_notify(adev, inst);
9471 }
9472}
9473
1f6010a9 9474/*
27b3f4fc
LSL
9475 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9476 * @crtc_state: the DRM CRTC state
9477 * @stream_state: the DC stream state.
9478 *
9479 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9480 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9481 */
9482static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9483 struct dc_stream_state *stream_state)
9484{
b9952f93 9485 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 9486}
e7b07cee 9487
b8592b48
LL
9488/**
9489 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9490 * @state: The atomic state to commit
9491 *
9492 * This will tell DC to commit the constructed DC state from atomic_check,
9493 * programming the hardware. Any failures here implies a hardware failure, since
9494 * atomic check should have filtered anything non-kosher.
9495 */
7578ecda 9496static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
9497{
9498 struct drm_device *dev = state->dev;
1348969a 9499 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
9500 struct amdgpu_display_manager *dm = &adev->dm;
9501 struct dm_atomic_state *dm_state;
eb3dc897 9502 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 9503 uint32_t i, j;
5cc6dcbd 9504 struct drm_crtc *crtc;
0bc9706d 9505 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
9506 unsigned long flags;
9507 bool wait_for_vblank = true;
9508 struct drm_connector *connector;
c2cea706 9509 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 9510 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 9511 int crtc_disable_count = 0;
6ee90e88 9512 bool mode_set_reset_required = false;
e7b07cee 9513
e8a98235
RS
9514 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9515
e7b07cee
HW
9516 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9517
eb3dc897
NK
9518 dm_state = dm_atomic_get_new_state(state);
9519 if (dm_state && dm_state->context) {
9520 dc_state = dm_state->context;
9521 } else {
9522 /* No state changes, retain current state. */
813d20dc 9523 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
9524 ASSERT(dc_state_temp);
9525 dc_state = dc_state_temp;
9526 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9527 }
e7b07cee 9528
6d90a208
AP
9529 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9530 new_crtc_state, i) {
9531 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9532
9533 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9534
9535 if (old_crtc_state->active &&
9536 (!new_crtc_state->active ||
9537 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9538 manage_dm_interrupts(adev, acrtc, false);
9539 dc_stream_release(dm_old_crtc_state->stream);
9540 }
9541 }
9542
8976f73b
RS
9543 drm_atomic_helper_calc_timestamping_constants(state);
9544
e7b07cee 9545 /* update changed items */
0bc9706d 9546 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 9547 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9548
54d76575
LSL
9549 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9550 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 9551
4711c033 9552 DRM_DEBUG_ATOMIC(
e7b07cee
HW
9553 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9554 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9555 "connectors_changed:%d\n",
9556 acrtc->crtc_id,
0bc9706d
LSL
9557 new_crtc_state->enable,
9558 new_crtc_state->active,
9559 new_crtc_state->planes_changed,
9560 new_crtc_state->mode_changed,
9561 new_crtc_state->active_changed,
9562 new_crtc_state->connectors_changed);
e7b07cee 9563
5c68c652
VL
9564 /* Disable cursor if disabling crtc */
9565 if (old_crtc_state->active && !new_crtc_state->active) {
9566 struct dc_cursor_position position;
9567
9568 memset(&position, 0, sizeof(position));
9569 mutex_lock(&dm->dc_lock);
9570 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9571 mutex_unlock(&dm->dc_lock);
9572 }
9573
27b3f4fc
LSL
9574 /* Copy all transient state flags into dc state */
9575 if (dm_new_crtc_state->stream) {
9576 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9577 dm_new_crtc_state->stream);
9578 }
9579
e7b07cee
HW
9580 /* handles headless hotplug case, updating new_state and
9581 * aconnector as needed
9582 */
9583
54d76575 9584 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 9585
4711c033 9586 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9587
54d76575 9588 if (!dm_new_crtc_state->stream) {
e7b07cee 9589 /*
b830ebc9
HW
9590 * this could happen because of issues with
9591 * userspace notifications delivery.
9592 * In this case userspace tries to set mode on
1f6010a9
DF
9593 * display which is disconnected in fact.
9594 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
9595 * We expect reset mode will come soon.
9596 *
9597 * This can also happen when unplug is done
9598 * during resume sequence ended
9599 *
9600 * In this case, we want to pretend we still
9601 * have a sink to keep the pipe running so that
9602 * hw state is consistent with the sw state
9603 */
f1ad2f5e 9604 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
9605 __func__, acrtc->base.base.id);
9606 continue;
9607 }
9608
54d76575
LSL
9609 if (dm_old_crtc_state->stream)
9610 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 9611
97028037
LP
9612 pm_runtime_get_noresume(dev->dev);
9613
e7b07cee 9614 acrtc->enabled = true;
0bc9706d
LSL
9615 acrtc->hw_mode = new_crtc_state->mode;
9616 crtc->hwmode = new_crtc_state->mode;
6ee90e88 9617 mode_set_reset_required = true;
0bc9706d 9618 } else if (modereset_required(new_crtc_state)) {
4711c033 9619 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9620 /* i.e. reset mode */
6ee90e88 9621 if (dm_old_crtc_state->stream)
54d76575 9622 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 9623
6ee90e88 9624 mode_set_reset_required = true;
e7b07cee
HW
9625 }
9626 } /* for_each_crtc_in_state() */
9627
eb3dc897 9628 if (dc_state) {
6ee90e88 9629 /* if there mode set or reset, disable eDP PSR */
58aa1c50 9630 if (mode_set_reset_required) {
96160687 9631#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
9632 if (dm->vblank_control_workqueue)
9633 flush_workqueue(dm->vblank_control_workqueue);
96160687 9634#endif
6ee90e88 9635 amdgpu_dm_psr_disable_all(dm);
58aa1c50 9636 }
6ee90e88 9637
eb3dc897 9638 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 9639 mutex_lock(&dm->dc_lock);
eb3dc897 9640 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5af50b0b
BR
9641#if defined(CONFIG_DRM_AMD_DC_DCN)
9642 /* Allow idle optimization when vblank count is 0 for display off */
9643 if (dm->active_vblank_irq_count == 0)
9644 dc_allow_idle_optimizations(dm->dc, true);
9645#endif
674e78ac 9646 mutex_unlock(&dm->dc_lock);
fa2123db 9647 }
fe8858bb 9648
0bc9706d 9649 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9650 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9651
54d76575 9652 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9653
54d76575 9654 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 9655 const struct dc_stream_status *status =
54d76575 9656 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 9657
eb3dc897 9658 if (!status)
09f609c3
LL
9659 status = dc_stream_get_status_from_state(dc_state,
9660 dm_new_crtc_state->stream);
e7b07cee 9661 if (!status)
54d76575 9662 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9663 else
9664 acrtc->otg_inst = status->primary_otg_inst;
9665 }
9666 }
9667#ifdef CONFIG_DRM_AMD_DC_HDCP
9668 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9669 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9670 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9671 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9672
9673 new_crtc_state = NULL;
9674
9675 if (acrtc)
9676 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9677
9678 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9679
9680 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9681 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9682 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9683 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 9684 dm_new_con_state->update_hdcp = true;
9685 continue;
9686 }
9687
9688 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9689 hdcp_update_display(
9690 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 9691 new_con_state->hdcp_content_type,
0e86d3d4 9692 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9693 }
9694#endif
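	/*
	 * A sketch of the content-protection transitions handled above,
	 * assuming the standard DRM content-protection property semantics:
	 *
	 *   ENABLED and the stream was removed -> hdcp_reset_display(),
	 *     property demoted to DESIRED, update_hdcp set so HDCP is
	 *     re-enabled once a stream comes back;
	 *   DESIRED/UNDESIRED changed vs. the old state ->
	 *     hdcp_update_display() with the requested content type and an
	 *     enable flag derived from the new property value.
	 */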
e7b07cee 9695
02d6a6fc 9696 /* Handle connector state changes */
c2cea706 9697 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9698 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9699 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9700 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 9701 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 9702 struct dc_stream_update stream_update;
b232d4ed 9703 struct dc_info_packet hdr_packet;
e7b07cee 9704 struct dc_stream_status *status = NULL;
b232d4ed 9705 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 9706
efc8278e 9707 memset(&dummy_updates, 0, sizeof(dummy_updates));
9708 memset(&stream_update, 0, sizeof(stream_update));
9709
44d09c6a 9710 if (acrtc) {
0bc9706d 9711 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9712 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9713 }
0bc9706d 9714
e7b07cee 9715 /* Skip any modesets/resets */
0bc9706d 9716 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9717 continue;
9718
54d76575 9719 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9720 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9721
9722 scaling_changed = is_scaling_state_different(dm_new_con_state,
9723 dm_old_con_state);
9724
9725 abm_changed = dm_new_crtc_state->abm_level !=
9726 dm_old_crtc_state->abm_level;
9727
9728 hdr_changed =
72921cdf 9729 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9730
9731 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 9732 continue;
e7b07cee 9733
b6e881c9 9734 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 9735 if (scaling_changed) {
02d6a6fc 9736 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 9737 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 9738
9739 stream_update.src = dm_new_crtc_state->stream->src;
9740 stream_update.dst = dm_new_crtc_state->stream->dst;
9741 }
9742
b232d4ed 9743 if (abm_changed) {
9744 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9745
9746 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9747 }
70e8ffc5 9748
9749 if (hdr_changed) {
9750 fill_hdr_info_packet(new_con_state, &hdr_packet);
9751 stream_update.hdr_static_metadata = &hdr_packet;
9752 }
9753
54d76575 9754 status = dc_stream_get_status(dm_new_crtc_state->stream);
9755
9756 if (WARN_ON(!status))
9757 continue;
9758
3be5262e 9759 WARN_ON(!status->plane_count);
e7b07cee 9760
9761 /*
9762 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9763 * Here we create an empty update on each plane.
9764 * To fix this, DC should permit updating only stream properties.
9765 */
9766 for (j = 0; j < status->plane_count; j++)
efc8278e 9767 dummy_updates[j].surface = status->plane_states[0];
9768
9769
9770 mutex_lock(&dm->dc_lock);
9771 dc_commit_updates_for_stream(dm->dc,
efc8278e 9772 dummy_updates,
9773 status->plane_count,
9774 dm_new_crtc_state->stream,
9775 &stream_update,
9776 dc_state);
02d6a6fc 9777 mutex_unlock(&dm->dc_lock);
9778 }
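	/*
	 * Minimal sketch of the stream-only update pattern used above, for a
	 * hypothetical ABM-only change on a stream that currently has
	 * status->plane_count planes:
	 *
	 *   struct dc_surface_update dummy[MAX_SURFACES] = { 0 };
	 *   struct dc_stream_update su = { 0 };
	 *
	 *   su.stream = stream;
	 *   su.abm_level = &new_abm_level;
	 *   for (j = 0; j < status->plane_count; j++)
	 *           dummy[j].surface = status->plane_states[0];
	 *   dc_commit_updates_for_stream(dc, dummy, status->plane_count,
	 *                                stream, &su, dc_state);
	 */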
9779
b5e83f6f 9780 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 9781 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 9782 new_crtc_state, i) {
9783 if (old_crtc_state->active && !new_crtc_state->active)
9784 crtc_disable_count++;
9785
54d76575 9786 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 9787 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 9788
9789 /* For freesync config update on crtc state and params for irq */
9790 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 9791
9792 /* Handle vrr on->off / off->on transitions */
9793 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9794 dm_new_crtc_state);
9795 }
9796
9797 /**
9798 * Enable interrupts for CRTCs that are newly enabled or went through
9799 * a modeset. It was intentionally deferred until after the front end
9800 * state was modified to wait until the OTG was on and so the IRQ
9801 * handlers didn't access stale or invalid state.
9802 */
9803 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9804 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 9805#ifdef CONFIG_DEBUG_FS
86bc2219 9806 bool configure_crc = false;
8e7b6fee 9807 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9808#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9809 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9810#endif
9811 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9812 cur_crc_src = acrtc->dm_irq_params.crc_src;
9813 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 9814#endif
9815 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9816
9817 if (new_crtc_state->active &&
9818 (!old_crtc_state->active ||
9819 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9820 dc_stream_retain(dm_new_crtc_state->stream);
9821 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 9822 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 9823
24eb9374 9824#ifdef CONFIG_DEBUG_FS
9825 /**
9826 * Frontend may have changed so reapply the CRC capture
9827 * settings for the stream.
9828 */
9829 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 9830
8e7b6fee 9831 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9832 configure_crc = true;
9833#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9834 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9835 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9836 acrtc->dm_irq_params.crc_window.update_win = true;
9837 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9838 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9839 crc_rd_wrk->crtc = crtc;
9840 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9841 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9842 }
86bc2219 9843#endif
e2881d6d 9844 }
c920888c 9845
86bc2219 9846 if (configure_crc)
9847 if (amdgpu_dm_crtc_configure_crc_source(
9848 crtc, dm_new_crtc_state, cur_crc_src))
9849 DRM_DEBUG_DRIVER("Failed to configure crc source");
24eb9374 9850#endif
9851 }
9852 }
e7b07cee 9853
420cd472 9854 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 9855 if (new_crtc_state->async_flip)
9856 wait_for_vblank = false;
9857
e7b07cee 9858 /* update planes when needed per crtc*/
5cc6dcbd 9859 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 9860 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9861
54d76575 9862 if (dm_new_crtc_state->stream)
eb3dc897 9863 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 9864 dm, crtc, wait_for_vblank);
9865 }
9866
9867 /* Update audio instances for each connector. */
9868 amdgpu_dm_commit_audio(dev, state);
9869
9870#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9871 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9872 /* restore the backlight level */
9873 for (i = 0; i < dm->num_of_edps; i++) {
9874 if (dm->backlight_dev[i] &&
9875 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9876 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9877 }
7230362c 9878#endif
9879 /*
9880 * send vblank event on all events not handled in flip and
9881 * mark consumed event for drm_atomic_helper_commit_hw_done
9882 */
4a580877 9883 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 9884 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9885
9886 if (new_crtc_state->event)
9887 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 9888
0bc9706d 9889 new_crtc_state->event = NULL;
e7b07cee 9890 }
4a580877 9891 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 9892
9893 /* Signal HW programming completion */
9894 drm_atomic_helper_commit_hw_done(state);
9895
9896 if (wait_for_vblank)
320a1274 9897 drm_atomic_helper_wait_for_flip_done(dev, state);
9898
9899 drm_atomic_helper_cleanup_planes(dev, state);
97028037 9900
9901 /* return the stolen vga memory back to VRAM */
9902 if (!adev->mman.keep_stolen_vga_memory)
9903 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9904 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9905
9906 /*
9907 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9908 * so we can put the GPU into runtime suspend if we're not driving any
9909 * displays anymore
9910 */
9911 for (i = 0; i < crtc_disable_count; i++)
9912 pm_runtime_put_autosuspend(dev->dev);
97028037 9913 pm_runtime_mark_last_busy(dev->dev);
9914
9915 if (dc_state_temp)
9916 dc_release_state(dc_state_temp);
9917}
9918
9919
9920static int dm_force_atomic_commit(struct drm_connector *connector)
9921{
9922 int ret = 0;
9923 struct drm_device *ddev = connector->dev;
9924 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9925 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9926 struct drm_plane *plane = disconnected_acrtc->base.primary;
9927 struct drm_connector_state *conn_state;
9928 struct drm_crtc_state *crtc_state;
9929 struct drm_plane_state *plane_state;
9930
9931 if (!state)
9932 return -ENOMEM;
9933
9934 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9935
9936 /* Construct an atomic state to restore previous display setting */
9937
9938 /*
9939 * Attach connectors to drm_atomic_state
9940 */
9941 conn_state = drm_atomic_get_connector_state(state, connector);
9942
9943 ret = PTR_ERR_OR_ZERO(conn_state);
9944 if (ret)
2dc39051 9945 goto out;
9946
9947 /* Attach crtc to drm_atomic_state*/
9948 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9949
9950 ret = PTR_ERR_OR_ZERO(crtc_state);
9951 if (ret)
2dc39051 9952 goto out;
9953
9954 /* force a restore */
9955 crtc_state->mode_changed = true;
9956
9957 /* Attach plane to drm_atomic_state */
9958 plane_state = drm_atomic_get_plane_state(state, plane);
9959
9960 ret = PTR_ERR_OR_ZERO(plane_state);
9961 if (ret)
2dc39051 9962 goto out;
9963
9964 /* Call commit internally with the state we just constructed */
9965 ret = drm_atomic_commit(state);
e7b07cee 9966
2dc39051 9967out:
e7b07cee 9968 drm_atomic_state_put(state);
9969 if (ret)
9970 DRM_ERROR("Restoring old state failed with %i\n", ret);
9971
9972 return ret;
9973}
9974
9975/*
9976 * This function handles all cases when set mode does not come upon hotplug.
9977 * This includes when a display is unplugged then plugged back into the
9978 * same port and when running without usermode desktop manager support
e7b07cee 9979 */
9980void dm_restore_drm_connector_state(struct drm_device *dev,
9981 struct drm_connector *connector)
e7b07cee 9982{
c84dec2f 9983 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9984 struct amdgpu_crtc *disconnected_acrtc;
9985 struct dm_crtc_state *acrtc_state;
9986
9987 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9988 return;
9989
9990 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9991 if (!disconnected_acrtc)
9992 return;
e7b07cee 9993
9994 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9995 if (!acrtc_state->stream)
9996 return;
9997
9998 /*
9999 * If the previous sink is not released and different from the current,
10000 * we deduce we are in a state where we can not rely on usermode call
10001 * to turn on the display, so we do it here
10002 */
10003 if (acrtc_state->stream->sink != aconnector->dc_sink)
10004 dm_force_atomic_commit(&aconnector->base);
10005}
10006
1f6010a9 10007/*
10008 * Grabs all modesetting locks to serialize against any blocking commits,
10009 * and waits for completion of all non-blocking commits.
10010 */
10011static int do_aquire_global_lock(struct drm_device *dev,
10012 struct drm_atomic_state *state)
10013{
10014 struct drm_crtc *crtc;
10015 struct drm_crtc_commit *commit;
10016 long ret;
10017
10018 /*
10019 * Adding all modeset locks to acquire_ctx will
10020 * ensure that when the framework releases it, the
10021 * extra locks we are locking here will get released too.
10022 */
10023 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10024 if (ret)
10025 return ret;
10026
10027 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10028 spin_lock(&crtc->commit_lock);
10029 commit = list_first_entry_or_null(&crtc->commit_list,
10030 struct drm_crtc_commit, commit_entry);
10031 if (commit)
10032 drm_crtc_commit_get(commit);
10033 spin_unlock(&crtc->commit_lock);
10034
10035 if (!commit)
10036 continue;
10037
10038 /*
10039 * Make sure all pending HW programming completed and
10040 * page flips done
10041 */
10042 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10043
10044 if (ret > 0)
10045 ret = wait_for_completion_interruptible_timeout(
10046 &commit->flip_done, 10*HZ);
10047
10048 if (ret == 0)
10049 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 10050 "timed out\n", crtc->base.id, crtc->name);
10051
10052 drm_crtc_commit_put(commit);
10053 }
10054
10055 return ret < 0 ? ret : 0;
10056}
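/*
 * The return-value handling above follows the completion API semantics:
 * wait_for_completion_interruptible_timeout() returns the remaining
 * jiffies (> 0) on success, 0 on timeout, and -ERESTARTSYS when
 * interrupted. Hence flip_done is only waited on once hw_done succeeded,
 * and the final "ret < 0 ? ret : 0" maps a timeout to 0 (after logging)
 * while propagating a signal-related error.
 */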
10057
10058static void get_freesync_config_for_crtc(
10059 struct dm_crtc_state *new_crtc_state,
10060 struct dm_connector_state *new_con_state)
10061{
10062 struct mod_freesync_config config = {0};
10063 struct amdgpu_dm_connector *aconnector =
10064 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 10065 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 10066 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 10067 bool fs_vid_mode = false;
98e6436d 10068
a057ec46 10069 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10070 vrefresh >= aconnector->min_vfreq &&
10071 vrefresh <= aconnector->max_vfreq;
bb47de73 10072
10073 if (new_crtc_state->vrr_supported) {
10074 new_crtc_state->stream->ignore_msa_timing_param = true;
10075 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10076
10077 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10078 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 10079 config.vsif_supported = true;
180db303 10080 config.btr = true;
98e6436d 10081
10082 if (fs_vid_mode) {
10083 config.state = VRR_STATE_ACTIVE_FIXED;
10084 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10085 goto out;
10086 } else if (new_crtc_state->base.vrr_enabled) {
10087 config.state = VRR_STATE_ACTIVE_VARIABLE;
10088 } else {
10089 config.state = VRR_STATE_INACTIVE;
10090 }
10091 }
10092out:
10093 new_crtc_state->freesync_config = config;
10094}
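/*
 * Worked example, assuming a hypothetical freesync-capable panel with
 * aconnector->min_vfreq = 48 and aconnector->max_vfreq = 144: a 120 Hz
 * mode falls inside [48, 144], so vrr_supported is set and the range is
 * programmed as min_refresh_in_uhz = 48 * 1000000 = 48000000 and
 * max_refresh_in_uhz = 144000000 micro-Hz.
 */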
98e6436d 10095
10096static void reset_freesync_config_for_crtc(
10097 struct dm_crtc_state *new_crtc_state)
10098{
10099 new_crtc_state->vrr_supported = false;
98e6436d 10100
10101 memset(&new_crtc_state->vrr_infopacket, 0,
10102 sizeof(new_crtc_state->vrr_infopacket));
10103}
10104
10105static bool
10106is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10107 struct drm_crtc_state *new_crtc_state)
10108{
10109 struct drm_display_mode old_mode, new_mode;
10110
10111 if (!old_crtc_state || !new_crtc_state)
10112 return false;
10113
10114 old_mode = old_crtc_state->mode;
10115 new_mode = new_crtc_state->mode;
10116
10117 if (old_mode.clock == new_mode.clock &&
10118 old_mode.hdisplay == new_mode.hdisplay &&
10119 old_mode.vdisplay == new_mode.vdisplay &&
10120 old_mode.htotal == new_mode.htotal &&
10121 old_mode.vtotal != new_mode.vtotal &&
10122 old_mode.hsync_start == new_mode.hsync_start &&
10123 old_mode.vsync_start != new_mode.vsync_start &&
10124 old_mode.hsync_end == new_mode.hsync_end &&
10125 old_mode.vsync_end != new_mode.vsync_end &&
10126 old_mode.hskew == new_mode.hskew &&
10127 old_mode.vscan == new_mode.vscan &&
10128 (old_mode.vsync_end - old_mode.vsync_start) ==
10129 (new_mode.vsync_end - new_mode.vsync_start))
10130 return true;
10131
10132 return false;
10133}
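/*
 * Example: two 1920x1080 modes that share clock, htotal and all
 * horizontal sync values, but differ in vtotal, vsync_start and
 * vsync_end while keeping the same vsync pulse width, compare as
 * "unchanged" here. Only the vertical front porch moved, which is
 * exactly the degree of freedom freesync video modes vary to change
 * the refresh rate without a full modeset.
 */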
10134
10135static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10136 uint64_t num, den, res;
10137 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10138
10139 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10140
10141 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10142 den = (unsigned long long)new_crtc_state->mode.htotal *
10143 (unsigned long long)new_crtc_state->mode.vtotal;
10144
10145 res = div_u64(num, den);
10146 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10147}
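/*
 * Worked example: for a 1920x1080@60 mode with clock = 148500 (kHz),
 * htotal = 2200 and vtotal = 1125:
 *
 *   num = 148500 * 1000 * 1000000
 *   den = 2200 * 1125 = 2475000
 *   res = num / den = 60000000
 *
 * i.e. fixed_refresh_in_uhz encodes exactly 60 Hz in micro-Hz.
 */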
10148
10149static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10150 struct drm_atomic_state *state,
10151 struct drm_crtc *crtc,
10152 struct drm_crtc_state *old_crtc_state,
10153 struct drm_crtc_state *new_crtc_state,
10154 bool enable,
10155 bool *lock_and_validation_needed)
e7b07cee 10156{
eb3dc897 10157 struct dm_atomic_state *dm_state = NULL;
54d76575 10158 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 10159 struct dc_stream_state *new_stream;
62f55537 10160 int ret = 0;
d4d4a645 10161
10162 /*
10163 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10164 * update changed items
10165 */
10166 struct amdgpu_crtc *acrtc = NULL;
10167 struct amdgpu_dm_connector *aconnector = NULL;
10168 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10169 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 10170
4b9674e5 10171 new_stream = NULL;
9635b754 10172
10173 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10174 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10175 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 10176 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 10177
10178 /* TODO This hack should go away */
10179 if (aconnector && enable) {
10180 /* Make sure fake sink is created in plug-in scenario */
10181 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10182 &aconnector->base);
10183 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10184 &aconnector->base);
19f89e23 10185
10186 if (IS_ERR(drm_new_conn_state)) {
10187 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10188 goto fail;
10189 }
19f89e23 10190
10191 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10192 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 10193
10194 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10195 goto skip_modeset;
10196
10197 new_stream = create_validate_stream_for_sink(aconnector,
10198 &new_crtc_state->mode,
10199 dm_new_conn_state,
10200 dm_old_crtc_state->stream);
19f89e23 10201
10202 /*
10203 * We can have no stream on ACTION_SET if a display
10204 * was disconnected during S3; in this case it is not an
10205 * error, the OS will be updated after detection, and
10206 * will do the right thing on the next atomic commit.
10207 */
19f89e23 10208
10209 if (!new_stream) {
10210 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10211 __func__, acrtc->base.base.id);
10212 ret = -ENOMEM;
10213 goto fail;
10214 }
e7b07cee 10215
10216 /*
10217 * TODO: Check VSDB bits to decide whether this should
10218 * be enabled or not.
10219 */
10220 new_stream->triggered_crtc_reset.enabled =
10221 dm->force_timing_sync;
10222
4b9674e5 10223 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 10224
10225 ret = fill_hdr_info_packet(drm_new_conn_state,
10226 &new_stream->hdr_static_metadata);
10227 if (ret)
10228 goto fail;
10229
10230 /*
10231 * If we already removed the old stream from the context
10232 * (and set the new stream to NULL) then we can't reuse
10233 * the old stream even if the stream and scaling are unchanged.
10234 * We'll hit the BUG_ON and black screen.
10235 *
10236 * TODO: Refactor this function to allow this check to work
10237 * in all conditions.
10238 */
10239 if (amdgpu_freesync_vid_mode &&
10240 dm_new_crtc_state->stream &&
10241 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10242 goto skip_modeset;
10243
10244 if (dm_new_crtc_state->stream &&
10245 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10246 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10247 new_crtc_state->mode_changed = false;
10248 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10249 new_crtc_state->mode_changed);
62f55537 10250 }
4b9674e5 10251 }
b830ebc9 10252
02d35a67 10253 /* mode_changed flag may get updated above, need to check again */
10254 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10255 goto skip_modeset;
e7b07cee 10256
4711c033 10257 DRM_DEBUG_ATOMIC(
10258 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10259 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10260 "connectors_changed:%d\n",
10261 acrtc->crtc_id,
10262 new_crtc_state->enable,
10263 new_crtc_state->active,
10264 new_crtc_state->planes_changed,
10265 new_crtc_state->mode_changed,
10266 new_crtc_state->active_changed,
10267 new_crtc_state->connectors_changed);
62f55537 10268
10269 /* Remove stream for any changed/disabled CRTC */
10270 if (!enable) {
62f55537 10271
10272 if (!dm_old_crtc_state->stream)
10273 goto skip_modeset;
eb3dc897 10274
10275 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10276 is_timing_unchanged_for_freesync(new_crtc_state,
10277 old_crtc_state)) {
10278 new_crtc_state->mode_changed = false;
10279 DRM_DEBUG_DRIVER(
10280 "Mode change not required for front porch change, "
10281 "setting mode_changed to %d",
10282 new_crtc_state->mode_changed);
10283
10284 set_freesync_fixed_config(dm_new_crtc_state);
10285
10286 goto skip_modeset;
10287 } else if (amdgpu_freesync_vid_mode && aconnector &&
10288 is_freesync_video_mode(&new_crtc_state->mode,
10289 aconnector)) {
10290 struct drm_display_mode *high_mode;
10291
10292 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10293 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10294 set_freesync_fixed_config(dm_new_crtc_state);
10295 }
10296 }
10297
10298 ret = dm_atomic_get_state(state, &dm_state);
10299 if (ret)
10300 goto fail;
e7b07cee 10301
10302 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10303 crtc->base.id);
62f55537 10304
10305 /* i.e. reset mode */
10306 if (dc_remove_stream_from_ctx(
10307 dm->dc,
10308 dm_state->context,
10309 dm_old_crtc_state->stream) != DC_OK) {
10310 ret = -EINVAL;
10311 goto fail;
10312 }
62f55537 10313
10314 dc_stream_release(dm_old_crtc_state->stream);
10315 dm_new_crtc_state->stream = NULL;
bb47de73 10316
4b9674e5 10317 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 10318
4b9674e5 10319 *lock_and_validation_needed = true;
62f55537 10320
10321 } else { /* Add stream for any updated/enabled CRTC */
10322 /*
10323 * Quick fix to prevent NULL pointer on new_stream when
10324 * added MST connectors are not found in the existing crtc_state in chained mode.
10325 * TODO: need to dig out the root cause of this.
10326 */
10327 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10328 goto skip_modeset;
62f55537 10329
10330 if (modereset_required(new_crtc_state))
10331 goto skip_modeset;
62f55537 10332
10333 if (modeset_required(new_crtc_state, new_stream,
10334 dm_old_crtc_state->stream)) {
62f55537 10335
4b9674e5 10336 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 10337
10338 ret = dm_atomic_get_state(state, &dm_state);
10339 if (ret)
10340 goto fail;
27b3f4fc 10341
4b9674e5 10342 dm_new_crtc_state->stream = new_stream;
62f55537 10343
4b9674e5 10344 dc_stream_retain(new_stream);
1dc90497 10345
10346 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10347 crtc->base.id);
1dc90497 10348
10349 if (dc_add_stream_to_ctx(
10350 dm->dc,
10351 dm_state->context,
10352 dm_new_crtc_state->stream) != DC_OK) {
10353 ret = -EINVAL;
10354 goto fail;
10355 }
10356
10357 *lock_and_validation_needed = true;
10358 }
10359 }
e277adc5 10360
10361skip_modeset:
10362 /* Release extra reference */
10363 if (new_stream)
10364 dc_stream_release(new_stream);
e277adc5 10365
10366 /*
10367 * We want to do dc stream updates that do not require a
10368 * full modeset below.
10369 */
2afda735 10370 if (!(enable && aconnector && new_crtc_state->active))
10371 return 0;
10372 /*
10373 * Given above conditions, the dc state cannot be NULL because:
10374 * 1. We're in the process of enabling CRTCs (just been added
10375 * to the dc context, or already is on the context)
10376 * 2. Has a valid connector attached, and
10377 * 3. Is currently active and enabled.
10378 * => The dc stream state currently exists.
10379 */
10380 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 10381
4b9674e5 10382 /* Scaling or underscan settings */
10383 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10384 drm_atomic_crtc_needs_modeset(new_crtc_state))
10385 update_stream_scaling_settings(
10386 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 10387
10388 /* ABM settings */
10389 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10390
10391 /*
10392 * Color management settings. We also update color properties
10393 * when a modeset is needed, to ensure it gets reprogrammed.
10394 */
10395 if (dm_new_crtc_state->base.color_mgmt_changed ||
10396 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 10397 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10398 if (ret)
10399 goto fail;
62f55537 10400 }
e7b07cee 10401
10402 /* Update Freesync settings. */
10403 get_freesync_config_for_crtc(dm_new_crtc_state,
10404 dm_new_conn_state);
10405
62f55537 10406 return ret;
10407
10408fail:
10409 if (new_stream)
10410 dc_stream_release(new_stream);
10411 return ret;
62f55537 10412}
9b690ef3 10413
10414static bool should_reset_plane(struct drm_atomic_state *state,
10415 struct drm_plane *plane,
10416 struct drm_plane_state *old_plane_state,
10417 struct drm_plane_state *new_plane_state)
10418{
10419 struct drm_plane *other;
10420 struct drm_plane_state *old_other_state, *new_other_state;
10421 struct drm_crtc_state *new_crtc_state;
10422 int i;
10423
10424 /*
10425 * TODO: Remove this hack once the checks below are sufficient
10426 * enough to determine when we need to reset all the planes on
10427 * the stream.
10428 */
10429 if (state->allow_modeset)
10430 return true;
10431
10432 /* Exit early if we know that we're adding or removing the plane. */
10433 if (old_plane_state->crtc != new_plane_state->crtc)
10434 return true;
10435
10436 /* old crtc == new_crtc == NULL, plane not in context. */
10437 if (!new_plane_state->crtc)
10438 return false;
10439
10440 new_crtc_state =
10441 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10442
10443 if (!new_crtc_state)
10444 return true;
10445
10446 /* CRTC Degamma changes currently require us to recreate planes. */
10447 if (new_crtc_state->color_mgmt_changed)
10448 return true;
10449
10450 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10451 return true;
10452
10453 /*
10454 * If there are any new primary or overlay planes being added or
10455 * removed then the z-order can potentially change. To ensure
10456 * correct z-order and pipe acquisition the current DC architecture
10457 * requires us to remove and recreate all existing planes.
10458 *
10459 * TODO: Come up with a more elegant solution for this.
10460 */
10461 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 10462 struct amdgpu_framebuffer *old_afb, *new_afb;
10463 if (other->type == DRM_PLANE_TYPE_CURSOR)
10464 continue;
10465
10466 if (old_other_state->crtc != new_plane_state->crtc &&
10467 new_other_state->crtc != new_plane_state->crtc)
10468 continue;
10469
10470 if (old_other_state->crtc != new_other_state->crtc)
10471 return true;
10472
10473 /* Src/dst size and scaling updates. */
10474 if (old_other_state->src_w != new_other_state->src_w ||
10475 old_other_state->src_h != new_other_state->src_h ||
10476 old_other_state->crtc_w != new_other_state->crtc_w ||
10477 old_other_state->crtc_h != new_other_state->crtc_h)
10478 return true;
10479
10480 /* Rotation / mirroring updates. */
10481 if (old_other_state->rotation != new_other_state->rotation)
10482 return true;
10483
10484 /* Blending updates. */
10485 if (old_other_state->pixel_blend_mode !=
10486 new_other_state->pixel_blend_mode)
10487 return true;
10488
10489 /* Alpha updates. */
10490 if (old_other_state->alpha != new_other_state->alpha)
10491 return true;
10492
10493 /* Colorspace changes. */
10494 if (old_other_state->color_range != new_other_state->color_range ||
10495 old_other_state->color_encoding != new_other_state->color_encoding)
10496 return true;
10497
10498 /* Framebuffer checks fall at the end. */
10499 if (!old_other_state->fb || !new_other_state->fb)
10500 continue;
10501
10502 /* Pixel format changes can require bandwidth updates. */
10503 if (old_other_state->fb->format != new_other_state->fb->format)
10504 return true;
10505
10506 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10507 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10508
10509 /* Tiling and DCC changes also require bandwidth updates. */
10510 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10511 old_afb->base.modifier != new_afb->base.modifier)
10512 return true;
10513 }
10514
10515 return false;
10516}
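/*
 * Example of the z-order rule above: enabling one additional overlay
 * plane leaves the other planes' DRM state untouched, but this
 * function still reports a reset for each plane sharing that CRTC
 * (the new plane's old and new CRTCs differ), so DC rebuilds the whole
 * plane list and can re-acquire pipes in the correct stacking order.
 */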
10517
10518static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10519 struct drm_plane_state *new_plane_state,
10520 struct drm_framebuffer *fb)
10521{
10522 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10523 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 10524 unsigned int pitch;
e72868c4 10525 bool linear;
10526
10527 if (fb->width > new_acrtc->max_cursor_width ||
10528 fb->height > new_acrtc->max_cursor_height) {
10529 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10530 new_plane_state->fb->width,
10531 new_plane_state->fb->height);
10532 return -EINVAL;
10533 }
10534 if (new_plane_state->src_w != fb->width << 16 ||
10535 new_plane_state->src_h != fb->height << 16) {
10536 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10537 return -EINVAL;
10538 }
10539
10540 /* Pitch in pixels */
10541 pitch = fb->pitches[0] / fb->format->cpp[0];
10542
10543 if (fb->width != pitch) {
10544 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10545 fb->width, pitch);
10546 return -EINVAL;
10547 }
10548
10549 switch (pitch) {
10550 case 64:
10551 case 128:
10552 case 256:
10553 /* FB pitch is supported by cursor plane */
10554 break;
10555 default:
10556 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10557 return -EINVAL;
10558 }
10559
10560 /* Core DRM takes care of checking FB modifiers, so we only need to
10561 * check tiling flags when the FB doesn't have a modifier. */
10562 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10563 if (adev->family < AMDGPU_FAMILY_AI) {
10564 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10565 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10566 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10567 } else {
10568 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10569 }
10570 if (!linear) {
10571 DRM_DEBUG_ATOMIC("Cursor FB not linear");
10572 return -EINVAL;
10573 }
10574 }
10575
10576 return 0;
10577}
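/*
 * Worked example of the pitch check: an ARGB8888 cursor FB (cpp = 4)
 * with pitches[0] = 512 bytes yields pitch = 512 / 4 = 128 pixels,
 * one of the supported cursor pitches. A 100-pixel-wide FB with the
 * same byte pitch is rejected because fb->width != pitch.
 */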
10578
10579static int dm_update_plane_state(struct dc *dc,
10580 struct drm_atomic_state *state,
10581 struct drm_plane *plane,
10582 struct drm_plane_state *old_plane_state,
10583 struct drm_plane_state *new_plane_state,
10584 bool enable,
10585 bool *lock_and_validation_needed)
62f55537 10586{
10587
10588 struct dm_atomic_state *dm_state = NULL;
62f55537 10589 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 10590 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 10591 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 10592 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 10593 struct amdgpu_crtc *new_acrtc;
f6ff2a08 10594 bool needs_reset;
62f55537 10595 int ret = 0;
e7b07cee 10596
9b690ef3 10597
10598 new_plane_crtc = new_plane_state->crtc;
10599 old_plane_crtc = old_plane_state->crtc;
10600 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10601 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 10602
10603 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10604 if (!enable || !new_plane_crtc ||
10605 drm_atomic_plane_disabling(plane->state, new_plane_state))
10606 return 0;
10607
10608 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10609
10610 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10611 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10612 return -EINVAL;
10613 }
10614
24f99d2b 10615 if (new_plane_state->fb) {
10616 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10617 new_plane_state->fb);
10618 if (ret)
10619 return ret;
10620 }
10621
9e869063 10622 return 0;
626bf90f 10623 }
9b690ef3 10624
10625 needs_reset = should_reset_plane(state, plane, old_plane_state,
10626 new_plane_state);
10627
10628 /* Remove any changed/removed planes */
10629 if (!enable) {
f6ff2a08 10630 if (!needs_reset)
9e869063 10631 return 0;
a7b06724 10632
10633 if (!old_plane_crtc)
10634 return 0;
62f55537 10635
10636 old_crtc_state = drm_atomic_get_old_crtc_state(
10637 state, old_plane_crtc);
10638 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 10639
10640 if (!dm_old_crtc_state->stream)
10641 return 0;
62f55537 10642
10643 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10644 plane->base.id, old_plane_crtc->base.id);
9b690ef3 10645
10646 ret = dm_atomic_get_state(state, &dm_state);
10647 if (ret)
10648 return ret;
eb3dc897 10649
10650 if (!dc_remove_plane_from_context(
10651 dc,
10652 dm_old_crtc_state->stream,
10653 dm_old_plane_state->dc_state,
10654 dm_state->context)) {
62f55537 10655
c3537613 10656 return -EINVAL;
9e869063 10657 }
e7b07cee 10658
9b690ef3 10659
10660 dc_plane_state_release(dm_old_plane_state->dc_state);
10661 dm_new_plane_state->dc_state = NULL;
1dc90497 10662
9e869063 10663 *lock_and_validation_needed = true;
1dc90497 10664
10665 } else { /* Add new planes */
10666 struct dc_plane_state *dc_new_plane_state;
1dc90497 10667
10668 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10669 return 0;
e7b07cee 10670
10671 if (!new_plane_crtc)
10672 return 0;
e7b07cee 10673
10674 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10675 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 10676
10677 if (!dm_new_crtc_state->stream)
10678 return 0;
62f55537 10679
f6ff2a08 10680 if (!needs_reset)
9e869063 10681 return 0;
62f55537 10682
10683 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10684 if (ret)
10685 return ret;
10686
9e869063 10687 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 10688
10689 dc_new_plane_state = dc_create_plane_state(dc);
10690 if (!dc_new_plane_state)
10691 return -ENOMEM;
62f55537 10692
10693 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10694 plane->base.id, new_plane_crtc->base.id);
8c45c5db 10695
695af5f9 10696 ret = fill_dc_plane_attributes(
1348969a 10697 drm_to_adev(new_plane_crtc->dev),
10698 dc_new_plane_state,
10699 new_plane_state,
10700 new_crtc_state);
10701 if (ret) {
10702 dc_plane_state_release(dc_new_plane_state);
10703 return ret;
10704 }
62f55537 10705
10706 ret = dm_atomic_get_state(state, &dm_state);
10707 if (ret) {
10708 dc_plane_state_release(dc_new_plane_state);
10709 return ret;
10710 }
eb3dc897 10711
10712 /*
10713 * Any atomic check errors that occur after this will
10714 * not need a release. The plane state will be attached
10715 * to the stream, and therefore part of the atomic
10716 * state. It'll be released when the atomic state is
10717 * cleaned.
10718 */
10719 if (!dc_add_plane_to_context(
10720 dc,
10721 dm_new_crtc_state->stream,
10722 dc_new_plane_state,
10723 dm_state->context)) {
62f55537 10724
10725 dc_plane_state_release(dc_new_plane_state);
10726 return -EINVAL;
10727 }
8c45c5db 10728
9e869063 10729 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 10730
10731 /* Tell DC to do a full surface update every time there
10732 * is a plane change. Inefficient, but works for now.
10733 */
10734 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10735
10736 *lock_and_validation_needed = true;
62f55537 10737 }
10738
10739
10740 return ret;
10741}
a87fa993 10742
10743static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10744 int *src_w, int *src_h)
10745{
10746 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10747 case DRM_MODE_ROTATE_90:
10748 case DRM_MODE_ROTATE_270:
10749 *src_w = plane_state->src_h >> 16;
10750 *src_h = plane_state->src_w >> 16;
10751 break;
10752 case DRM_MODE_ROTATE_0:
10753 case DRM_MODE_ROTATE_180:
10754 default:
10755 *src_w = plane_state->src_w >> 16;
10756 *src_h = plane_state->src_h >> 16;
10757 break;
10758 }
10759}
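/*
 * src_w/src_h are 16.16 fixed point, hence the >> 16 above. Example:
 * a plane with src_w = 256 << 16 and src_h = 128 << 16 rotated by
 * DRM_MODE_ROTATE_90 reports an oriented size of 128x256, matching
 * the axis swap the hardware applies at scanout.
 */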
10760
10761static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10762 struct drm_crtc *crtc,
10763 struct drm_crtc_state *new_crtc_state)
10764{
10765 struct drm_plane *cursor = crtc->cursor, *underlying;
10766 struct drm_plane_state *new_cursor_state, *new_underlying_state;
10767 int i;
10768 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10769 int cursor_src_w, cursor_src_h;
10770 int underlying_src_w, underlying_src_h;
10771
10772 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10773 * cursor per pipe but it's going to inherit the scaling and
10774 * positioning from the underlying pipe. Check the cursor plane's
d1bfbe8a 10775 * blending properties match the underlying planes'. */
12f4849a 10776
10777 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10778 if (!new_cursor_state || !new_cursor_state->fb) {
10779 return 0;
10780 }
10781
10782 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10783 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10784 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
12f4849a 10785
10786 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10787 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10788 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10789 continue;
12f4849a 10790
10791 /* Ignore disabled planes */
10792 if (!new_underlying_state->fb)
10793 continue;
10794
10795 dm_get_oriented_plane_size(new_underlying_state,
10796 &underlying_src_w, &underlying_src_h);
10797 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10798 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10799
10800 if (cursor_scale_w != underlying_scale_w ||
10801 cursor_scale_h != underlying_scale_h) {
10802 drm_dbg_atomic(crtc->dev,
10803 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10804 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10805 return -EINVAL;
10806 }
10807
10808 /* If this plane covers the whole CRTC, no need to check planes underneath */
10809 if (new_underlying_state->crtc_x <= 0 &&
10810 new_underlying_state->crtc_y <= 0 &&
10811 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10812 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10813 break;
10814 }
10815
10816 return 0;
10817}
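/*
 * Worked example of the scale comparison above: a 64x64 cursor shown
 * at 64x64 has scale_w = 64 * 1000 / 64 = 1000, while an underlying
 * 1920-wide plane stretched to a 3840-wide CRTC rect has scale_w =
 * 3840 * 1000 / 1920 = 2000. The mismatch fails the check with
 * -EINVAL, since the shared cursor pipe would inherit the 2x scale.
 */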
10818
e10517b3 10819#if defined(CONFIG_DRM_AMD_DC_DCN)
10820static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10821{
10822 struct drm_connector *connector;
10823 struct drm_connector_state *conn_state;
10824 struct amdgpu_dm_connector *aconnector = NULL;
10825 int i;
10826 for_each_new_connector_in_state(state, connector, conn_state, i) {
10827 if (conn_state->crtc != crtc)
10828 continue;
10829
10830 aconnector = to_amdgpu_dm_connector(connector);
10831 if (!aconnector->port || !aconnector->mst_port)
10832 aconnector = NULL;
10833 else
10834 break;
10835 }
10836
10837 if (!aconnector)
10838 return 0;
10839
10840 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10841}
e10517b3 10842#endif
44be939f 10843
10844/**
10845 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10846 * @dev: The DRM device
10847 * @state: The atomic state to commit
10848 *
10849 * Validate that the given atomic state is programmable by DC into hardware.
10850 * This involves constructing a &struct dc_state reflecting the new hardware
10851 * state we wish to commit, then querying DC to see if it is programmable. It's
10852 * important not to modify the existing DC state. Otherwise, atomic_check
10853 * may unexpectedly commit hardware changes.
10854 *
10855 * When validating the DC state, it's important that the right locks are
10856 * acquired. For full updates case which removes/adds/updates streams on one
10857 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10858 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 10859 * flip using DRM's synchronization events.
10860 *
10861 * Note that DM adds the affected connectors for all CRTCs in state, when that
10862 * might not seem necessary. This is because DC stream creation requires the
10863 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10864 * be possible but non-trivial - a possible TODO item.
10865 *
10866 * Return: -Error code if validation failed.
10867 */
10868static int amdgpu_dm_atomic_check(struct drm_device *dev,
10869 struct drm_atomic_state *state)
62f55537 10870{
1348969a 10871 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 10872 struct dm_atomic_state *dm_state = NULL;
62f55537 10873 struct dc *dc = adev->dm.dc;
62f55537 10874 struct drm_connector *connector;
c2cea706 10875 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 10876 struct drm_crtc *crtc;
fc9e9920 10877 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10878 struct drm_plane *plane;
10879 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 10880 enum dc_status status;
1e88ad0a 10881 int ret, i;
62f55537 10882 bool lock_and_validation_needed = false;
886876ec 10883 struct dm_crtc_state *dm_old_crtc_state;
10884#if defined(CONFIG_DRM_AMD_DC_DCN)
10885 struct dsc_mst_fairness_vars vars[MAX_PIPES];
10886 struct drm_dp_mst_topology_state *mst_state;
10887 struct drm_dp_mst_topology_mgr *mgr;
6513104b 10888#endif
62f55537 10889
e8a98235 10890 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 10891
62f55537 10892 ret = drm_atomic_helper_check_modeset(dev, state);
10893 if (ret) {
10894 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
01e28f9c 10895 goto fail;
68ca1c3e 10896 }
62f55537 10897
10898 /* Check connector changes */
10899 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10900 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10901 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10902
10903 /* Skip connectors that are disabled or part of modeset already. */
10904 if (!old_con_state->crtc && !new_con_state->crtc)
10905 continue;
10906
10907 if (!new_con_state->crtc)
10908 continue;
10909
10910 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10911 if (IS_ERR(new_crtc_state)) {
68ca1c3e 10912 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10913 ret = PTR_ERR(new_crtc_state);
10914 goto fail;
10915 }
10916
10917 if (dm_old_con_state->abm_level !=
10918 dm_new_con_state->abm_level)
10919 new_crtc_state->connectors_changed = true;
10920 }
10921
e10517b3 10922#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 10923 if (dc_resource_is_dsc_encoding_supported(dc)) {
10924 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10925 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10926 ret = add_affected_mst_dsc_crtcs(state, crtc);
10927 if (ret) {
10928 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
44be939f 10929 goto fail;
68ca1c3e 10930 }
10931 }
10932 }
10933 }
e10517b3 10934#endif
1e88ad0a 10935 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10936 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10937
1e88ad0a 10938 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 10939 !new_crtc_state->color_mgmt_changed &&
10940 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10941 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 10942 continue;
7bef1af3 10943
03fc4cf4 10944 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10945 if (ret) {
10946 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
03fc4cf4 10947 goto fail;
68ca1c3e 10948 }
03fc4cf4 10949
10950 if (!new_crtc_state->enable)
10951 continue;
fc9e9920 10952
1e88ad0a 10953 ret = drm_atomic_add_affected_connectors(state, crtc);
10954 if (ret) {
10955 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
706bc8c5 10956 goto fail;
68ca1c3e 10957 }
fc9e9920 10958
1e88ad0a 10959 ret = drm_atomic_add_affected_planes(state, crtc);
10960 if (ret) {
10961 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
1e88ad0a 10962 goto fail;
68ca1c3e 10963 }
115a385c 10964
cbac53f7 10965 if (dm_old_crtc_state->dsc_force_changed)
115a385c 10966 new_crtc_state->mode_changed = true;
10967 }
10968
10969 /*
10970 * Add all primary and overlay planes on the CRTC to the state
10971 * whenever a plane is enabled to maintain correct z-ordering
10972 * and to enable fast surface updates.
10973 */
10974 drm_for_each_crtc(crtc, dev) {
10975 bool modified = false;
10976
10977 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10978 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10979 continue;
10980
10981 if (new_plane_state->crtc == crtc ||
10982 old_plane_state->crtc == crtc) {
10983 modified = true;
10984 break;
10985 }
10986 }
10987
10988 if (!modified)
10989 continue;
10990
10991 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10992 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10993 continue;
10994
10995 new_plane_state =
10996 drm_atomic_get_plane_state(state, plane);
10997
10998 if (IS_ERR(new_plane_state)) {
10999 ret = PTR_ERR(new_plane_state);
68ca1c3e 11000 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11001 goto fail;
11002 }
11003 }
11004 }
11005
62f55537 11006 /* Remove exiting planes if they are modified */
11007 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11008 ret = dm_update_plane_state(dc, state, plane,
11009 old_plane_state,
11010 new_plane_state,
11011 false,
11012 &lock_and_validation_needed);
11013 if (ret) {
11014 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 11015 goto fail;
68ca1c3e 11016 }
11017 }
11018
11019 /* Disable all crtcs which require disable */
11020 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11021 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11022 old_crtc_state,
11023 new_crtc_state,
11024 false,
11025 &lock_and_validation_needed);
11026 if (ret) {
11027 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
4b9674e5 11028 goto fail;
68ca1c3e 11029 }
11030 }
11031
11032 /* Enable all crtcs which require enable */
11033 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11034 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11035 old_crtc_state,
11036 new_crtc_state,
11037 true,
11038 &lock_and_validation_needed);
11039 if (ret) {
11040 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
4b9674e5 11041 goto fail;
68ca1c3e 11042 }
11043 }
11044
11045 /* Add new/modified planes */
11046 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11047 ret = dm_update_plane_state(dc, state, plane,
11048 old_plane_state,
11049 new_plane_state,
11050 true,
11051 &lock_and_validation_needed);
11052 if (ret) {
11053 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 11054 goto fail;
68ca1c3e 11055 }
11056 }
11057
11058 /* Run this here since we want to validate the streams we created */
11059 ret = drm_atomic_helper_check_planes(dev, state);
11060 if (ret) {
11061 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
b349f76e 11062 goto fail;
68ca1c3e 11063 }
62f55537 11064
11065 /* Check cursor planes scaling */
11066 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11067 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11068 if (ret) {
11069 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
12f4849a 11070 goto fail;
68ca1c3e 11071 }
11072 }
11073
11074 if (state->legacy_cursor_update) {
11075 /*
11076 * This is a fast cursor update coming from the plane update
11077 * helper, check if it can be done asynchronously for better
11078 * performance.
11079 */
11080 state->async_update =
11081 !drm_atomic_helper_async_check(dev, state);
11082
11083 /*
11084 * Skip the remaining global validation if this is an async
11085 * update. Cursor updates can be done without affecting
11086 * state or bandwidth calcs and this avoids the performance
11087 * penalty of locking the private state object and
11088 * allocating a new dc_state.
11089 */
11090 if (state->async_update)
11091 return 0;
11092 }
11093
ebdd27e1 11094 /* Check scaling and underscan changes*/
1f6010a9 11095 /* TODO Removed scaling changes validation due to inability to commit
11096 * new stream into context w\o causing full reset. Need to
11097 * decide how to handle.
11098 */
c2cea706 11099 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11100 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11101 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11102 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11103
11104 /* Skip any modesets/resets */
11105 if (!acrtc || drm_atomic_crtc_needs_modeset(
11106 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11107 continue;
11108
b830ebc9 11109 /* Skip any thing not scale or underscan changes */
54d76575 11110 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
11111 continue;
11112
11113 lock_and_validation_needed = true;
11114 }
11115
11116#if defined(CONFIG_DRM_AMD_DC_DCN)
11117 /* set the slot info for each mst_state based on the link encoding format */
11118 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11119 struct amdgpu_dm_connector *aconnector;
11120 struct drm_connector *connector;
11121 struct drm_connector_list_iter iter;
11122 u8 link_coding_cap;
11123
11124 if (!mgr->mst_state)
11125 continue;
11126
11127 drm_connector_list_iter_begin(dev, &iter);
11128 drm_for_each_connector_iter(connector, &iter) {
11129 int id = connector->index;
11130
11131 if (id == mst_state->mgr->conn_base_id) {
11132 aconnector = to_amdgpu_dm_connector(connector);
11133 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11134 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11135
11136 break;
11137 }
11138 }
11139 drm_connector_list_iter_end(&iter);
11140
11141 }
11142#endif
11143 /**
11144 * Streams and planes are reset when there are changes that affect
11145 * bandwidth. Anything that affects bandwidth needs to go through
11146 * DC global validation to ensure that the configuration can be applied
11147 * to hardware.
11148 *
11149 * We have to currently stall out here in atomic_check for outstanding
11150 * commits to finish in this case because our IRQ handlers reference
11151 * DRM state directly - we can end up disabling interrupts too early
11152 * if we don't.
11153 *
11154 * TODO: Remove this stall and drop DM state private objects.
a87fa993 11155 */
f6d7c7fa 11156 if (lock_and_validation_needed) {
eb3dc897 11157 ret = dm_atomic_get_state(state, &dm_state);
68ca1c3e
S
11158 if (ret) {
11159 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
eb3dc897 11160 goto fail;
68ca1c3e 11161 }
e7b07cee
HW
11162
11163 ret = do_aquire_global_lock(dev, state);
68ca1c3e
S
11164 if (ret) {
11165 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
e7b07cee 11166 goto fail;
68ca1c3e 11167 }
1dc90497 11168
d9fe1a4c 11169#if defined(CONFIG_DRM_AMD_DC_DCN)
68ca1c3e
S
11170 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11171 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
8c20a1ed 11172 goto fail;
68ca1c3e 11173 }
8c20a1ed 11174
6513104b 11175 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
68ca1c3e
S
11176 if (ret) {
11177 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
29b9ba74 11178 goto fail;
68ca1c3e 11179 }
d9fe1a4c 11180#endif
29b9ba74 11181
ded58c7b
ZL
11182 /*
11183 * Perform validation of MST topology in the state:
11184 * We need to perform MST atomic check before calling
11185 * dc_validate_global_state(), or we risk getting stuck
11186 * in an infinite loop and eventually hanging.
11187 */
11188 ret = drm_dp_mst_atomic_check(state);
68ca1c3e
S
11189 if (ret) {
11190 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
ded58c7b 11191 goto fail;
68ca1c3e 11192 }
85fb8bb9 11193 status = dc_validate_global_state(dc, dm_state->context, true);
74a16675 11194 if (status != DC_OK) {
68ca1c3e 11195 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
74a16675 11196 dc_status_to_str(status), status);
e7b07cee
HW
11197 ret = -EINVAL;
11198 goto fail;
11199 }
bd200d19 11200 } else {
674e78ac 11201 /*
bd200d19
NK
11202 * The commit is a fast update. Fast updates shouldn't change
11203 * the DC context or affect global validation, and their commit
11204 * work can be done in parallel with other commits not touching
11205 * the same resource. If we have a new DC context as part of
11206 * the DM atomic state from validation we need to free it and
11207 * retain the existing one instead.
fde9f39a
MR
11208 *
11209 * Furthermore, since the DM atomic state only contains the DC
11210 * context and can safely be annulled, we can free the state
11211 * and clear the associated private object now to free
11212 * some memory and avoid a possible use-after-free later.
674e78ac 11213 */
bd200d19 11214
fde9f39a
MR
11215 for (i = 0; i < state->num_private_objs; i++) {
11216 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 11217
fde9f39a
MR
11218 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11219 int j = state->num_private_objs-1;
bd200d19 11220
fde9f39a
MR
11221 dm_atomic_destroy_state(obj,
11222 state->private_objs[i].state);
11223
11224 /* If i is not at the end of the array then the
11225 * last element needs to be moved to where i was
11226 * before the array can safely be truncated.
11227 */
11228 if (i != j)
11229 state->private_objs[i] =
11230 state->private_objs[j];
bd200d19 11231
fde9f39a
MR
11232 state->private_objs[j].ptr = NULL;
11233 state->private_objs[j].state = NULL;
11234 state->private_objs[j].old_state = NULL;
11235 state->private_objs[j].new_state = NULL;
11236
11237 state->num_private_objs = j;
11238 break;
11239 }
bd200d19 11240 }
e7b07cee
HW
11241 }
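	/*
	 * The private-object pruning above is a classic swap-remove. A
	 * minimal sketch of the same idiom on a plain array (illustrative
	 * names only, not part of this driver):
	 *
	 *	void swap_remove(int *arr, int *len, int i)
	 *	{
	 *		arr[i] = arr[*len - 1];	/. fill the hole with the tail ./
	 *		(*len)--;		/. truncate; order not preserved ./
	 *	}
	 */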
11242
caff0e66
NK
11243 /* Store the overall update type for use later in atomic check. */
11244 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11245 struct dm_crtc_state *dm_new_crtc_state =
11246 to_dm_crtc_state(new_crtc_state);
11247
f6d7c7fa
NK
11248 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11249 UPDATE_TYPE_FULL :
11250 UPDATE_TYPE_FAST;
e7b07cee
HW
11251 }
11252
11253 /* Must have succeeded at this point */
11254 WARN_ON(ret);
e8a98235
RS
11255
11256 trace_amdgpu_dm_atomic_check_finish(state, ret);
11257
e7b07cee
HW
11258 return ret;
11259
11260fail:
11261 if (ret == -EDEADLK)
01e28f9c 11262 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 11263 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 11264 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 11265 else
01e28f9c 11266 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee 11267
e8a98235
RS
11268 trace_amdgpu_dm_atomic_check_finish(state, ret);
11269
e7b07cee
HW
11270 return ret;
11271}
11272
3ee6b26b
AD
11273static bool is_dp_capable_without_timing_msa(struct dc *dc,
11274 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
11275{
11276 uint8_t dpcd_data;
11277 bool capable = false;
11278
c84dec2f 11279 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
11280 dm_helpers_dp_read_dpcd(
11281 NULL,
c84dec2f 11282 amdgpu_dm_connector->dc_link,
e7b07cee
HW
11283 DP_DOWN_STREAM_PORT_COUNT,
11284 &dpcd_data,
11285 sizeof(dpcd_data))) {
11286 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11287 }
11288
11289 return capable;
11290}
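/*
 * Note (DP spec assumption, not from this change): DP_DOWN_STREAM_PORT_COUNT
 * is DPCD address 0x0007 and DP_MSA_TIMING_PAR_IGNORED is bit 6 of that
 * byte; a sink that sets it can regenerate video timing without the MSA
 * parameters, which is the prerequisite checked here for variable refresh.
 */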
f9b4f20c 11291
46db138d
SW
11292static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11293 unsigned int offset,
11294 unsigned int total_length,
11295 uint8_t *data,
11296 unsigned int length,
11297 struct amdgpu_hdmi_vsdb_info *vsdb)
11298{
11299 bool res;
11300 union dmub_rb_cmd cmd;
11301 struct dmub_cmd_send_edid_cea *input;
11302 struct dmub_cmd_edid_cea_output *output;
11303
11304 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11305 return false;
11306
11307 memset(&cmd, 0, sizeof(cmd));
11308
11309 input = &cmd.edid_cea.data.input;
11310
11311 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11312 cmd.edid_cea.header.sub_type = 0;
11313 cmd.edid_cea.header.payload_bytes =
11314 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11315 input->offset = offset;
11316 input->length = length;
eb9e59eb 11317 input->cea_total_length = total_length;
46db138d
SW
11318 memcpy(input->payload, data, length);
11319
11320 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11321 if (!res) {
11322 DRM_ERROR("EDID CEA parser failed\n");
11323 return false;
11324 }
11325
11326 output = &cmd.edid_cea.data.output;
11327
11328 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11329 if (!output->ack.success) {
11330 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11331 output->ack.offset);
11332 }
11333 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11334 if (!output->amd_vsdb.vsdb_found)
11335 return false;
11336
11337 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11338 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11339 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11340 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11341 } else {
b76a8062 11342 DRM_WARN("Unknown EDID CEA parser results\n");
46db138d
SW
11343 return false;
11344 }
11345
11346 return true;
11347}
11348
11349static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
f9b4f20c
SW
11350 uint8_t *edid_ext, int len,
11351 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11352{
11353 int i;
f9b4f20c
SW
11354
11355 /* send extension block to DMCU for parsing */
11356 for (i = 0; i < len; i += 8) {
11357 bool res;
11358 int offset;
11359
11360 /* send 8 bytes at a time */
46db138d 11361 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
f9b4f20c
SW
11362 return false;
11363
11364 if (i + 8 == len) {
11365 /* EDID block sent completed, expect result */
11366 int version, min_rate, max_rate;
11367
46db138d 11368 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
f9b4f20c
SW
11369 if (res) {
11370 /* amd vsdb found */
11371 vsdb_info->freesync_supported = 1;
11372 vsdb_info->amd_vsdb_version = version;
11373 vsdb_info->min_refresh_rate_hz = min_rate;
11374 vsdb_info->max_refresh_rate_hz = max_rate;
11375 return true;
11376 }
11377 /* not amd vsdb */
11378 return false;
11379 }
11380
11381 /* check for ack */
46db138d 11382 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
f9b4f20c
SW
11383 if (!res)
11384 return false;
11385 }
11386
11387 return false;
11388}
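/*
 * DMCU handshake summary: each 8-byte chunk must be acknowledged before the
 * next one is sent, and only when the final chunk completes does the
 * firmware report whether an AMD VSDB with FreeSync ranges was found. The
 * loop relies on len being a multiple of 8 (CEA extension blocks are 128
 * bytes); otherwise the trailing bytes would never be submitted.
 */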
11389
46db138d
SW
11390static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11391 uint8_t *edid_ext, int len,
11392 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11393{
11394 int i;
11395
11396 /* send extension block to DMUB for parsing */
11397 for (i = 0; i < len; i += 8) {
11398 /* send 8 bytes at a time */
11399 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11400 return false;
11401 }
11402
11403 return vsdb_info->freesync_supported;
11404}
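/*
 * Worked example (hypothetical sizes): a 128-byte CEA extension goes out in
 * sixteen 8-byte chunks at offsets 0, 8, ..., 120; each reply either ACKs
 * the window or, on the final chunk, carries the parsed AMD VSDB that
 * dm_edid_parser_send_cea() copies into vsdb_info.
 */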
11405
11406static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11407 uint8_t *edid_ext, int len,
11408 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11409{
11410 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11411
11412 if (adev->dm.dmub_srv)
11413 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11414 else
11415 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11416}
11417
7c7dd774 11418static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
f9b4f20c
SW
11419 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11420{
11421 uint8_t *edid_ext = NULL;
11422 int i;
11423 bool valid_vsdb_found = false;
11424
11425 /*----- drm_find_cea_extension() -----*/
11426 /* No EDID or EDID extensions */
11427 if (edid == NULL || edid->extensions == 0)
7c7dd774 11428 return -ENODEV;
f9b4f20c
SW
11429
11430 /* Find CEA extension */
11431 for (i = 0; i < edid->extensions; i++) {
11432 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11433 if (edid_ext[0] == CEA_EXT)
11434 break;
11435 }
11436
11437 if (i == edid->extensions)
7c7dd774 11438 return -ENODEV;
f9b4f20c
SW
11439
11440 /*----- cea_db_offsets() -----*/
11441 if (edid_ext[0] != CEA_EXT)
7c7dd774 11442 return -ENODEV;
f9b4f20c
SW
11443
11444 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
7c7dd774
AB
11445
11446 return valid_vsdb_found ? i : -ENODEV;
f9b4f20c
SW
11447}
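/*
 * EDID layout note: the base block is EDID_LENGTH (128) bytes and each
 * extension block follows it contiguously, hence edid + EDID_LENGTH * (i + 1)
 * for extension i above; a CEA-861 extension is identified by the CEA_EXT
 * (0x02) tag in its first byte.
 */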
11448
98e6436d
AK
11449void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11450 struct edid *edid)
e7b07cee 11451{
eb0709ba 11452 int i = 0;
e7b07cee
HW
11453 struct detailed_timing *timing;
11454 struct detailed_non_pixel *data;
11455 struct detailed_data_monitor_range *range;
c84dec2f
HW
11456 struct amdgpu_dm_connector *amdgpu_dm_connector =
11457 to_amdgpu_dm_connector(connector);
bb47de73 11458 struct dm_connector_state *dm_con_state = NULL;
9ad54467 11459 struct dc_sink *sink;
e7b07cee
HW
11460
11461 struct drm_device *dev = connector->dev;
1348969a 11462 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 11463 bool freesync_capable = false;
f9b4f20c 11464 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
b830ebc9 11465
8218d7f1
HW
11466 if (!connector->state) {
11467 DRM_ERROR("%s - Connector has no state\n", __func__);
bb47de73 11468 goto update;
8218d7f1
HW
11469 }
11470
9b2fdc33
AP
11471 sink = amdgpu_dm_connector->dc_sink ?
11472 amdgpu_dm_connector->dc_sink :
11473 amdgpu_dm_connector->dc_em_sink;
11474
11475 if (!edid || !sink) {
98e6436d
AK
11476 dm_con_state = to_dm_connector_state(connector->state);
11477
11478 amdgpu_dm_connector->min_vfreq = 0;
11479 amdgpu_dm_connector->max_vfreq = 0;
11480 amdgpu_dm_connector->pixel_clock_mhz = 0;
9b2fdc33
AP
11481 connector->display_info.monitor_range.min_vfreq = 0;
11482 connector->display_info.monitor_range.max_vfreq = 0;
11483 freesync_capable = false;
98e6436d 11484
bb47de73 11485 goto update;
98e6436d
AK
11486 }
11487
8218d7f1
HW
11488 dm_con_state = to_dm_connector_state(connector->state);
11489
e7b07cee 11490 if (!adev->dm.freesync_module)
bb47de73 11491 goto update;
f9b4f20c
SW
11492
9b2fdc33
AP
11494 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11495 || sink->sink_signal == SIGNAL_TYPE_EDP) {
f9b4f20c
SW
11496 bool edid_check_required = false;
11497
11498 if (edid) {
e7b07cee
HW
11499 edid_check_required = is_dp_capable_without_timing_msa(
11500 adev->dm.dc,
c84dec2f 11501 amdgpu_dm_connector);
e7b07cee 11502 }
e7b07cee 11503
f9b4f20c
SW
11504 if (edid_check_required && (edid->version > 1 ||
11505 (edid->version == 1 && edid->revision > 1))) {
11506 for (i = 0; i < 4; i++) {
e7b07cee 11507
f9b4f20c
SW
11508 timing = &edid->detailed_timings[i];
11509 data = &timing->data.other_data;
11510 range = &data->data.range;
11511 /*
11512 * Check if monitor has continuous frequency mode
11513 */
11514 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11515 continue;
11516 /*
11517 * Check for the range-limits flag only. If flags == 1,
11518 * no additional timing information is provided.
11519 * Default GTF, GTF secondary curve and CVT are not
11520 * supported.
11521 */
11522 if (range->flags != 1)
11523 continue;
a0ffc3fd 11524
f9b4f20c
SW
11525 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11526 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11527 amdgpu_dm_connector->pixel_clock_mhz =
11528 range->pixel_clock_mhz * 10;
a0ffc3fd 11529
f9b4f20c
SW
11530 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11531 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
e7b07cee 11532
f9b4f20c
SW
11533 break;
11534 }
98e6436d 11535
f9b4f20c
SW
11536 if (amdgpu_dm_connector->max_vfreq -
11537 amdgpu_dm_connector->min_vfreq > 10) {
98e6436d 11538
f9b4f20c
SW
11539 freesync_capable = true;
11540 }
11541 }
9b2fdc33 11542 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
7c7dd774
AB
11543 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11544 if (i >= 0 && vsdb_info.freesync_supported) {
f9b4f20c
SW
11545 timing = &edid->detailed_timings[i];
11546 data = &timing->data.other_data;
11547
11548 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11549 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11550 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11551 freesync_capable = true;
11552
11553 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11554 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
e7b07cee
HW
11555 }
11556 }
bb47de73
NK
11557
11558update:
11559 if (dm_con_state)
11560 dm_con_state->freesync_capable = freesync_capable;
11561
11562 if (connector->vrr_capable_property)
11563 drm_connector_set_vrr_capable_property(connector,
11564 freesync_capable);
e7b07cee
HW
11565}
11566
3d4e52d0
VL
11567void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11568{
1348969a 11569 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
11570 struct dc *dc = adev->dm.dc;
11571 int i;
11572
11573 mutex_lock(&adev->dm.dc_lock);
11574 if (dc->current_state) {
11575 for (i = 0; i < dc->current_state->stream_count; ++i)
11576 dc->current_state->streams[i]
11577 ->triggered_crtc_reset.enabled =
11578 adev->dm.force_timing_sync;
11579
11580 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11581 dc_trigger_sync(dc, dc->current_state);
11582 }
11583 mutex_unlock(&adev->dm.dc_lock);
11584}
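/*
 * Note (assumption about locking, not stated in the change): dc_lock keeps
 * dc->current_state stable here, so the per-stream reset flags and the sync
 * trigger are applied against a consistent set of streams.
 */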
9d83722d
RS
11585
11586void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11587 uint32_t value, const char *func_name)
11588{
11589#ifdef DM_CHECK_ADDR_0
11590 if (address == 0) {
11591 DC_ERR("invalid register write. address = 0\n");
11592 return;
11593 }
11594#endif
11595 cgs_write_register(ctx->cgs_device, address, value);
11596 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11597}
11598
11599uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11600 const char *func_name)
11601{
11602 uint32_t value;
11603#ifdef DM_CHECK_ADDR_0
11604 if (address == 0) {
11605 DC_ERR("invalid register read. address = 0\n");
11606 return 0;
11607 }
11608#endif
11609
11610 if (ctx->dmub_srv &&
11611 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11612 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11613 ASSERT(false);
11614 return 0;
11615 }
11616
11617 value = cgs_read_register(ctx->cgs_device, address);
11618
11619 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11620
11621 return value;
11622}
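/*
 * Note (assumption drawn from the guard above): while the DMUB register
 * helper is gathering writes for offload, a read through this path cannot
 * observe the not-yet-submitted writes, hence the ASSERT and the early
 * return of 0 instead of a stale hardware value.
 */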
81927e28 11623
88f52b1f
JS
11624int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11625 uint8_t status_type, uint32_t *operation_result)
11626{
11627 struct amdgpu_device *adev = ctx->driver_context;
11628 int return_status = -1;
11629 struct dmub_notification *p_notify = adev->dm.dmub_notify;
11630
11631 if (is_cmd_aux) {
11632 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11633 return_status = p_notify->aux_reply.length;
11634 *operation_result = p_notify->result;
11635 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11636 *operation_result = AUX_RET_ERROR_TIMEOUT;
11637 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11638 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11639 } else {
11640 *operation_result = AUX_RET_ERROR_UNKNOWN;
11641 }
11642 } else {
11643 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11644 return_status = 0;
11645 *operation_result = p_notify->sc_status;
11646 } else {
11647 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11648 }
11649 }
11650
11651 return return_status;
11652}
11653
11654int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11655 unsigned int link_index, void *cmd_payload, void *operation_result)
81927e28
JS
11656{
11657 struct amdgpu_device *adev = ctx->driver_context;
11658 int ret = 0;
11659
88f52b1f
JS
11660 if (is_cmd_aux) {
11661 dc_process_dmub_aux_transfer_async(ctx->dc,
11662 link_index, (struct aux_payload *)cmd_payload);
11663 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11664 (struct set_config_cmd_payload *)cmd_payload,
11665 adev->dm.dmub_notify)) {
11666 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11667 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11668 (uint32_t *)operation_result);
11669 }
11670
9e3a50d2 11671 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
81927e28 11672 if (ret == 0) {
9e3a50d2 11673 DRM_ERROR("wait_for_completion_timeout() timed out!\n");
88f52b1f
JS
11674 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11675 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11676 (uint32_t *)operation_result);
81927e28 11677 }
81927e28 11678
88f52b1f
JS
11679 if (is_cmd_aux) {
11680 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11681 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
81927e28 11682
88f52b1f
JS
11683 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11684 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11685 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11686 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11687 adev->dm.dmub_notify->aux_reply.length);
11688 }
11689 }
81927e28
JS
11690 }
11691
88f52b1f
JS
11692 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11693 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11694 (uint32_t *)operation_result);
81927e28 11695}
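/*
 * The function above turns an asynchronous DMUB notification into a
 * synchronous call with a completion. A minimal sketch of the pattern
 * (illustrative names, not the driver's):
 *
 *	static DECLARE_COMPLETION(xfer_done);
 *
 *	static void on_dmub_notify(void)	/. notify/IRQ context ./
 *	{
 *		complete(&xfer_done);		/. wake the waiter ./
 *	}
 *
 *	static int xfer_sync(void)		/. process context ./
 *	{
 *		/. returns 0 on timeout, remaining jiffies otherwise ./
 *		if (!wait_for_completion_timeout(&xfer_done, 10 * HZ))
 *			return -ETIMEDOUT;
 *		return 0;
 *	}
 */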
1edf5ae1
ZL
11696
11697/*
11698 * Check whether seamless boot is supported.
11699 *
11700 * So far we only support seamless boot on CHIP_VANGOGH.
11701 * If everything goes well, we may consider expanding
11702 * seamless boot to other ASICs.
11703 */
11704bool check_seamless_boot_capability(struct amdgpu_device *adev)
11705{
11706 switch (adev->asic_type) {
11707 case CHIP_VANGOGH:
11708 if (!adev->mman.keep_stolen_vga_memory)
11709 return true;
11710 break;
11711 default:
11712 break;
11713 }
11714
11715 return false;
11716}