drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

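/*
 * A rough sketch of the signed DMUB firmware image layout implied by the
 * PSP header/footer sizes above (illustrative only, not taken from the PSP
 * spec; dm_dmub_hw_init() below derives its pointers and sizes from exactly
 * this arithmetic):
 *
 *	ucode_array_offset_bytes
 *	+------------------+
 *	| PSP header       |  PSP_HEADER_BYTES (0x100)
 *	+------------------+
 *	| fw_inst_const    |  inst_const_bytes - PSP_HEADER_BYTES - PSP_FOOTER_BYTES
 *	+------------------+
 *	| PSP footer       |  PSP_FOOTER_BYTES (0x100)
 *	+------------------+
 *	| fw_bss_data      |  bss_data_bytes
 *	+------------------+
 */
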
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
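
/*
 * Informal picture of the flow described above (an aside, not from the
 * original sources):
 *
 *	userspace --> DRM (atomic/KMS ioctls)
 *	               |
 *	               v
 *	          amdgpu_dm ("dm")  -- translates DRM state into DC structures
 *	               |
 *	               v
 *	              DC            -- hardware-specific display core
 */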

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
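
/*
 * Illustrative only (not from the original sources): a caller can unpack
 * the reg-format values packed above like so:
 *
 *	u32 vbl, pos;
 *
 *	if (!dm_crtc_get_scanoutpos(adev, crtc, &vbl, &pos)) {
 *		u32 v_position = pos & 0xffff;
 *		u32 h_position = pos >> 16;
 *		u32 v_blank_start = vbl & 0xffff;
 *		u32 v_blank_end = vbl >> 16;
 *	}
 */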

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results only
		 * now that scanout is past the front-porch. This will also
		 * deliver page-flip completion events that have been queued to
		 * us if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, to be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	dev = adev->dm.ddev;
	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator whether the callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
				   dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}
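
/*
 * Example usage (mirroring the registrations done in amdgpu_dm_init()
 * below): route AUX replies to the blocking waiter and offload HPD
 * processing to the dmub interrupt handling thread.
 *
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *				      dmub_aux_setconfig_callback, false);
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *				      dmub_hpd_callback, true);
 */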

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
		dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);

}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox Interrupt
 * event handler.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}


	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					  int pipe, bool *enabled,
					  unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
				       struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;
	struct dc *dc = adev->dm.dc;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->asic_type) {
	case CHIP_YELLOW_CARP:
		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
			hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
#endif
		}
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (add 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;


	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}
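
/*
 * Note on the shifts above (an aside, not from the original sources): the
 * system aperture bounds are programmed in 256KB units (>> 18), the AGP
 * window in 16MB units (>> 24), and the GART page table bounds in 4KB
 * pages (>> 12). For example, an fb_start of 0x8000000000 becomes a
 * logical_addr_low of 0x8000000000 >> 18 = 0x200000.
 */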
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

#endif

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;


	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};
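
/*
 * To quirk another platform, append an entry above the all-zero terminator,
 * e.g. (hypothetical IDs, shown only to illustrate the table format):
 *
 *	{ 0x1002, 0x15d8, 0x103c, 0x8615, 0xc4 },
 *
 * dm_should_disable_stutter() below walks the table until it reaches the
 * terminator.
 */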

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

7578ecda 1357static int amdgpu_dm_init(struct amdgpu_device *adev)
4562236b
HW
1358{
1359 struct dc_init_data init_data;
52704fca
BL
1360#ifdef CONFIG_DRM_AMD_DC_HDCP
1361 struct dc_callback_init init_params;
1362#endif
743b9786 1363 int r;
52704fca 1364
4a580877 1365 adev->dm.ddev = adev_to_drm(adev);
4562236b
HW
1366 adev->dm.adev = adev;
1367
4562236b
HW
1368 /* Zero all the fields */
1369 memset(&init_data, 0, sizeof(init_data));
52704fca
BL
1370#ifdef CONFIG_DRM_AMD_DC_HDCP
1371 memset(&init_params, 0, sizeof(init_params));
1372#endif
4562236b 1373
674e78ac 1374 mutex_init(&adev->dm.dc_lock);
6ce8f316 1375 mutex_init(&adev->dm.audio_lock);
ea3b4242
QZ
1376#if defined(CONFIG_DRM_AMD_DC_DCN)
1377 spin_lock_init(&adev->dm.vblank_lock);
1378#endif
674e78ac 1379
4562236b
HW
1380 if(amdgpu_dm_irq_init(adev)) {
1381 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1382 goto error;
1383 }
1384
1385 init_data.asic_id.chip_family = adev->family;
1386
2dc31ca1 1387 init_data.asic_id.pci_revision_id = adev->pdev->revision;
4562236b 1388 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
dae66a04 1389 init_data.asic_id.chip_id = adev->pdev->device;
4562236b 1390
770d13b1 1391 init_data.asic_id.vram_width = adev->gmc.vram_width;
4562236b
HW
1392 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1393 init_data.asic_id.atombios_base_address =
1394 adev->mode_info.atom_context->bios;
1395
1396 init_data.driver = adev;
1397
1398 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1399
1400 if (!adev->dm.cgs_device) {
1401 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1402 goto error;
1403 }
1404
1405 init_data.cgs_device = adev->dm.cgs_device;
1406
4562236b
HW
1407 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1408
60fb100b
AD
1409 switch (adev->asic_type) {
1410 case CHIP_CARRIZO:
1411 case CHIP_STONEY:
1ebcaebd
NK
1412 init_data.flags.gpu_vm_support = true;
1413 break;
60fb100b 1414 default:
1d789535 1415 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
1416 case IP_VERSION(2, 1, 0):
1417 init_data.flags.gpu_vm_support = true;
91adec9e
ML
1418 switch (adev->dm.dmcub_fw_version) {
1419 case 0: /* development */
1420 case 0x1: /* linux-firmware.git hash 6d9f399 */
1421 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1422 init_data.flags.disable_dmcu = false;
1423 break;
1424 default:
1425 init_data.flags.disable_dmcu = true;
1426 }
c08182f2 1427 break;
559f591d
AD
1428 case IP_VERSION(1, 0, 0):
1429 case IP_VERSION(1, 0, 1):
c08182f2
AD
1430 case IP_VERSION(3, 0, 1):
1431 case IP_VERSION(3, 1, 2):
1432 case IP_VERSION(3, 1, 3):
1433 init_data.flags.gpu_vm_support = true;
1434 break;
1435 case IP_VERSION(2, 0, 3):
1436 init_data.flags.disable_dmcu = true;
1437 break;
1438 default:
1439 break;
1440 }
60fb100b
AD
1441 break;
1442 }
6e227308 1443
04b94af4
AD
1444 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1445 init_data.flags.fbc_support = true;
1446
d99f38ae
AD
1447 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1448 init_data.flags.multi_mon_pp_mclk_switch = true;
1449
eaf56410
LL
1450 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1451 init_data.flags.disable_fractional_pwm = true;
a5148245
ZL
1452
1453 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1454 init_data.flags.edp_no_power_sequencing = true;
eaf56410 1455
27eaa492 1456 init_data.flags.power_down_display_on_boot = true;
78ad75f8 1457
1edf5ae1
ZL
1458 if (check_seamless_boot_capability(adev)) {
1459 init_data.flags.power_down_display_on_boot = false;
1460 init_data.flags.allow_seamless_boot_optimization = true;
1461 DRM_INFO("Seamless boot condition check passed\n");
1462 }
1463
0dd79532 1464 INIT_LIST_HEAD(&adev->dm.da_list);
4562236b
HW
1465 /* Display Core create. */
1466 adev->dm.dc = dc_create(&init_data);
1467
423788c7 1468 if (adev->dm.dc) {
76121231 1469 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
423788c7 1470 } else {
76121231 1471 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
423788c7
ES
1472 goto error;
1473 }
4562236b 1474
8a791dab
HW
1475 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1476 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1477 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1478 }
1479
f99d8762
HW
1480 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1481 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3ce51649
AD
1482 if (dm_should_disable_stutter(adev->pdev))
1483 adev->dm.dc->debug.disable_stutter = true;
f99d8762 1484
8a791dab
HW
1485 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1486 adev->dm.dc->debug.disable_stutter = true;
1487
2665f63a 1488 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
8a791dab 1489 adev->dm.dc->debug.disable_dsc = true;
2665f63a
ML
1490 adev->dm.dc->debug.disable_dsc_edp = true;
1491 }
8a791dab
HW
1492
1493 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1494 adev->dm.dc->debug.disable_clock_gate = true;
1495
743b9786
NK
1496 r = dm_dmub_hw_init(adev);
1497 if (r) {
1498 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1499 goto error;
1500 }
1501
bb6785c1
NK
1502 dc_hardware_init(adev->dm.dc);
1503
8e794421
WL
1504 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1505 if (!adev->dm.hpd_rx_offload_wq) {
1506 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1507 goto error;
1508 }
1509
0b08c54b 1510#if defined(CONFIG_DRM_AMD_DC_DCN)
3ca001af 1511 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
e6cd859d
AD
1512 struct dc_phy_addr_space_config pa_config;
1513
0b08c54b 1514 mmhub_read_system_context(adev, &pa_config);
c0fb85ae 1515
0b08c54b
YZ
1516 // Call the DC init_memory func
1517 dc_setup_system_context(adev->dm.dc, &pa_config);
1518 }
1519#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_control_workqueue.\n");
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
			goto error;
		}
#endif /* CONFIG_DRM_AMD_DC_DCN */
	}
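
	/*
	 * In short: when the DMUB firmware supports notifications, AUX
	 * replies and HPD/HPD_IRQ events arrive through the outbox mailbox
	 * initialized by amdgpu_dm_outbox_init() instead of the legacy
	 * interrupt path. The callbacks registered above only route each
	 * notification type; the final boolean presumably selects whether
	 * the callback is deferred to adev->dm.delayed_hpd_wq so the work
	 * runs outside interrupt context.
	 */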

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;
	}
#endif

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
		destroy_workqueue(adev->dm.delayed_hpd_wq);
		adev->dm.delayed_hpd_wq = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	if (adev->dm.hpd_rx_offload_wq) {
		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
			if (adev->dm.hpd_rx_offload_wq[i].wq) {
				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
			}
		}

		kfree(adev->dm.hpd_rx_offload_wq);
		adev->dm.hpd_rx_offload_wq = NULL;
	}

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			return 0;
		default:
			break;
		}
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
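
	/*
	 * Worked example with illustrative header values (not taken from a
	 * real binary): for ucode_size_bytes = 0x5000 and intv_size_bytes =
	 * 0x600, the ERAM image adds ALIGN(0x5000 - 0x600, PAGE_SIZE) =
	 * 0x5000 and the interrupt-vector image adds ALIGN(0x600, PAGE_SIZE)
	 * = 0x1000 to adev->firmware.fw_size, i.e. each region is accounted
	 * for at page granularity for the PSP loading path.
	 */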

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case IP_VERSION(3, 0, 0):
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		} else {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		}
		break;
	case IP_VERSION(3, 0, 1):
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case IP_VERSION(3, 0, 2):
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case IP_VERSION(3, 0, 3):
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;
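
	/*
	 * As the pointer arithmetic above implies, the DMCUB ucode array is
	 * laid out as [PSP header | inst_const (code) | bss/data | PSP
	 * footer]: fw_inst_const skips the PSP header at the front,
	 * fw_bss_data starts right after the inst_const bytes, and the PSP
	 * header/footer sizes are subtracted from inst_const_size since
	 * those bytes are never mapped into a DMUB region.
	 */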

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}
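
	/*
	 * Sizing is a two-pass scheme: dmub_srv_calc_region_info() above
	 * produced the total footprint of all regions, a single VRAM buffer
	 * was allocated to cover it, and dmub_srv_calc_fb_info() has now
	 * rebased every region to a CPU/GPU address pair inside that one
	 * allocation for the hardware-init step to consume.
	 */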

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;
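	/*
	 * This yields 0x0000, 0x1111, 0x2222, ..., 0xFFFF (0xFFFF * i / 15
	 * == 0x1111 * i): a 16-point identity ramp across the 16-bit
	 * backlight range, so ABM starts from an uncurved backlight
	 * transfer function.
	 */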

	params.set = 0;
	params.backlight_ramping_override = false;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;
	/* In the case where abm is implemented on dmcub,
	 * the dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu) {
		if (!dmcu_load_iram(dmcu, params))
			return -EINVAL;
	} else if (adev->dm.dc->ctx->dmub_srv) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		int edp_num;

		get_edp_links(adev->dm.dc, edp_links, &edp_num);
		for (i = 0; i < edp_num; i++) {
			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
				return -EINVAL;
		}
	}

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed and
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, and then calls the pplib
	 * functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also
	 * fixed values. dc has implemented a different flow for the Windows
	 * driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 0, 2):
	case IP_VERSION(2, 0, 0):
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}


static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
{
	int i;

	if (dm->hpd_rx_offload_wq) {
		for (i = 0; i < dm->dc->caps.max_links; i++)
			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
	}
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);

#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(adev->dm.dc, false);
#endif

		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
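		/*
		 * This copy is replayed by dm_resume() on the
		 * amdgpu_in_reset() path: the cached state is recommitted to
		 * DC and its interrupts re-enabled once the reset completes
		 * (which is also where dc_lock is released again).
		 */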

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		hpd_rx_irq_work_suspend(dm);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	hpd_rx_irq_work_suspend(dm);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink)
		dc_sink_release(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}

static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);
}

static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	acrtc_state->force_dpms_off = true;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		/*
		 * The dc->current_state is backed up into dm->cached_dc_state
		 * before we commit 0 streams.
		 *
		 * DC will clear link encoder assignments on the real state
		 * but the changes won't propagate over to the copy we made
		 * before the 0 streams commit.
		 *
		 * DC expects that link encoder assignments are *not* valid
		 * when committing a state, so as a workaround it needs to be
		 * cleared here.
		 */
		link_enc_cfg_init(dm->dc, dc_state);

		amdgpu_dm_outbox_init(adev);

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.early_fini = amdgpu_dm_early_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	int i;

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	for (i = 0; i < dm->num_of_edps; i++) {
		if (link == dm->backlight_link[i])
			break;
	}
	if (i >= dm->num_of_edps)
		return;
	caps = &dm->backlight_caps[i];
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 /*||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
		caps->aux_support = true;

	if (amdgpu_backlight == 0)
		caps->aux_support = false;
	else if (amdgpu_backlight == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 * Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * Calculating this expression would need floating-point precision;
	 * to avoid that complexity, we take advantage of the fact that CV is
	 * divided by a constant. From Euclid's division algorithm, we know
	 * that CV can be written as: CV = 32*q + r. Next, we replace CV in
	 * the Luminance expression and get 50*(2**q)*(2**(r/32)), hence we
	 * just need to pre-compute the value of 2**(r/32). For pre-computing
	 * the values we used the following Ruby line:
	 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];
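	/*
	 * Example: max_cll = 70 gives q = 70 >> 5 = 2 and r = 70 % 32 = 6,
	 * so max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 nits,
	 * against the exact 50 * 2^(70/32) ~= 227.8 from the CTA-861-G
	 * formula above.
	 */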

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on
	 * link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use em_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
			aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			dc_sink_release(aconnector->dc_sink);
		}

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
	struct dm_crtc_state *dm_crtc_state = NULL;

	if (adev->dm.disable_hpd_irq)
		return;

	if (dm_con_state->base.state && dm_con_state->base.crtc)
		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
					dm_con_state->base.state,
					dm_con_state->base.crtc));
	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		if (new_connection_type == dc_connection_none &&
		    aconnector->dc_link->type == dc_connection_none &&
		    dm_crtc_state)
			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;

	handle_hpd_irq_helper(aconnector);
}

static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}
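
	/*
	 * DPCD 1.2+ sinks mirror the sink count and IRQ vectors into the
	 * Event Status Indicator (ESI) block at DP_SINK_COUNT_ESI (0x2002),
	 * so everything this loop needs can be fetched in a single burst
	 * read; pre-1.2 sinks only expose the legacy 0x200 range.
	 */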

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
					 union hpd_irq_data hpd_irq_data)
{
	struct hpd_rx_irq_offload_work *offload_work =
		kzalloc(sizeof(*offload_work), GFP_KERNEL);

	if (!offload_work) {
		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
		return;
	}

	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
	offload_work->data = hpd_irq_data;
	offload_work->offload_wq = offload_wq;

	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
}
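
/*
 * schedule_hpd_rx_offload_work() copies the IRQ data into a heap-allocated
 * work item so that the caller (handle_hpd_rx_irq() below) can return
 * promptly; the per-link single-threaded workqueue then serializes the
 * deferred handling, e.g. link-loss recovery, against any other offloaded
 * work for the same link.
 */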

static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	bool result = false;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union hpd_irq_data hpd_irq_data;
	bool link_loss = false;
	bool has_left_work = false;
	int idx = aconnector->base.index;
	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * TODO: temporarily hold a mutex so the hpd interrupt does not race
	 * with i2c on the gpio; once an i2c helper is implemented, this
	 * mutex should be retired.
	 */
	mutex_lock(&aconnector->hpd_lock);

	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
					   &link_loss, true, &has_left_work);

	if (!has_left_work)
		goto out;

	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
		goto out;
	}

	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
		    hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			dm_handle_mst_sideband_msg(aconnector);
			goto out;
		}

		if (link_loss) {
			bool skip = false;

			spin_lock(&offload_wq->offload_lock);
			skip = offload_wq->is_handling_link_loss;

			if (!skip)
				offload_wq->is_handling_link_loss = true;

			spin_unlock(&offload_wq->offload_lock);

			if (!skip)
				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);

			goto out;
		}
	}

out:
	if (result && !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif

	if (dc_link->type != dc_connection_mst_branch)
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);

	mutex_unlock(&aconnector->hpd_lock);
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);

			if (adev->dm.hpd_rx_offload_wq)
				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
					aconnector;
		}
	}
}

55e56389
MR
3303#if defined(CONFIG_DRM_AMD_DC_SI)
3304/* Register IRQ sources and initialize IRQ callbacks */
3305static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3306{
3307 struct dc *dc = adev->dm.dc;
3308 struct common_irq_params *c_irq_params;
3309 struct dc_interrupt_params int_params = {0};
3310 int r;
3311 int i;
3312 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3313
3314 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3315 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3316
3317 /*
3318 * Actions of amdgpu_irq_add_id():
 3319	 * 1. Register a set() function with the base driver.
 3320	 * The base driver will call the set() function to enable/disable an
 3321	 * interrupt in DC hardware.
 3322	 * 2. Register amdgpu_dm_irq_handler().
 3323	 * The base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 3324	 * coming from DC hardware.
 3325	 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
 3326	 * for acknowledging and handling. */
3327
3328 /* Use VBLANK interrupt */
3329 for (i = 0; i < adev->mode_info.num_crtc; i++) {
 3330 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3331 if (r) {
3332 DRM_ERROR("Failed to add crtc irq id!\n");
3333 return r;
3334 }
3335
3336 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3337 int_params.irq_source =
 3338 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3339
3340 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3341
3342 c_irq_params->adev = adev;
3343 c_irq_params->irq_src = int_params.irq_source;
3344
3345 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3346 dm_crtc_high_irq, c_irq_params);
3347 }
3348
3349 /* Use GRPH_PFLIP interrupt */
3350 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3351 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3352 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3353 if (r) {
3354 DRM_ERROR("Failed to add page flip irq id!\n");
3355 return r;
3356 }
3357
3358 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3359 int_params.irq_source =
3360 dc_interrupt_to_irq_source(dc, i, 0);
3361
3362 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3363
3364 c_irq_params->adev = adev;
3365 c_irq_params->irq_src = int_params.irq_source;
3366
3367 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3368 dm_pflip_high_irq, c_irq_params);
3369
3370 }
3371
3372 /* HPD */
3373 r = amdgpu_irq_add_id(adev, client_id,
3374 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3375 if (r) {
3376 DRM_ERROR("Failed to add hpd irq id!\n");
3377 return r;
3378 }
3379
3380 register_hpd_handlers(adev);
3381
3382 return 0;
3383}
3384#endif
3385
4562236b
HW
3386/* Register IRQ sources and initialize IRQ callbacks */
3387static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3388{
3389 struct dc *dc = adev->dm.dc;
3390 struct common_irq_params *c_irq_params;
3391 struct dc_interrupt_params int_params = {0};
3392 int r;
3393 int i;
1ffdeca6 3394 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 3395
c08182f2 3396 if (adev->family >= AMDGPU_FAMILY_AI)
3760f76c 3397 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
3398
3399 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3400 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3401
1f6010a9
DF
3402 /*
3403 * Actions of amdgpu_irq_add_id():
4562236b
HW
 3404	 * 1. Register a set() function with the base driver.
 3405	 * The base driver will call the set() function to enable/disable an
 3406	 * interrupt in DC hardware.
 3407	 * 2. Register amdgpu_dm_irq_handler().
 3408	 * The base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 3409	 * coming from DC hardware.
 3410	 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
 3411	 * for acknowledging and handling. */
3412
b57de80a 3413 /* Use VBLANK interrupt */
e9029155 3414 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 3415 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
3416 if (r) {
3417 DRM_ERROR("Failed to add crtc irq id!\n");
3418 return r;
3419 }
3420
3421 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3422 int_params.irq_source =
3d761e79 3423 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 3424
b57de80a 3425 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
3426
3427 c_irq_params->adev = adev;
3428 c_irq_params->irq_src = int_params.irq_source;
3429
3430 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3431 dm_crtc_high_irq, c_irq_params);
3432 }
3433
d2574c33
MK
3434 /* Use VUPDATE interrupt */
3435 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3436 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3437 if (r) {
3438 DRM_ERROR("Failed to add vupdate irq id!\n");
3439 return r;
3440 }
3441
3442 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3443 int_params.irq_source =
3444 dc_interrupt_to_irq_source(dc, i, 0);
3445
3446 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3447
3448 c_irq_params->adev = adev;
3449 c_irq_params->irq_src = int_params.irq_source;
3450
3451 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3452 dm_vupdate_high_irq, c_irq_params);
3453 }
3454
3d761e79 3455 /* Use GRPH_PFLIP interrupt */
4562236b
HW
3456 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3457 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 3458 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
3459 if (r) {
3460 DRM_ERROR("Failed to add page flip irq id!\n");
3461 return r;
3462 }
3463
3464 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3465 int_params.irq_source =
3466 dc_interrupt_to_irq_source(dc, i, 0);
3467
3468 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3469
3470 c_irq_params->adev = adev;
3471 c_irq_params->irq_src = int_params.irq_source;
3472
3473 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3474 dm_pflip_high_irq, c_irq_params);
3475
3476 }
3477
3478 /* HPD */
2c8ad2d5
AD
3479 r = amdgpu_irq_add_id(adev, client_id,
3480 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
3481 if (r) {
3482 DRM_ERROR("Failed to add hpd irq id!\n");
3483 return r;
3484 }
3485
3486 register_hpd_handlers(adev);
3487
3488 return 0;
3489}
3490
b86a1aa3 3491#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3492/* Register IRQ sources and initialize IRQ callbacks */
3493static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3494{
3495 struct dc *dc = adev->dm.dc;
3496 struct common_irq_params *c_irq_params;
3497 struct dc_interrupt_params int_params = {0};
3498 int r;
3499 int i;
660d5406
WL
3500#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3501 static const unsigned int vrtl_int_srcid[] = {
3502 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3503 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3504 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3505 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3506 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3507 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3508 };
3509#endif
ff5ef992
AD
3510
3511 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3512 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3513
1f6010a9
DF
3514 /*
3515 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
 3516	 * 1. Register a set() function with the base driver.
 3517	 * The base driver will call the set() function to enable/disable an
 3518	 * interrupt in DC hardware.
 3519	 * 2. Register amdgpu_dm_irq_handler().
 3520	 * The base driver will call amdgpu_dm_irq_handler() for ALL interrupts
 3521	 * coming from DC hardware.
 3522	 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
 3523	 * for acknowledging and handling.
1f6010a9 3524 */
ff5ef992
AD
3525
3526 /* Use VSTARTUP interrupt */
3527 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3528 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3529 i++) {
3760f76c 3530 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
3531
3532 if (r) {
3533 DRM_ERROR("Failed to add crtc irq id!\n");
3534 return r;
3535 }
3536
3537 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3538 int_params.irq_source =
3539 dc_interrupt_to_irq_source(dc, i, 0);
3540
3541 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3542
3543 c_irq_params->adev = adev;
3544 c_irq_params->irq_src = int_params.irq_source;
3545
2346ef47
NK
3546 amdgpu_dm_irq_register_interrupt(
3547 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3548 }
3549
86bc2219
WL
3550 /* Use otg vertical line interrupt */
3551#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
660d5406
WL
3552 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3553 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3554 vrtl_int_srcid[i], &adev->vline0_irq);
86bc2219
WL
3555
3556 if (r) {
3557 DRM_ERROR("Failed to add vline0 irq id!\n");
3558 return r;
3559 }
3560
3561 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3562 int_params.irq_source =
660d5406
WL
3563 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3564
3565 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3566 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3567 break;
3568 }
86bc2219
WL
3569
3570 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3571 - DC_IRQ_SOURCE_DC1_VLINE0];
3572
3573 c_irq_params->adev = adev;
3574 c_irq_params->irq_src = int_params.irq_source;
3575
3576 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3577 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3578 }
3579#endif
3580
2346ef47
NK
3581 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3582 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3583 * to trigger at end of each vblank, regardless of state of the lock,
3584 * matching DCE behaviour.
3585 */
3586 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3587 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3588 i++) {
3589 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3590
3591 if (r) {
3592 DRM_ERROR("Failed to add vupdate irq id!\n");
3593 return r;
3594 }
3595
3596 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3597 int_params.irq_source =
3598 dc_interrupt_to_irq_source(dc, i, 0);
3599
3600 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3601
3602 c_irq_params->adev = adev;
3603 c_irq_params->irq_src = int_params.irq_source;
3604
ff5ef992 3605 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 3606 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
3607 }
3608
ff5ef992
AD
3609 /* Use GRPH_PFLIP interrupt */
3610 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3611 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3612 i++) {
3760f76c 3613 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
3614 if (r) {
3615 DRM_ERROR("Failed to add page flip irq id!\n");
3616 return r;
3617 }
3618
3619 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3620 int_params.irq_source =
3621 dc_interrupt_to_irq_source(dc, i, 0);
3622
3623 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3624
3625 c_irq_params->adev = adev;
3626 c_irq_params->irq_src = int_params.irq_source;
3627
3628 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3629 dm_pflip_high_irq, c_irq_params);
3630
3631 }
3632
81927e28
JS
3633 /* HPD */
3634 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3635 &adev->hpd_irq);
3636 if (r) {
3637 DRM_ERROR("Failed to add hpd irq id!\n");
3638 return r;
3639 }
a08f16cf 3640
81927e28 3641 register_hpd_handlers(adev);
a08f16cf 3642
81927e28
JS
3643 return 0;
3644}
3645/* Register Outbox IRQ sources and initialize IRQ callbacks */
3646static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3647{
3648 struct dc *dc = adev->dm.dc;
3649 struct common_irq_params *c_irq_params;
3650 struct dc_interrupt_params int_params = {0};
3651 int r, i;
3652
3653 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3654 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3655
3656 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3657 &adev->dmub_outbox_irq);
3658 if (r) {
3659 DRM_ERROR("Failed to add outbox irq id!\n");
3660 return r;
3661 }
3662
3663 if (dc->ctx->dmub_srv) {
3664 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3665 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
a08f16cf 3666 int_params.irq_source =
81927e28 3667 dc_interrupt_to_irq_source(dc, i, 0);
a08f16cf 3668
81927e28 3669 c_irq_params = &adev->dm.dmub_outbox_params[0];
a08f16cf
LHM
3670
3671 c_irq_params->adev = adev;
3672 c_irq_params->irq_src = int_params.irq_source;
3673
3674 amdgpu_dm_irq_register_interrupt(adev, &int_params,
81927e28 3675 dm_dmub_outbox1_low_irq, c_irq_params);
ff5ef992
AD
3676 }
3677
ff5ef992
AD
3678 return 0;
3679}
3680#endif
3681
eb3dc897
NK
3682/*
3683 * Acquires the lock for the atomic state object and returns
3684 * the new atomic state.
3685 *
3686 * This should only be called during atomic check.
3687 */
3688static int dm_atomic_get_state(struct drm_atomic_state *state,
3689 struct dm_atomic_state **dm_state)
3690{
3691 struct drm_device *dev = state->dev;
1348969a 3692 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3693 struct amdgpu_display_manager *dm = &adev->dm;
3694 struct drm_private_state *priv_state;
eb3dc897
NK
3695
3696 if (*dm_state)
3697 return 0;
3698
eb3dc897
NK
3699 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3700 if (IS_ERR(priv_state))
3701 return PTR_ERR(priv_state);
3702
3703 *dm_state = to_dm_atomic_state(priv_state);
3704
3705 return 0;
3706}
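/*
 * Editorial sketch (illustrative only): dm_atomic_get_state() caches its
 * result through the caller-provided pointer, so the private-object lock is
 * taken at most once per atomic check no matter how many steps ask for the
 * DM state. A hypothetical caller:
 */
#if 0
static int example_check_step(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = NULL;
	int ret;

	ret = dm_atomic_get_state(state, &dm_state);	/* locks on first call */
	if (ret)
		return ret;

	/* Later calls with the same pointer return 0 without re-locking. */
	return dm_atomic_get_state(state, &dm_state);
}
#endif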
3707
dfd84d90 3708static struct dm_atomic_state *
eb3dc897
NK
3709dm_atomic_get_new_state(struct drm_atomic_state *state)
3710{
3711 struct drm_device *dev = state->dev;
1348969a 3712 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3713 struct amdgpu_display_manager *dm = &adev->dm;
3714 struct drm_private_obj *obj;
3715 struct drm_private_state *new_obj_state;
3716 int i;
3717
3718 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3719 if (obj->funcs == dm->atomic_obj.funcs)
3720 return to_dm_atomic_state(new_obj_state);
3721 }
3722
3723 return NULL;
3724}
3725
eb3dc897
NK
3726static struct drm_private_state *
3727dm_atomic_duplicate_state(struct drm_private_obj *obj)
3728{
3729 struct dm_atomic_state *old_state, *new_state;
3730
3731 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3732 if (!new_state)
3733 return NULL;
3734
3735 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3736
813d20dc
AW
3737 old_state = to_dm_atomic_state(obj->state);
3738
3739 if (old_state && old_state->context)
3740 new_state->context = dc_copy_state(old_state->context);
3741
eb3dc897
NK
3742 if (!new_state->context) {
3743 kfree(new_state);
3744 return NULL;
3745 }
3746
eb3dc897
NK
3747 return &new_state->base;
3748}
3749
3750static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3751 struct drm_private_state *state)
3752{
3753 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3754
3755 if (dm_state && dm_state->context)
3756 dc_release_state(dm_state->context);
3757
3758 kfree(dm_state);
3759}
3760
3761static struct drm_private_state_funcs dm_atomic_state_funcs = {
3762 .atomic_duplicate_state = dm_atomic_duplicate_state,
3763 .atomic_destroy_state = dm_atomic_destroy_state,
3764};
3765
4562236b
HW
3766static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3767{
eb3dc897 3768 struct dm_atomic_state *state;
4562236b
HW
3769 int r;
3770
3771 adev->mode_info.mode_config_initialized = true;
3772
4a580877
LT
3773 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3774 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3775
4a580877
LT
3776 adev_to_drm(adev)->mode_config.max_width = 16384;
3777 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3778
4a580877
LT
3779 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3780 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3781 /* indicates support for immediate flip */
4a580877 3782 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3783
4a580877 3784 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3785
eb3dc897
NK
3786 state = kzalloc(sizeof(*state), GFP_KERNEL);
3787 if (!state)
3788 return -ENOMEM;
3789
813d20dc 3790 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3791 if (!state->context) {
3792 kfree(state);
3793 return -ENOMEM;
3794 }
3795
3796 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3797
4a580877 3798 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3799 &adev->dm.atomic_obj,
eb3dc897
NK
3800 &state->base,
3801 &dm_atomic_state_funcs);
3802
3dc9b1ce 3803 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3804 if (r) {
3805 dc_release_state(state->context);
3806 kfree(state);
4562236b 3807 return r;
b67a468a 3808 }
4562236b 3809
6ce8f316 3810 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3811 if (r) {
3812 dc_release_state(state->context);
3813 kfree(state);
6ce8f316 3814 return r;
b67a468a 3815 }
6ce8f316 3816
4562236b
HW
3817 return 0;
3818}
3819
206bbafe
DF
3820#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3821#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3822#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3823
4562236b
HW
3824#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3825 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3826
7fd13bae
AD
3827static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3828 int bl_idx)
206bbafe
DF
3829{
3830#if defined(CONFIG_ACPI)
3831 struct amdgpu_dm_backlight_caps caps;
3832
58965855
FS
3833 memset(&caps, 0, sizeof(caps));
3834
7fd13bae 3835 if (dm->backlight_caps[bl_idx].caps_valid)
206bbafe
DF
3836 return;
3837
f9b7f370 3838 amdgpu_acpi_get_backlight_caps(&caps);
206bbafe 3839 if (caps.caps_valid) {
7fd13bae 3840 dm->backlight_caps[bl_idx].caps_valid = true;
94562810
RS
3841 if (caps.aux_support)
3842 return;
7fd13bae
AD
3843 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3844 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
206bbafe 3845 } else {
7fd13bae 3846 dm->backlight_caps[bl_idx].min_input_signal =
206bbafe 3847 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
7fd13bae 3848 dm->backlight_caps[bl_idx].max_input_signal =
206bbafe
DF
3849 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3850 }
3851#else
7fd13bae 3852 if (dm->backlight_caps[bl_idx].aux_support)
94562810
RS
3853 return;
3854
7fd13bae
AD
3855 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3856 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3857#endif
3858}
3859
69d9f427
AM
3860static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3861 unsigned *min, unsigned *max)
94562810 3862{
94562810 3863 if (!caps)
69d9f427 3864 return 0;
94562810 3865
69d9f427
AM
3866 if (caps->aux_support) {
3867 // Firmware limits are in nits, DC API wants millinits.
3868 *max = 1000 * caps->aux_max_input_signal;
3869 *min = 1000 * caps->aux_min_input_signal;
94562810 3870 } else {
69d9f427
AM
3871 // Firmware limits are 8-bit, PWM control is 16-bit.
3872 *max = 0x101 * caps->max_input_signal;
3873 *min = 0x101 * caps->min_input_signal;
94562810 3874 }
69d9f427
AM
3875 return 1;
3876}
94562810 3877
69d9f427
AM
3878static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3879 uint32_t brightness)
3880{
3881 unsigned min, max;
94562810 3882
69d9f427
AM
3883 if (!get_brightness_range(caps, &min, &max))
3884 return brightness;
3885
3886 // Rescale 0..255 to min..max
3887 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3888 AMDGPU_MAX_BL_LEVEL);
3889}
3890
3891static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3892 uint32_t brightness)
3893{
3894 unsigned min, max;
3895
3896 if (!get_brightness_range(caps, &min, &max))
3897 return brightness;
3898
3899 if (brightness < min)
3900 return 0;
3901 // Rescale min..max to 0..255
3902 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3903 max - min);
94562810
RS
3904}
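/*
 * Editorial worked example using the AMDGPU_DM_DEFAULT_* PWM limits above:
 * with min_input_signal = 12 and max_input_signal = 255 the PWM range is
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535. A user brightness
 * of 128 (out of AMDGPU_MAX_BL_LEVEL = 255) then maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432, and
 * convert_brightness_to_user(&caps, 34432) rounds back to 128.
 */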
3905
3d6c9164 3906static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
7fd13bae 3907 int bl_idx,
3d6c9164 3908 u32 user_brightness)
4562236b 3909{
206bbafe 3910 struct amdgpu_dm_backlight_caps caps;
7fd13bae
AD
3911 struct dc_link *link;
3912 u32 brightness;
94562810 3913 bool rc;
4562236b 3914
7fd13bae
AD
3915 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3916 caps = dm->backlight_caps[bl_idx];
94562810 3917
7fd13bae 3918 dm->brightness[bl_idx] = user_brightness;
1f579254
AD
3919 /* update scratch register */
3920 if (bl_idx == 0)
3921 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
7fd13bae
AD
3922 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3923 link = (struct dc_link *)dm->backlight_link[bl_idx];
94562810 3924
3d6c9164 3925 /* Change brightness based on AUX property */
118b4627 3926 if (caps.aux_support) {
7fd13bae
AD
3927 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3928 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3929 if (!rc)
3930 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
118b4627 3931 } else {
7fd13bae
AD
3932 rc = dc_link_set_backlight_level(link, brightness, 0);
3933 if (!rc)
3934 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
118b4627 3935 }
94562810
RS
3936
3937 return rc ? 0 : 1;
4562236b
HW
3938}
3939
3d6c9164 3940static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4562236b 3941{
620a0d27 3942 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 3943 int i;
3d6c9164 3944
7fd13bae
AD
3945 for (i = 0; i < dm->num_of_edps; i++) {
3946 if (bd == dm->backlight_dev[i])
3947 break;
3948 }
3949 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3950 i = 0;
3951 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3d6c9164
AD
3952
3953 return 0;
3954}
3955
7fd13bae
AD
3956static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3957 int bl_idx)
3d6c9164 3958{
0ad3e64e 3959 struct amdgpu_dm_backlight_caps caps;
7fd13bae 3960 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
0ad3e64e 3961
7fd13bae
AD
3962 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3963 caps = dm->backlight_caps[bl_idx];
620a0d27 3964
0ad3e64e 3965 if (caps.aux_support) {
0ad3e64e
AD
3966 u32 avg, peak;
3967 bool rc;
3968
3969 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3970 if (!rc)
7fd13bae 3971 return dm->brightness[bl_idx];
0ad3e64e
AD
3972 return convert_brightness_to_user(&caps, avg);
3973 } else {
7fd13bae 3974 int ret = dc_link_get_backlight_level(link);
0ad3e64e
AD
3975
3976 if (ret == DC_ERROR_UNEXPECTED)
7fd13bae 3977 return dm->brightness[bl_idx];
0ad3e64e
AD
3978 return convert_brightness_to_user(&caps, ret);
3979 }
4562236b
HW
3980}
3981
3d6c9164
AD
3982static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3983{
3984 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 3985 int i;
3d6c9164 3986
7fd13bae
AD
3987 for (i = 0; i < dm->num_of_edps; i++) {
3988 if (bd == dm->backlight_dev[i])
3989 break;
3990 }
3991 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3992 i = 0;
3993 return amdgpu_dm_backlight_get_level(dm, i);
3d6c9164
AD
3994}
3995
4562236b 3996static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 3997 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
3998 .get_brightness = amdgpu_dm_backlight_get_brightness,
3999 .update_status = amdgpu_dm_backlight_update_status,
4000};
4001
7578ecda
AD
4002static void
4003amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
4004{
4005 char bl_name[16];
4006 struct backlight_properties props = { 0 };
4007
7fd13bae
AD
4008 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4009 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
206bbafe 4010
4562236b 4011 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 4012 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
4013 props.type = BACKLIGHT_RAW;
4014
4015 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
7fd13bae 4016 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4562236b 4017
7fd13bae
AD
4018 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4019 adev_to_drm(dm->adev)->dev,
4020 dm,
4021 &amdgpu_dm_backlight_ops,
4022 &props);
4562236b 4023
7fd13bae 4024 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4562236b
HW
4025 DRM_ERROR("DM: Backlight registration failed!\n");
4026 else
f1ad2f5e 4027 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b 4028}
4562236b
HW
4029#endif
4030
df534fff 4031static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 4032 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
4033 enum drm_plane_type plane_type,
4034 const struct dc_plane_cap *plane_cap)
df534fff 4035{
f180b4bc 4036 struct drm_plane *plane;
df534fff
S
4037 unsigned long possible_crtcs;
4038 int ret = 0;
4039
f180b4bc 4040 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
4041 if (!plane) {
4042 DRM_ERROR("KMS: Failed to allocate plane\n");
4043 return -ENOMEM;
4044 }
b2fddb13 4045 plane->type = plane_type;
df534fff
S
4046
4047 /*
b2fddb13
NK
4048 * HACK: IGT tests expect that the primary plane for a CRTC
 4049	 * can only have one possible CRTC. Only expose support for
 4050	 * any CRTC if the plane is not going to be used as a primary
 4051	 * plane for a CRTC - i.e. for overlay or underlay planes.
df534fff
S
4052 */
4053 possible_crtcs = 1 << plane_id;
4054 if (plane_id >= dm->dc->caps.max_streams)
4055 possible_crtcs = 0xff;
4056
cc1fec57 4057 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
4058
4059 if (ret) {
4060 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 4061 kfree(plane);
df534fff
S
4062 return ret;
4063 }
4064
54087768
NK
4065 if (mode_info)
4066 mode_info->planes[plane_id] = plane;
4067
df534fff
S
4068 return ret;
4069}
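/*
 * Editorial note on the possible_crtcs mask above: it is a bitmask of CRTC
 * indexes, so a primary plane with plane_id 0 gets 1 << 0 = 0x01 (CRTC 0
 * only), while an overlay/underlay plane (plane_id >= max_streams) gets
 * 0xff and may be bound to any CRTC.
 */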
4070
89fc8d4e
HW
4071
4072static void register_backlight_device(struct amdgpu_display_manager *dm,
4073 struct dc_link *link)
4074{
4075#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4076 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4077
4078 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4079 link->type != dc_connection_none) {
1f6010a9
DF
4080 /*
 4081	 * Even if registration failed, we should continue with
89fc8d4e
HW
 4082	 * DM initialization because not having a backlight control
 4083	 * is better than a black screen.
4084 */
7fd13bae 4085 if (!dm->backlight_dev[dm->num_of_edps])
118b4627 4086 amdgpu_dm_register_backlight_device(dm);
89fc8d4e 4087
7fd13bae 4088 if (dm->backlight_dev[dm->num_of_edps]) {
118b4627
ML
4089 dm->backlight_link[dm->num_of_edps] = link;
4090 dm->num_of_edps++;
4091 }
89fc8d4e
HW
4092 }
4093#endif
4094}
4095
4096
1f6010a9
DF
4097/*
4098 * In this architecture, the association
4562236b
HW
4099 * connector -> encoder -> crtc
 4100 * is not really required. The crtc and connector will hold the
 4101 * display_index as an abstraction to use with the DAL component.
4102 *
4103 * Returns 0 on success
4104 */
7578ecda 4105static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
4106{
4107 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 4108 int32_t i;
c84dec2f 4109 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 4110 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 4111 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 4112 uint32_t link_cnt;
cc1fec57 4113 int32_t primary_planes;
fbbdadf2 4114 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 4115 const struct dc_plane_cap *plane;
9470620e 4116 bool psr_feature_enabled = false;
4562236b 4117
d58159de
AD
4118 dm->display_indexes_num = dm->dc->caps.max_streams;
4119 /* Update the actual used number of crtc */
4120 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4121
4562236b 4122 link_cnt = dm->dc->caps.max_links;
4562236b
HW
4123 if (amdgpu_dm_mode_config_init(dm->adev)) {
4124 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 4125 return -EINVAL;
4562236b
HW
4126 }
4127
b2fddb13
NK
4128 /* There is one primary plane per CRTC */
4129 primary_planes = dm->dc->caps.max_streams;
54087768 4130 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 4131
b2fddb13
NK
4132 /*
4133 * Initialize primary planes, implicit planes for legacy IOCTLS.
4134 * Order is reversed to match iteration order in atomic check.
4135 */
4136 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
4137 plane = &dm->dc->caps.planes[i];
4138
b2fddb13 4139 if (initialize_plane(dm, mode_info, i,
cc1fec57 4140 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 4141 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 4142 goto fail;
d4e13b0d 4143 }
df534fff 4144 }
92f3ac40 4145
0d579c7e
NK
4146 /*
4147 * Initialize overlay planes, index starting after primary planes.
4148 * These planes have a higher DRM index than the primary planes since
4149 * they should be considered as having a higher z-order.
4150 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
4151 *
4152 * Only support DCN for now, and only expose one so we don't encourage
4153 * userspace to use up all the pipes.
0d579c7e 4154 */
cc1fec57
NK
4155 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4156 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4157
4158 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4159 continue;
4160
4161 if (!plane->blends_with_above || !plane->blends_with_below)
4162 continue;
4163
ea36ad34 4164 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
4165 continue;
4166
54087768 4167 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 4168 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 4169 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 4170 goto fail;
d4e13b0d 4171 }
cc1fec57
NK
4172
4173 /* Only create one overlay plane. */
4174 break;
d4e13b0d 4175 }
4562236b 4176
d4e13b0d 4177 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 4178 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 4179 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 4180 goto fail;
4562236b 4181 }
4562236b 4182
50610b74 4183#if defined(CONFIG_DRM_AMD_DC_DCN)
81927e28 4184 /* Use Outbox interrupt */
1d789535 4185 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4186 case IP_VERSION(3, 0, 0):
4187 case IP_VERSION(3, 1, 2):
4188 case IP_VERSION(3, 1, 3):
4189 case IP_VERSION(2, 1, 0):
81927e28
JS
4190 if (register_outbox_irq_handlers(dm->adev)) {
4191 DRM_ERROR("DM: Failed to initialize IRQ\n");
4192 goto fail;
4193 }
4194 break;
4195 default:
c08182f2 4196 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
1d789535 4197 adev->ip_versions[DCE_HWIP][0]);
81927e28 4198 }
9470620e
NK
4199
4200 /* Determine whether to enable PSR support by default. */
4201 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4202 switch (adev->ip_versions[DCE_HWIP][0]) {
4203 case IP_VERSION(3, 1, 2):
4204 case IP_VERSION(3, 1, 3):
4205 psr_feature_enabled = true;
4206 break;
4207 default:
4208 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4209 break;
4210 }
4211 }
50610b74 4212#endif
81927e28 4213
4562236b
HW
4214 /* loops over all connectors on the board */
4215 for (i = 0; i < link_cnt; i++) {
89fc8d4e 4216 struct dc_link *link = NULL;
4562236b
HW
4217
4218 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4219 DRM_ERROR(
4220 "KMS: Cannot support more than %d display indexes\n",
4221 AMDGPU_DM_MAX_DISPLAY_INDEX);
4222 continue;
4223 }
4224
4225 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4226 if (!aconnector)
cd8a2ae8 4227 goto fail;
4562236b
HW
4228
4229 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 4230 if (!aencoder)
cd8a2ae8 4231 goto fail;
4562236b
HW
4232
4233 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4234 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 4235 goto fail;
4562236b
HW
4236 }
4237
4238 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4239 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 4240 goto fail;
4562236b
HW
4241 }
4242
89fc8d4e
HW
4243 link = dc_get_link_at_index(dm->dc, i);
4244
fbbdadf2
BL
4245 if (!dc_link_detect_sink(link, &new_connection_type))
4246 DRM_ERROR("KMS: Failed to detect connector\n");
4247
4248 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4249 emulated_link_detect(link);
4250 amdgpu_dm_update_connector_after_detect(aconnector);
4251
4252 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 4253 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 4254 register_backlight_device(dm, link);
b295ce39
RL
4255 if (dm->num_of_edps)
4256 update_connector_ext_caps(aconnector);
9470620e 4257 if (psr_feature_enabled)
397a9bc5 4258 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
4259 }
4260
4261
4562236b
HW
4262 }
4263
70897848
NK
4264 /*
4265 * Disable vblank IRQs aggressively for power-saving.
4266 *
4267 * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
4268 * is also supported.
4269 */
4270 adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
4271
4562236b
HW
4272 /* Software is initialized. Now we can register interrupt handlers. */
4273 switch (adev->asic_type) {
55e56389
MR
4274#if defined(CONFIG_DRM_AMD_DC_SI)
4275 case CHIP_TAHITI:
4276 case CHIP_PITCAIRN:
4277 case CHIP_VERDE:
4278 case CHIP_OLAND:
4279 if (dce60_register_irq_handlers(dm->adev)) {
4280 DRM_ERROR("DM: Failed to initialize IRQ\n");
4281 goto fail;
4282 }
4283 break;
4284#endif
4562236b
HW
4285 case CHIP_BONAIRE:
4286 case CHIP_HAWAII:
cd4b356f
AD
4287 case CHIP_KAVERI:
4288 case CHIP_KABINI:
4289 case CHIP_MULLINS:
4562236b
HW
4290 case CHIP_TONGA:
4291 case CHIP_FIJI:
4292 case CHIP_CARRIZO:
4293 case CHIP_STONEY:
4294 case CHIP_POLARIS11:
4295 case CHIP_POLARIS10:
b264d345 4296 case CHIP_POLARIS12:
7737de91 4297 case CHIP_VEGAM:
2c8ad2d5 4298 case CHIP_VEGA10:
2325ff30 4299 case CHIP_VEGA12:
1fe6bf2f 4300 case CHIP_VEGA20:
4562236b
HW
4301 if (dce110_register_irq_handlers(dm->adev)) {
4302 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 4303 goto fail;
4562236b
HW
4304 }
4305 break;
4306 default:
c08182f2 4307#if defined(CONFIG_DRM_AMD_DC_DCN)
1d789535 4308 switch (adev->ip_versions[DCE_HWIP][0]) {
559f591d
AD
4309 case IP_VERSION(1, 0, 0):
4310 case IP_VERSION(1, 0, 1):
c08182f2
AD
4311 case IP_VERSION(2, 0, 2):
4312 case IP_VERSION(2, 0, 3):
4313 case IP_VERSION(2, 0, 0):
4314 case IP_VERSION(2, 1, 0):
4315 case IP_VERSION(3, 0, 0):
4316 case IP_VERSION(3, 0, 2):
4317 case IP_VERSION(3, 0, 3):
4318 case IP_VERSION(3, 0, 1):
4319 case IP_VERSION(3, 1, 2):
4320 case IP_VERSION(3, 1, 3):
4321 if (dcn10_register_irq_handlers(dm->adev)) {
4322 DRM_ERROR("DM: Failed to initialize IRQ\n");
4323 goto fail;
4324 }
4325 break;
4326 default:
2cbc6f42 4327 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
1d789535 4328 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4329 goto fail;
c08182f2
AD
4330 }
4331#endif
2cbc6f42 4332 break;
4562236b
HW
4333 }
4334
4562236b 4335 return 0;
cd8a2ae8 4336fail:
4562236b 4337 kfree(aencoder);
4562236b 4338 kfree(aconnector);
54087768 4339
59d0f396 4340 return -EINVAL;
4562236b
HW
4341}
4342
7578ecda 4343static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b 4344{
eb3dc897 4345 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
4346 return;
4347}
4348
4349/******************************************************************************
4350 * amdgpu_display_funcs functions
4351 *****************************************************************************/
4352
1f6010a9 4353/*
4562236b
HW
4354 * dm_bandwidth_update - program display watermarks
4355 *
4356 * @adev: amdgpu_device pointer
4357 *
4358 * Calculate and program the display watermarks and line buffer allocation.
4359 */
4360static void dm_bandwidth_update(struct amdgpu_device *adev)
4361{
49c07a99 4362 /* TODO: implement later */
4562236b
HW
4363}
4364
39cc5be2 4365static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
4366 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4367 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
4368 .backlight_set_level = NULL, /* never called for DC */
4369 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
4370 .hpd_sense = NULL,/* called unconditionally */
4371 .hpd_set_polarity = NULL, /* called unconditionally */
4372 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4373 .page_flip_get_scanoutpos =
4374 dm_crtc_get_scanoutpos,/* called unconditionally */
4375 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4376 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4377};
4378
4379#if defined(CONFIG_DEBUG_KERNEL_DC)
4380
3ee6b26b
AD
4381static ssize_t s3_debug_store(struct device *device,
4382 struct device_attribute *attr,
4383 const char *buf,
4384 size_t count)
4562236b
HW
4385{
4386 int ret;
4387 int s3_state;
ef1de361 4388 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 4389 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
4390
4391 ret = kstrtoint(buf, 0, &s3_state);
4392
4393 if (ret == 0) {
4394 if (s3_state) {
4395 dm_resume(adev);
4a580877 4396 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
4397 } else
4398 dm_suspend(adev);
4399 }
4400
4401 return ret == 0 ? count : 0;
4402}
4403
4404DEVICE_ATTR_WO(s3_debug);
4405
4406#endif
4407
4408static int dm_early_init(void *handle)
4409{
4410 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4411
4562236b 4412 switch (adev->asic_type) {
55e56389
MR
4413#if defined(CONFIG_DRM_AMD_DC_SI)
4414 case CHIP_TAHITI:
4415 case CHIP_PITCAIRN:
4416 case CHIP_VERDE:
4417 adev->mode_info.num_crtc = 6;
4418 adev->mode_info.num_hpd = 6;
4419 adev->mode_info.num_dig = 6;
4420 break;
4421 case CHIP_OLAND:
4422 adev->mode_info.num_crtc = 2;
4423 adev->mode_info.num_hpd = 2;
4424 adev->mode_info.num_dig = 2;
4425 break;
4426#endif
4562236b
HW
4427 case CHIP_BONAIRE:
4428 case CHIP_HAWAII:
4429 adev->mode_info.num_crtc = 6;
4430 adev->mode_info.num_hpd = 6;
4431 adev->mode_info.num_dig = 6;
4562236b 4432 break;
cd4b356f
AD
4433 case CHIP_KAVERI:
4434 adev->mode_info.num_crtc = 4;
4435 adev->mode_info.num_hpd = 6;
4436 adev->mode_info.num_dig = 7;
cd4b356f
AD
4437 break;
4438 case CHIP_KABINI:
4439 case CHIP_MULLINS:
4440 adev->mode_info.num_crtc = 2;
4441 adev->mode_info.num_hpd = 6;
4442 adev->mode_info.num_dig = 6;
cd4b356f 4443 break;
4562236b
HW
4444 case CHIP_FIJI:
4445 case CHIP_TONGA:
4446 adev->mode_info.num_crtc = 6;
4447 adev->mode_info.num_hpd = 6;
4448 adev->mode_info.num_dig = 7;
4562236b
HW
4449 break;
4450 case CHIP_CARRIZO:
4451 adev->mode_info.num_crtc = 3;
4452 adev->mode_info.num_hpd = 6;
4453 adev->mode_info.num_dig = 9;
4562236b
HW
4454 break;
4455 case CHIP_STONEY:
4456 adev->mode_info.num_crtc = 2;
4457 adev->mode_info.num_hpd = 6;
4458 adev->mode_info.num_dig = 9;
4562236b
HW
4459 break;
4460 case CHIP_POLARIS11:
b264d345 4461 case CHIP_POLARIS12:
4562236b
HW
4462 adev->mode_info.num_crtc = 5;
4463 adev->mode_info.num_hpd = 5;
4464 adev->mode_info.num_dig = 5;
4562236b
HW
4465 break;
4466 case CHIP_POLARIS10:
7737de91 4467 case CHIP_VEGAM:
4562236b
HW
4468 adev->mode_info.num_crtc = 6;
4469 adev->mode_info.num_hpd = 6;
4470 adev->mode_info.num_dig = 6;
4562236b 4471 break;
2c8ad2d5 4472 case CHIP_VEGA10:
2325ff30 4473 case CHIP_VEGA12:
1fe6bf2f 4474 case CHIP_VEGA20:
2c8ad2d5
AD
4475 adev->mode_info.num_crtc = 6;
4476 adev->mode_info.num_hpd = 6;
4477 adev->mode_info.num_dig = 6;
4478 break;
4562236b 4479 default:
c08182f2 4480#if defined(CONFIG_DRM_AMD_DC_DCN)
1d789535 4481 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4482 case IP_VERSION(2, 0, 2):
4483 case IP_VERSION(3, 0, 0):
4484 adev->mode_info.num_crtc = 6;
4485 adev->mode_info.num_hpd = 6;
4486 adev->mode_info.num_dig = 6;
4487 break;
4488 case IP_VERSION(2, 0, 0):
4489 case IP_VERSION(3, 0, 2):
4490 adev->mode_info.num_crtc = 5;
4491 adev->mode_info.num_hpd = 5;
4492 adev->mode_info.num_dig = 5;
4493 break;
4494 case IP_VERSION(2, 0, 3):
4495 case IP_VERSION(3, 0, 3):
4496 adev->mode_info.num_crtc = 2;
4497 adev->mode_info.num_hpd = 2;
4498 adev->mode_info.num_dig = 2;
4499 break;
559f591d
AD
4500 case IP_VERSION(1, 0, 0):
4501 case IP_VERSION(1, 0, 1):
c08182f2
AD
4502 case IP_VERSION(3, 0, 1):
4503 case IP_VERSION(2, 1, 0):
4504 case IP_VERSION(3, 1, 2):
4505 case IP_VERSION(3, 1, 3):
4506 adev->mode_info.num_crtc = 4;
4507 adev->mode_info.num_hpd = 4;
4508 adev->mode_info.num_dig = 4;
4509 break;
4510 default:
2cbc6f42 4511 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
1d789535 4512 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4513 return -EINVAL;
c08182f2
AD
4514 }
4515#endif
2cbc6f42 4516 break;
4562236b
HW
4517 }
4518
c8dd5715
MD
4519 amdgpu_dm_set_irq_funcs(adev);
4520
39cc5be2
AD
4521 if (adev->mode_info.funcs == NULL)
4522 adev->mode_info.funcs = &dm_display_funcs;
4523
1f6010a9
DF
4524 /*
4525 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 4526 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
4527 * amdgpu_device_init()
4528 */
4562236b
HW
4529#if defined(CONFIG_DEBUG_KERNEL_DC)
4530 device_create_file(
4a580877 4531 adev_to_drm(adev)->dev,
4562236b
HW
4532 &dev_attr_s3_debug);
4533#endif
4534
4535 return 0;
4536}
4537
9b690ef3 4538static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
4539 struct dc_stream_state *new_stream,
4540 struct dc_stream_state *old_stream)
9b690ef3 4541{
2afda735 4542 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4543}
4544
4545static bool modereset_required(struct drm_crtc_state *crtc_state)
4546{
2afda735 4547 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4548}
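/*
 * Editorial note: the two predicates above split drm_atomic_crtc_needs_modeset()
 * by the resulting CRTC state - a modeset is required when the CRTC ends up
 * active, a mode reset when it ends up inactive - so at most one of them is
 * true for a given crtc_state.
 */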
4549
7578ecda 4550static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
4551{
4552 drm_encoder_cleanup(encoder);
4553 kfree(encoder);
4554}
4555
4556static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4557 .destroy = amdgpu_dm_encoder_destroy,
4558};
4559
e7b07cee 4560
6300b3bd
MK
4561static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4562 struct drm_framebuffer *fb,
4563 int *min_downscale, int *max_upscale)
4564{
4565 struct amdgpu_device *adev = drm_to_adev(dev);
4566 struct dc *dc = adev->dm.dc;
4567 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4568 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4569
4570 switch (fb->format->format) {
4571 case DRM_FORMAT_P010:
4572 case DRM_FORMAT_NV12:
4573 case DRM_FORMAT_NV21:
4574 *max_upscale = plane_cap->max_upscale_factor.nv12;
4575 *min_downscale = plane_cap->max_downscale_factor.nv12;
4576 break;
4577
4578 case DRM_FORMAT_XRGB16161616F:
4579 case DRM_FORMAT_ARGB16161616F:
4580 case DRM_FORMAT_XBGR16161616F:
4581 case DRM_FORMAT_ABGR16161616F:
4582 *max_upscale = plane_cap->max_upscale_factor.fp16;
4583 *min_downscale = plane_cap->max_downscale_factor.fp16;
4584 break;
4585
4586 default:
4587 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4588 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4589 break;
4590 }
4591
4592 /*
 4593	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
 4594	 * scaling factor of 1.0 == 1000 units.
4595 */
4596 if (*max_upscale == 1)
4597 *max_upscale = 1000;
4598
4599 if (*min_downscale == 1)
4600 *min_downscale = 1000;
4601}
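/*
 * Editorial example: the factors are in thousandths, so a reported
 * max_upscale_factor of 16000 allows up to 16.0x upscaling and a
 * max_downscale_factor of 250 allows shrinking to 0.25x (1/4 size);
 * a raw factor of 1 is the "no scaling" marker normalized to 1000 above.
 */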
4602
4603
4375d625
S
4604static int fill_dc_scaling_info(struct amdgpu_device *adev,
4605 const struct drm_plane_state *state,
695af5f9 4606 struct dc_scaling_info *scaling_info)
e7b07cee 4607{
6300b3bd 4608 int scale_w, scale_h, min_downscale, max_upscale;
e7b07cee 4609
695af5f9 4610 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 4611
695af5f9
NK
4612 /* Source is fixed 16.16 but we ignore mantissa for now... */
4613 scaling_info->src_rect.x = state->src_x >> 16;
4614 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 4615
d89f6048
HW
4616 /*
4617 * For reasons we don't (yet) fully understand a non-zero
4618 * src_y coordinate into an NV12 buffer can cause a
4375d625
S
4619 * system hang on DCN1x.
4620 * To avoid hangs (and maybe be overly cautious)
d89f6048
HW
4621 * let's reject both non-zero src_x and src_y.
4622 *
4623 * We currently know of only one use-case to reproduce a
4624 * scenario with non-zero src_x and src_y for NV12, which
4625 * is to gesture the YouTube Android app into full screen
4626 * on ChromeOS.
4627 */
4375d625
S
4628 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4629 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4630 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4631 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
d89f6048
HW
4632 return -EINVAL;
4633
695af5f9
NK
4634 scaling_info->src_rect.width = state->src_w >> 16;
4635 if (scaling_info->src_rect.width == 0)
4636 return -EINVAL;
4637
4638 scaling_info->src_rect.height = state->src_h >> 16;
4639 if (scaling_info->src_rect.height == 0)
4640 return -EINVAL;
4641
4642 scaling_info->dst_rect.x = state->crtc_x;
4643 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
4644
4645 if (state->crtc_w == 0)
695af5f9 4646 return -EINVAL;
e7b07cee 4647
695af5f9 4648 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
4649
4650 if (state->crtc_h == 0)
695af5f9 4651 return -EINVAL;
e7b07cee 4652
695af5f9 4653 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 4654
695af5f9
NK
4655 /* DRM doesn't specify clipping on destination output. */
4656 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 4657
6300b3bd
MK
4658 /* Validate scaling per-format with DC plane caps */
4659 if (state->plane && state->plane->dev && state->fb) {
4660 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4661 &min_downscale, &max_upscale);
4662 } else {
4663 min_downscale = 250;
4664 max_upscale = 16000;
4665 }
4666
6491f0c0
NK
4667 scale_w = scaling_info->dst_rect.width * 1000 /
4668 scaling_info->src_rect.width;
e7b07cee 4669
6300b3bd 4670 if (scale_w < min_downscale || scale_w > max_upscale)
6491f0c0
NK
4671 return -EINVAL;
4672
4673 scale_h = scaling_info->dst_rect.height * 1000 /
4674 scaling_info->src_rect.height;
4675
6300b3bd 4676 if (scale_h < min_downscale || scale_h > max_upscale)
6491f0c0
NK
4677 return -EINVAL;
4678
695af5f9
NK
4679 /*
 4680	 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
 4681	 * assume reasonable defaults based on the format.
4682 */
e7b07cee 4683
695af5f9 4684 return 0;
4562236b 4685}
695af5f9 4686
a3241991
BN
4687static void
4688fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4689 uint64_t tiling_flags)
e7b07cee 4690{
a3241991
BN
4691 /* Fill GFX8 params */
4692 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4693 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 4694
a3241991
BN
4695 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4696 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4697 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4698 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4699 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 4700
a3241991
BN
4701 /* XXX fix me for VI */
4702 tiling_info->gfx8.num_banks = num_banks;
4703 tiling_info->gfx8.array_mode =
4704 DC_ARRAY_2D_TILED_THIN1;
4705 tiling_info->gfx8.tile_split = tile_split;
4706 tiling_info->gfx8.bank_width = bankw;
4707 tiling_info->gfx8.bank_height = bankh;
4708 tiling_info->gfx8.tile_aspect = mtaspect;
4709 tiling_info->gfx8.tile_mode =
4710 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4711 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4712 == DC_ARRAY_1D_TILED_THIN1) {
4713 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
4714 }
4715
a3241991
BN
4716 tiling_info->gfx8.pipe_config =
4717 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
4718}
4719
a3241991
BN
4720static void
4721fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4722 union dc_tiling_info *tiling_info)
4723{
4724 tiling_info->gfx9.num_pipes =
4725 adev->gfx.config.gb_addr_config_fields.num_pipes;
4726 tiling_info->gfx9.num_banks =
4727 adev->gfx.config.gb_addr_config_fields.num_banks;
4728 tiling_info->gfx9.pipe_interleave =
4729 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4730 tiling_info->gfx9.num_shader_engines =
4731 adev->gfx.config.gb_addr_config_fields.num_se;
4732 tiling_info->gfx9.max_compressed_frags =
4733 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4734 tiling_info->gfx9.num_rb_per_se =
4735 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4736 tiling_info->gfx9.shaderEnable = 1;
1d789535 4737 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
a3241991 4738 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
7df7e505
NK
4739}
4740
695af5f9 4741static int
a3241991
BN
4742validate_dcc(struct amdgpu_device *adev,
4743 const enum surface_pixel_format format,
4744 const enum dc_rotation_angle rotation,
4745 const union dc_tiling_info *tiling_info,
4746 const struct dc_plane_dcc_param *dcc,
4747 const struct dc_plane_address *address,
4748 const struct plane_size *plane_size)
7df7e505
NK
4749{
4750 struct dc *dc = adev->dm.dc;
8daa1218
NC
4751 struct dc_dcc_surface_param input;
4752 struct dc_surface_dcc_cap output;
7df7e505 4753
8daa1218
NC
4754 memset(&input, 0, sizeof(input));
4755 memset(&output, 0, sizeof(output));
4756
a3241991 4757 if (!dcc->enable)
87b7ebc2
RS
4758 return 0;
4759
a3241991
BN
4760 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4761 !dc->cap_funcs.get_dcc_compression_cap)
09e5665a 4762 return -EINVAL;
7df7e505 4763
695af5f9 4764 input.format = format;
12e2b2d4
DL
4765 input.surface_size.width = plane_size->surface_size.width;
4766 input.surface_size.height = plane_size->surface_size.height;
695af5f9 4767 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 4768
695af5f9 4769 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 4770 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 4771 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
4772 input.scan = SCAN_DIRECTION_VERTICAL;
4773
4774 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 4775 return -EINVAL;
7df7e505
NK
4776
4777 if (!output.capable)
09e5665a 4778 return -EINVAL;
7df7e505 4779
a3241991
BN
4780 if (dcc->independent_64b_blks == 0 &&
4781 output.grph.rgb.independent_64b_blks != 0)
09e5665a 4782 return -EINVAL;
7df7e505 4783
a3241991
BN
4784 return 0;
4785}
4786
37384b3f
BN
4787static bool
4788modifier_has_dcc(uint64_t modifier)
4789{
4790 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4791}
4792
4793static unsigned
4794modifier_gfx9_swizzle_mode(uint64_t modifier)
4795{
4796 if (modifier == DRM_FORMAT_MOD_LINEAR)
4797 return 0;
4798
4799 return AMD_FMT_MOD_GET(TILE, modifier);
4800}
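/*
 * Editorial note: AMD format modifiers are u64 bitfields built with
 * AMD_FMT_MOD_SET() and read back with AMD_FMT_MOD_GET(), so the two
 * helpers above are plain field extractions. For example, a modifier of
 * AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 * AMD_FMT_MOD_SET(DCC, 1) reports true from modifier_has_dcc() and the
 * 64K_S_X swizzle from modifier_gfx9_swizzle_mode().
 */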
4801
dfbbfe3c
BN
4802static const struct drm_format_info *
4803amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4804{
816853f9 4805 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
4806}
4807
37384b3f
BN
4808static void
4809fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4810 union dc_tiling_info *tiling_info,
4811 uint64_t modifier)
4812{
4813 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4814 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4815 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4816 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4817
4818 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4819
4820 if (!IS_AMD_FMT_MOD(modifier))
4821 return;
4822
4823 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4824 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4825
4826 if (adev->family >= AMDGPU_FAMILY_NV) {
4827 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4828 } else {
4829 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4830
4831 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4832 }
4833}
4834
faa37f54
BN
4835enum dm_micro_swizzle {
4836 MICRO_SWIZZLE_Z = 0,
4837 MICRO_SWIZZLE_S = 1,
4838 MICRO_SWIZZLE_D = 2,
4839 MICRO_SWIZZLE_R = 3
4840};
4841
4842static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4843 uint32_t format,
4844 uint64_t modifier)
4845{
4846 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4847 const struct drm_format_info *info = drm_format_info(format);
fe180178 4848 int i;
faa37f54
BN
4849
4850 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4851
4852 if (!info)
4853 return false;
4854
4855 /*
fe180178
QZ
4856 * We always have to allow these modifiers:
4857 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4858 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
faa37f54 4859 */
fe180178
QZ
4860 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4861 modifier == DRM_FORMAT_MOD_INVALID) {
faa37f54 4862 return true;
fe180178 4863 }
faa37f54 4864
fe180178
QZ
4865 /* Check that the modifier is on the list of the plane's supported modifiers. */
4866 for (i = 0; i < plane->modifier_count; i++) {
4867 if (modifier == plane->modifiers[i])
4868 break;
4869 }
4870 if (i == plane->modifier_count)
faa37f54
BN
4871 return false;
4872
4873 /*
4874 * For D swizzle the canonical modifier depends on the bpp, so check
4875 * it here.
4876 */
4877 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4878 adev->family >= AMDGPU_FAMILY_NV) {
4879 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4880 return false;
4881 }
4882
4883 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4884 info->cpp[0] < 8)
4885 return false;
4886
4887 if (modifier_has_dcc(modifier)) {
4888 /* Per radeonsi comments 16/64 bpp are more complicated. */
4889 if (info->cpp[0] != 4)
4890 return false;
951796f2
SS
4891 /* We support multi-planar formats, but not when combined with
4892 * additional DCC metadata planes. */
4893 if (info->num_planes > 1)
4894 return false;
faa37f54
BN
4895 }
4896
4897 return true;
4898}
4899
4900static void
4901add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4902{
4903 if (!*mods)
4904 return;
4905
4906 if (*cap - *size < 1) {
4907 uint64_t new_cap = *cap * 2;
4908 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4909
4910 if (!new_mods) {
4911 kfree(*mods);
4912 *mods = NULL;
4913 return;
4914 }
4915
4916 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4917 kfree(*mods);
4918 *mods = new_mods;
4919 *cap = new_cap;
4920 }
4921
4922 (*mods)[*size] = mod;
4923 *size += 1;
4924}
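/*
 * Editorial note on add_modifier(): the array grows geometrically (capacity
 * doubles when full) and an allocation failure frees the list and NULLs
 * *mods, turning every later call into a no-op, so callers need only one
 * final NULL check. A minimal usage sketch, assuming the initial allocation
 * happens elsewhere:
 */
#if 0
	uint64_t *mods = kmalloc(4 * sizeof(uint64_t), GFP_KERNEL);
	uint64_t size = 0, capacity = 4;

	add_modifier(&mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
	if (!mods)
		return -ENOMEM;	/* the helper dropped the list on OOM */
#endif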
4925
4926static void
4927add_gfx9_modifiers(const struct amdgpu_device *adev,
4928 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4929{
4930 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4931 int pipe_xor_bits = min(8, pipes +
4932 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4933 int bank_xor_bits = min(8 - pipe_xor_bits,
4934 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4935 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4936 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4937
4938
4939 if (adev->family == AMDGPU_FAMILY_RV) {
4940 /* Raven2 and later */
4941 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4942
4943 /*
4944 * No _D DCC swizzles yet because we only allow 32bpp, which
4945 * doesn't support _D on DCN
4946 */
4947
4948 if (has_constant_encode) {
4949 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4950 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4951 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4952 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4953 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4954 AMD_FMT_MOD_SET(DCC, 1) |
4955 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4956 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4957 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4958 }
4959
4960 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4961 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4962 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4963 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4964 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4965 AMD_FMT_MOD_SET(DCC, 1) |
4966 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4967 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4968 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4969
4970 if (has_constant_encode) {
4971 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4972 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4973 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4974 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4975 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4976 AMD_FMT_MOD_SET(DCC, 1) |
4977 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4978 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4979 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4980
4981 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4982 AMD_FMT_MOD_SET(RB, rb) |
4983 AMD_FMT_MOD_SET(PIPE, pipes));
4984 }
4985
4986 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4987 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4988 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4989 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4990 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4991 AMD_FMT_MOD_SET(DCC, 1) |
4992 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4993 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4994 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4995 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4996 AMD_FMT_MOD_SET(RB, rb) |
4997 AMD_FMT_MOD_SET(PIPE, pipes));
4998 }
4999
5000 /*
5001 * Only supported for 64bpp on Raven, will be filtered on format in
5002 * dm_plane_format_mod_supported.
5003 */
5004 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5005 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5006 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5007 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5008 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5009
5010 if (adev->family == AMDGPU_FAMILY_RV) {
5011 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5012 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5013 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5014 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5015 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5016 }
5017
5018 /*
5019 * Only supported for 64bpp on Raven, will be filtered on format in
5020 * dm_plane_format_mod_supported.
5021 */
5022 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5023 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5024 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5025
5026 if (adev->family == AMDGPU_FAMILY_RV) {
5027 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5028 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5029 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5030 }
5031}
5032
5033static void
5034add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5035 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5036{
5037 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5038
5039 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5040 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5041 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5042 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5043 AMD_FMT_MOD_SET(DCC, 1) |
5044 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5045 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5046 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5047
5048 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5049 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5050 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5051 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5052 AMD_FMT_MOD_SET(DCC, 1) |
5053 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5054 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5055 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5056 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5057
5058 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5059 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5060 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5061 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5062
5063 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5064 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5065 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5066 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5067
5068
5069 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5070 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5071 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5072 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5073
5074 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5075 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5076 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5077}
5078
5079static void
5080add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5081 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5082{
5083 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5084 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5085
5086 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5087 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5088 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5089 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5090 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5091 AMD_FMT_MOD_SET(DCC, 1) |
5092 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5093 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5094 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5095 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5096
7f6ab50a
JA
5097 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5098 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5099 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5100 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5101 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5102 AMD_FMT_MOD_SET(DCC, 1) |
5103 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5104 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5105 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5106
faa37f54
BN
5107 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5108 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5109 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5110 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5111 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5112 AMD_FMT_MOD_SET(DCC, 1) |
5113 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5114 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5115 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5116 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5117 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5118
7f6ab50a
JA
5119 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5120 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5121 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5122 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5123 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5124 AMD_FMT_MOD_SET(DCC, 1) |
5125 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5126 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5127 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5128 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5129
faa37f54
BN
5130 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5131 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5132 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5133 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5134 AMD_FMT_MOD_SET(PACKERS, pkrs));
5135
5136 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5137 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5138 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5139 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5140 AMD_FMT_MOD_SET(PACKERS, pkrs));
5141
5142 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5143 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5144 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5145 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5146
5147 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5148 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5149 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5150}
5151
5152static int
5153get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5154{
5155 uint64_t size = 0, capacity = 128;
5156 *mods = NULL;
5157
5158 /* We have not hooked up any pre-GFX9 modifiers. */
5159 if (adev->family < AMDGPU_FAMILY_AI)
5160 return 0;
5161
5162 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5163
5164 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5165 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5166 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5167 return *mods ? 0 : -ENOMEM;
5168 }
5169
5170 switch (adev->family) {
5171 case AMDGPU_FAMILY_AI:
5172 case AMDGPU_FAMILY_RV:
5173 add_gfx9_modifiers(adev, mods, &size, &capacity);
5174 break;
5175 case AMDGPU_FAMILY_NV:
5176 case AMDGPU_FAMILY_VGH:
1ebcaebd 5177 case AMDGPU_FAMILY_YC:
1d789535 5178 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
faa37f54
BN
5179 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5180 else
5181 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5182 break;
5183 }
5184
5185 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5186
5187 /* INVALID marks the end of the list. */
5188 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5189
5190 if (!*mods)
5191 return -ENOMEM;
5192
5193 return 0;
5194}
5195
37384b3f
BN
5196static int
5197fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5198 const struct amdgpu_framebuffer *afb,
5199 const enum surface_pixel_format format,
5200 const enum dc_rotation_angle rotation,
5201 const struct plane_size *plane_size,
5202 union dc_tiling_info *tiling_info,
5203 struct dc_plane_dcc_param *dcc,
5204 struct dc_plane_address *address,
5205 const bool force_disable_dcc)
5206{
5207 const uint64_t modifier = afb->base.modifier;
2be7f77f 5208 int ret = 0;
37384b3f
BN
5209
5210 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5211 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5212
5213 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5214 uint64_t dcc_address = afb->address + afb->base.offsets[1];
3d360154 5215 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
a86396c3 5216 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
37384b3f
BN
5217
5218 dcc->enable = 1;
5219 dcc->meta_pitch = afb->base.pitches[1];
3d360154 5220 dcc->independent_64b_blks = independent_64b_blks;
a86396c3
JA
5221 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5222 if (independent_64b_blks && independent_128b_blks)
f2e7d856 5223 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
a86396c3
JA
5224 else if (independent_128b_blks)
5225 dcc->dcc_ind_blk = hubp_ind_block_128b;
5226 else if (independent_64b_blks && !independent_128b_blks)
f2e7d856 5227 dcc->dcc_ind_blk = hubp_ind_block_64b;
a86396c3
JA
5228 else
5229 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5230 } else {
5231 if (independent_64b_blks)
5232 dcc->dcc_ind_blk = hubp_ind_block_64b;
5233 else
5234 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5235 }
37384b3f
BN
5236
5237 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5238 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5239 }
5240
5241 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5242 if (ret)
2be7f77f 5243 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
7df7e505 5244
2be7f77f 5245 return ret;
09e5665a
NK
5246}
5247
5248static int
320932bf 5249fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 5250 const struct amdgpu_framebuffer *afb,
695af5f9
NK
5251 const enum surface_pixel_format format,
5252 const enum dc_rotation_angle rotation,
5253 const uint64_t tiling_flags,
09e5665a 5254 union dc_tiling_info *tiling_info,
12e2b2d4 5255 struct plane_size *plane_size,
09e5665a 5256 struct dc_plane_dcc_param *dcc,
87b7ebc2 5257 struct dc_plane_address *address,
5888f07a 5258 bool tmz_surface,
87b7ebc2 5259 bool force_disable_dcc)
09e5665a 5260{
320932bf 5261 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
5262 int ret;
5263
5264 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 5265 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 5266 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
5267 memset(address, 0, sizeof(*address));
5268
5888f07a
HW
5269 address->tmz_surface = tmz_surface;
5270
695af5f9 5271 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
5272 uint64_t addr = afb->address + fb->offsets[0];
5273
12e2b2d4
DL
5274 plane_size->surface_size.x = 0;
5275 plane_size->surface_size.y = 0;
5276 plane_size->surface_size.width = fb->width;
5277 plane_size->surface_size.height = fb->height;
5278 plane_size->surface_pitch =
320932bf
NK
5279 fb->pitches[0] / fb->format->cpp[0];
5280
e0634e8d 5281 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
5282 address->grph.addr.low_part = lower_32_bits(addr);
5283 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 5284 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 5285 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 5286 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 5287
12e2b2d4
DL
5288 plane_size->surface_size.x = 0;
5289 plane_size->surface_size.y = 0;
5290 plane_size->surface_size.width = fb->width;
5291 plane_size->surface_size.height = fb->height;
5292 plane_size->surface_pitch =
320932bf
NK
5293 fb->pitches[0] / fb->format->cpp[0];
5294
12e2b2d4
DL
5295 plane_size->chroma_size.x = 0;
5296 plane_size->chroma_size.y = 0;
320932bf 5297 /* TODO: set these based on surface format */
12e2b2d4
DL
5298 plane_size->chroma_size.width = fb->width / 2;
5299 plane_size->chroma_size.height = fb->height / 2;
320932bf 5300
12e2b2d4 5301 plane_size->chroma_pitch =
320932bf
NK
5302 fb->pitches[1] / fb->format->cpp[1];
5303
e0634e8d
NK
5304 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5305 address->video_progressive.luma_addr.low_part =
be7b9b32 5306 lower_32_bits(luma_addr);
e0634e8d 5307 address->video_progressive.luma_addr.high_part =
be7b9b32 5308 upper_32_bits(luma_addr);
e0634e8d
NK
5309 address->video_progressive.chroma_addr.low_part =
5310 lower_32_bits(chroma_addr);
5311 address->video_progressive.chroma_addr.high_part =
5312 upper_32_bits(chroma_addr);
5313 }
09e5665a 5314
a3241991 5315 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
5316 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5317 rotation, plane_size,
5318 tiling_info, dcc,
5319 address,
5320 force_disable_dcc);
09e5665a
NK
5321 if (ret)
5322 return ret;
a3241991
BN
5323 } else {
5324 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
5325 }
5326
5327 return 0;
7df7e505
NK
5328}
5329
d74004b6 5330static void
695af5f9 5331fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
5332 bool *per_pixel_alpha, bool *global_alpha,
5333 int *global_alpha_value)
5334{
5335 *per_pixel_alpha = false;
5336 *global_alpha = false;
5337 *global_alpha_value = 0xff;
5338
5339 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5340 return;
5341
5342 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5343 static const uint32_t alpha_formats[] = {
5344 DRM_FORMAT_ARGB8888,
5345 DRM_FORMAT_RGBA8888,
5346 DRM_FORMAT_ABGR8888,
5347 };
5348 uint32_t format = plane_state->fb->format->format;
5349 unsigned int i;
5350
5351 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5352 if (format == alpha_formats[i]) {
5353 *per_pixel_alpha = true;
5354 break;
5355 }
5356 }
5357 }
5358
5359 if (plane_state->alpha < 0xffff) {
5360 *global_alpha = true;
5361 *global_alpha_value = plane_state->alpha >> 8;
5362 }
5363}
5364
004fefa3
NK
5365static int
5366fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 5367 const enum surface_pixel_format format,
004fefa3
NK
5368 enum dc_color_space *color_space)
5369{
5370 bool full_range;
5371
5372 *color_space = COLOR_SPACE_SRGB;
5373
5374 /* DRM color properties only affect non-RGB formats. */
695af5f9 5375 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
5376 return 0;
5377
5378 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5379
5380 switch (plane_state->color_encoding) {
5381 case DRM_COLOR_YCBCR_BT601:
5382 if (full_range)
5383 *color_space = COLOR_SPACE_YCBCR601;
5384 else
5385 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5386 break;
5387
5388 case DRM_COLOR_YCBCR_BT709:
5389 if (full_range)
5390 *color_space = COLOR_SPACE_YCBCR709;
5391 else
5392 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5393 break;
5394
5395 case DRM_COLOR_YCBCR_BT2020:
5396 if (full_range)
5397 *color_space = COLOR_SPACE_2020_YCBCR;
5398 else
5399 return -EINVAL;
5400 break;
5401
5402 default:
5403 return -EINVAL;
5404 }
5405
5406 return 0;
5407}
5408
695af5f9
NK
5409static int
5410fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5411 const struct drm_plane_state *plane_state,
5412 const uint64_t tiling_flags,
5413 struct dc_plane_info *plane_info,
87b7ebc2 5414 struct dc_plane_address *address,
5888f07a 5415 bool tmz_surface,
87b7ebc2 5416 bool force_disable_dcc)
695af5f9
NK
5417{
5418 const struct drm_framebuffer *fb = plane_state->fb;
5419 const struct amdgpu_framebuffer *afb =
5420 to_amdgpu_framebuffer(plane_state->fb);
695af5f9
NK
5421 int ret;
5422
5423 memset(plane_info, 0, sizeof(*plane_info));
5424
5425 switch (fb->format->format) {
5426 case DRM_FORMAT_C8:
5427 plane_info->format =
5428 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5429 break;
5430 case DRM_FORMAT_RGB565:
5431 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5432 break;
5433 case DRM_FORMAT_XRGB8888:
5434 case DRM_FORMAT_ARGB8888:
5435 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5436 break;
5437 case DRM_FORMAT_XRGB2101010:
5438 case DRM_FORMAT_ARGB2101010:
5439 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5440 break;
5441 case DRM_FORMAT_XBGR2101010:
5442 case DRM_FORMAT_ABGR2101010:
5443 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5444 break;
5445 case DRM_FORMAT_XBGR8888:
5446 case DRM_FORMAT_ABGR8888:
5447 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5448 break;
5449 case DRM_FORMAT_NV21:
5450 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5451 break;
5452 case DRM_FORMAT_NV12:
5453 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5454 break;
cbec6477
SW
5455 case DRM_FORMAT_P010:
5456 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5457 break;
492548dc
SW
5458 case DRM_FORMAT_XRGB16161616F:
5459 case DRM_FORMAT_ARGB16161616F:
5460 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5461 break;
2a5195dc
MK
5462 case DRM_FORMAT_XBGR16161616F:
5463 case DRM_FORMAT_ABGR16161616F:
5464 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5465 break;
58020403
MK
5466 case DRM_FORMAT_XRGB16161616:
5467 case DRM_FORMAT_ARGB16161616:
5468 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5469 break;
5470 case DRM_FORMAT_XBGR16161616:
5471 case DRM_FORMAT_ABGR16161616:
5472 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5473 break;
695af5f9
NK
5474 default:
5475 DRM_ERROR(
92f1d09c
SA
5476 "Unsupported screen format %p4cc\n",
5477 &fb->format->format);
695af5f9
NK
5478 return -EINVAL;
5479 }
5480
5481 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5482 case DRM_MODE_ROTATE_0:
5483 plane_info->rotation = ROTATION_ANGLE_0;
5484 break;
5485 case DRM_MODE_ROTATE_90:
5486 plane_info->rotation = ROTATION_ANGLE_90;
5487 break;
5488 case DRM_MODE_ROTATE_180:
5489 plane_info->rotation = ROTATION_ANGLE_180;
5490 break;
5491 case DRM_MODE_ROTATE_270:
5492 plane_info->rotation = ROTATION_ANGLE_270;
5493 break;
5494 default:
5495 plane_info->rotation = ROTATION_ANGLE_0;
5496 break;
5497 }
5498
5499 plane_info->visible = true;
5500 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5501
6d83a32d
MS
5502 plane_info->layer_index = 0;
5503
695af5f9
NK
5504 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5505 &plane_info->color_space);
5506 if (ret)
5507 return ret;
5508
5509 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5510 plane_info->rotation, tiling_flags,
5511 &plane_info->tiling_info,
5512 &plane_info->plane_size,
5888f07a 5513 &plane_info->dcc, address, tmz_surface,
87b7ebc2 5514 force_disable_dcc);
695af5f9
NK
5515 if (ret)
5516 return ret;
5517
5518 fill_blending_from_plane_state(
5519 plane_state, &plane_info->per_pixel_alpha,
5520 &plane_info->global_alpha, &plane_info->global_alpha_value);
5521
5522 return 0;
5523}
5524
5525static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5526 struct dc_plane_state *dc_plane_state,
5527 struct drm_plane_state *plane_state,
5528 struct drm_crtc_state *crtc_state)
e7b07cee 5529{
cf020d49 5530 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 5531 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
5532 struct dc_scaling_info scaling_info;
5533 struct dc_plane_info plane_info;
695af5f9 5534 int ret;
87b7ebc2 5535 bool force_disable_dcc = false;
e7b07cee 5536
4375d625 5537 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
695af5f9
NK
5538 if (ret)
5539 return ret;
e7b07cee 5540
695af5f9
NK
5541 dc_plane_state->src_rect = scaling_info.src_rect;
5542 dc_plane_state->dst_rect = scaling_info.dst_rect;
5543 dc_plane_state->clip_rect = scaling_info.clip_rect;
5544 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 5545
87b7ebc2 5546 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 5547 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 5548 afb->tiling_flags,
695af5f9 5549 &plane_info,
87b7ebc2 5550 &dc_plane_state->address,
6eed95b0 5551 afb->tmz_surface,
87b7ebc2 5552 force_disable_dcc);
004fefa3
NK
5553 if (ret)
5554 return ret;
5555
695af5f9
NK
5556 dc_plane_state->format = plane_info.format;
5557 dc_plane_state->color_space = plane_info.color_space;
5559 dc_plane_state->plane_size = plane_info.plane_size;
5560 dc_plane_state->rotation = plane_info.rotation;
5561 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5562 dc_plane_state->stereo_format = plane_info.stereo_format;
5563 dc_plane_state->tiling_info = plane_info.tiling_info;
5564 dc_plane_state->visible = plane_info.visible;
5565 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5566 dc_plane_state->global_alpha = plane_info.global_alpha;
5567 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5568 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 5569 dc_plane_state->layer_index = plane_info.layer_index; // currently always 0
7afa0033 5570 dc_plane_state->flip_int_enabled = true;
695af5f9 5571
e277adc5
LSL
5572 /*
5573 * Always set input transfer function, since plane state is refreshed
5574 * every time.
5575 */
cf020d49
NK
5576 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5577 if (ret)
5578 return ret;
e7b07cee 5579
cf020d49 5580 return 0;
e7b07cee
HW
5581}
5582
3ee6b26b
AD
5583static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5584 const struct dm_connector_state *dm_state,
5585 struct dc_stream_state *stream)
e7b07cee
HW
5586{
5587 enum amdgpu_rmx_type rmx_type;
5588
5589 struct rect src = { 0 }; /* viewport in composition space */
5590 struct rect dst = { 0 }; /* stream addressable area */
5591
5592 /* no mode. nothing to be done */
5593 if (!mode)
5594 return;
5595
5596 /* Full screen scaling by default */
5597 src.width = mode->hdisplay;
5598 src.height = mode->vdisplay;
5599 dst.width = stream->timing.h_addressable;
5600 dst.height = stream->timing.v_addressable;
5601
f4791779
HW
5602 if (dm_state) {
5603 rmx_type = dm_state->scaling;
5604 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5605 if (src.width * dst.height <
5606 src.height * dst.width) {
5607 /* height needs less upscaling/more downscaling */
5608 dst.width = src.width *
5609 dst.height / src.height;
5610 } else {
5611 /* width needs less upscaling/more downscaling */
5612 dst.height = src.height *
5613 dst.width / src.width;
5614 }
5615 } else if (rmx_type == RMX_CENTER) {
5616 dst = src;
e7b07cee 5617 }
e7b07cee 5618
f4791779
HW
5619 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5620 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 5621
f4791779
HW
5622 if (dm_state->underscan_enable) {
5623 dst.x += dm_state->underscan_hborder / 2;
5624 dst.y += dm_state->underscan_vborder / 2;
5625 dst.width -= dm_state->underscan_hborder;
5626 dst.height -= dm_state->underscan_vborder;
5627 }
e7b07cee
HW
5628 }
5629
5630 stream->src = src;
5631 stream->dst = dst;
5632
4711c033
LT
5633 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5634 dst.x, dst.y, dst.width, dst.height);
e7b07cee
HW
5635
5636}
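/*
 * Worked example for the RMX_ASPECT math above (editor's illustration,
 * hypothetical numbers): scaling a 1280x720 source to a 1920x1200 panel.
 * src.width * dst.height = 1,536,000 is not less than src.height *
 * dst.width = 1,382,400, so width needs less upscaling:
 * dst.height = 720 * 1920 / 1280 = 1080. The resulting 1920x1080 rect is
 * then centered with dst.y = (1200 - 1080) / 2 = 60.
 */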
5637
3ee6b26b 5638static enum dc_color_depth
42ba01fc 5639convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 5640 bool is_y420, int requested_bpc)
e7b07cee 5641{
1bc22f20 5642 uint8_t bpc;
01c22997 5643
1bc22f20
SW
5644 if (is_y420) {
5645 bpc = 8;
5646
5647 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5648 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5649 bpc = 16;
5650 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5651 bpc = 12;
5652 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5653 bpc = 10;
5654 } else {
5655 bpc = (uint8_t)connector->display_info.bpc;
5656 /* Assume 8 bpc by default if no bpc is specified. */
5657 bpc = bpc ? bpc : 8;
5658 }
e7b07cee 5659
cbd14ae7 5660 if (requested_bpc > 0) {
01c22997
NK
5661 /*
5662 * Cap display bpc based on the user requested value.
5663 *
5664 * The value for state->max_bpc may not be correctly updated
5665 * depending on when the connector gets added to the state
5666 * or if this was called outside of atomic check, so it
5667 * can't be used directly.
5668 */
cbd14ae7 5669 bpc = min_t(u8, bpc, requested_bpc);
01c22997 5670
1825fd34
NK
5671 /* Round down to the nearest even number. */
5672 bpc = bpc - (bpc & 1);
5673 }
07e3a1cf 5674
e7b07cee
HW
5675 switch (bpc) {
5676 case 0:
1f6010a9
DF
5677 /*
5678 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
5679 * EDID revisions before 1.4
5680 * TODO: Fix edid parsing
5681 */
5682 return COLOR_DEPTH_888;
5683 case 6:
5684 return COLOR_DEPTH_666;
5685 case 8:
5686 return COLOR_DEPTH_888;
5687 case 10:
5688 return COLOR_DEPTH_101010;
5689 case 12:
5690 return COLOR_DEPTH_121212;
5691 case 14:
5692 return COLOR_DEPTH_141414;
5693 case 16:
5694 return COLOR_DEPTH_161616;
5695 default:
5696 return COLOR_DEPTH_UNDEFINED;
5697 }
5698}
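/*
 * Editor's illustration of the capping above (hypothetical numbers): a
 * sink advertising 12 bpc with a userspace requested_bpc of 11 yields
 * min(12, 11) = 11, which is then rounded down to the nearest even
 * value, 10 bpc, i.e. COLOR_DEPTH_101010.
 */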
5699
3ee6b26b
AD
5700static enum dc_aspect_ratio
5701get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 5702{
e11d4147
LSL
5703 /* 1-1 mapping, since both enums follow the HDMI spec. */
5704 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
5705}
5706
3ee6b26b
AD
5707static enum dc_color_space
5708get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
5709{
5710 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5711
5712 switch (dc_crtc_timing->pixel_encoding) {
5713 case PIXEL_ENCODING_YCBCR422:
5714 case PIXEL_ENCODING_YCBCR444:
5715 case PIXEL_ENCODING_YCBCR420:
5716 {
5717 /*
5718 * 27030 kHz is the separation point between HDTV and SDTV
5719 * according to the HDMI spec; we use YCbCr709 above it and
5720 * YCbCr601 below it
5721 */
380604e2 5722 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
5723 if (dc_crtc_timing->flags.Y_ONLY)
5724 color_space =
5725 COLOR_SPACE_YCBCR709_LIMITED;
5726 else
5727 color_space = COLOR_SPACE_YCBCR709;
5728 } else {
5729 if (dc_crtc_timing->flags.Y_ONLY)
5730 color_space =
5731 COLOR_SPACE_YCBCR601_LIMITED;
5732 else
5733 color_space = COLOR_SPACE_YCBCR601;
5734 }
5735
5736 }
5737 break;
5738 case PIXEL_ENCODING_RGB:
5739 color_space = COLOR_SPACE_SRGB;
5740 break;
5741
5742 default:
5743 WARN_ON(1);
5744 break;
5745 }
5746
5747 return color_space;
5748}
5749
ea117312
TA
5750static bool adjust_colour_depth_from_display_info(
5751 struct dc_crtc_timing *timing_out,
5752 const struct drm_display_info *info)
400443e8 5753{
ea117312 5754 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 5755 int normalized_clk;
400443e8 5756 do {
380604e2 5757 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
5758 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5759 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5760 normalized_clk /= 2;
5761 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
ea117312
TA
5762 switch (depth) {
5763 case COLOR_DEPTH_888:
5764 break;
400443e8
ML
5765 case COLOR_DEPTH_101010:
5766 normalized_clk = (normalized_clk * 30) / 24;
5767 break;
5768 case COLOR_DEPTH_121212:
5769 normalized_clk = (normalized_clk * 36) / 24;
5770 break;
5771 case COLOR_DEPTH_161616:
5772 normalized_clk = (normalized_clk * 48) / 24;
5773 break;
5774 default:
ea117312
TA
5775 /* The above depths are the only ones valid for HDMI. */
5776 return false;
400443e8 5777 }
ea117312
TA
5778 if (normalized_clk <= info->max_tmds_clock) {
5779 timing_out->display_color_depth = depth;
5780 return true;
5781 }
5782 } while (--depth > COLOR_DEPTH_666);
5783 return false;
400443e8 5784}
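/*
 * Worked example for the loop above (editor's illustration, hypothetical
 * numbers): a 4k60 YCbCr 4:4:4 stream has pix_clk_100hz = 5940000, so
 * normalized_clk starts at 594000 kHz. At 10 bpc that becomes
 * 594000 * 30 / 24 = 742500 kHz, which exceeds a 600000 kHz HDMI 2.0
 * max_tmds_clock, so the loop steps down to 8 bpc (594000 kHz fits)
 * and returns true.
 */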
e7b07cee 5785
42ba01fc
NK
5786static void fill_stream_properties_from_drm_display_mode(
5787 struct dc_stream_state *stream,
5788 const struct drm_display_mode *mode_in,
5789 const struct drm_connector *connector,
5790 const struct drm_connector_state *connector_state,
cbd14ae7
SW
5791 const struct dc_stream_state *old_stream,
5792 int requested_bpc)
e7b07cee
HW
5793{
5794 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 5795 const struct drm_display_info *info = &connector->display_info;
d4252eee 5796 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
5797 struct hdmi_vendor_infoframe hv_frame;
5798 struct hdmi_avi_infoframe avi_frame;
e7b07cee 5799
acf83f86
WL
5800 memset(&hv_frame, 0, sizeof(hv_frame));
5801 memset(&avi_frame, 0, sizeof(avi_frame));
5802
e7b07cee
HW
5803 timing_out->h_border_left = 0;
5804 timing_out->h_border_right = 0;
5805 timing_out->v_border_top = 0;
5806 timing_out->v_border_bottom = 0;
5807 /* TODO: un-hardcode */
fe61a2f1 5808 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 5809 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 5810 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
5811 else if (drm_mode_is_420_also(info, mode_in)
5812 && aconnector->force_yuv420_output)
5813 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 5814 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 5815 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
5816 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5817 else
5818 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5819
5820 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5821 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
5822 connector,
5823 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5824 requested_bpc);
e7b07cee
HW
5825 timing_out->scan_type = SCANNING_TYPE_NODATA;
5826 timing_out->hdmi_vic = 0;
b333730d
BL
5827
5828 if (old_stream) {
5829 timing_out->vic = old_stream->timing.vic;
5830 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5831 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5832 } else {
5833 timing_out->vic = drm_match_cea_mode(mode_in);
5834 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5835 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5836 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5837 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5838 }
e7b07cee 5839
1cb1d477
WL
5840 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5841 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5842 timing_out->vic = avi_frame.video_code;
5843 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5844 timing_out->hdmi_vic = hv_frame.vic;
5845 }
5846
fe8858bb
NC
5847 if (is_freesync_video_mode(mode_in, aconnector)) {
5848 timing_out->h_addressable = mode_in->hdisplay;
5849 timing_out->h_total = mode_in->htotal;
5850 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5851 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5852 timing_out->v_total = mode_in->vtotal;
5853 timing_out->v_addressable = mode_in->vdisplay;
5854 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5855 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5856 timing_out->pix_clk_100hz = mode_in->clock * 10;
5857 } else {
5858 timing_out->h_addressable = mode_in->crtc_hdisplay;
5859 timing_out->h_total = mode_in->crtc_htotal;
5860 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5861 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5862 timing_out->v_total = mode_in->crtc_vtotal;
5863 timing_out->v_addressable = mode_in->crtc_vdisplay;
5864 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5865 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5866 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5867 }
a85ba005 5868
e7b07cee 5869 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
5870
5871 stream->output_color_space = get_output_color_space(timing_out);
5872
e43a432c
AK
5873 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5874 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
5875 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5876 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5877 drm_mode_is_420_also(info, mode_in) &&
5878 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5879 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5880 adjust_colour_depth_from_display_info(timing_out, info);
5881 }
5882 }
e7b07cee
HW
5883}
5884
3ee6b26b
AD
5885static void fill_audio_info(struct audio_info *audio_info,
5886 const struct drm_connector *drm_connector,
5887 const struct dc_sink *dc_sink)
e7b07cee
HW
5888{
5889 int i = 0;
5890 int cea_revision = 0;
5891 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5892
5893 audio_info->manufacture_id = edid_caps->manufacturer_id;
5894 audio_info->product_id = edid_caps->product_id;
5895
5896 cea_revision = drm_connector->display_info.cea_rev;
5897
090afc1e 5898 strscpy(audio_info->display_name,
d2b2562c 5899 edid_caps->display_name,
090afc1e 5900 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 5901
b830ebc9 5902 if (cea_revision >= 3) {
e7b07cee
HW
5903 audio_info->mode_count = edid_caps->audio_mode_count;
5904
5905 for (i = 0; i < audio_info->mode_count; ++i) {
5906 audio_info->modes[i].format_code =
5907 (enum audio_format_code)
5908 (edid_caps->audio_modes[i].format_code);
5909 audio_info->modes[i].channel_count =
5910 edid_caps->audio_modes[i].channel_count;
5911 audio_info->modes[i].sample_rates.all =
5912 edid_caps->audio_modes[i].sample_rate;
5913 audio_info->modes[i].sample_size =
5914 edid_caps->audio_modes[i].sample_size;
5915 }
5916 }
5917
5918 audio_info->flags.all = edid_caps->speaker_flags;
5919
5920 /* TODO: We only check progressive mode; check interlaced mode too */
b830ebc9 5921 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5922 audio_info->video_latency = drm_connector->video_latency[0];
5923 audio_info->audio_latency = drm_connector->audio_latency[0];
5924 }
5925
5926 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5927
5928}
5929
3ee6b26b
AD
5930static void
5931copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5932 struct drm_display_mode *dst_mode)
e7b07cee
HW
5933{
5934 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5935 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5936 dst_mode->crtc_clock = src_mode->crtc_clock;
5937 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5938 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5939 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
5940 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5941 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5942 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5943 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5944 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5945 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5946 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5947 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5948}
5949
3ee6b26b
AD
5950static void
5951decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5952 const struct drm_display_mode *native_mode,
5953 bool scale_enabled)
e7b07cee
HW
5954{
5955 if (scale_enabled) {
5956 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5957 } else if (native_mode->clock == drm_mode->clock &&
5958 native_mode->htotal == drm_mode->htotal &&
5959 native_mode->vtotal == drm_mode->vtotal) {
5960 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5961 } else {
5962 /* no scaling and no amdgpu-inserted mode, nothing to patch */
5963 }
5964}
5965
aed15309
ML
5966static struct dc_sink *
5967create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5968{
2e0ac3d6 5969 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5970 struct dc_sink *sink = NULL;
2e0ac3d6
HW
5971 sink_init_data.link = aconnector->dc_link;
5972 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5973
5974 sink = dc_sink_create(&sink_init_data);
423788c7 5975 if (!sink) {
2e0ac3d6 5976 DRM_ERROR("Failed to create sink!\n");
aed15309 5977 return NULL;
423788c7 5978 }
2e0ac3d6 5979 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5980
aed15309 5981 return sink;
2e0ac3d6
HW
5982}
5983
fa2123db
ML
5984static void set_multisync_trigger_params(
5985 struct dc_stream_state *stream)
5986{
ec372186
ML
5987 struct dc_stream_state *master = NULL;
5988
fa2123db 5989 if (stream->triggered_crtc_reset.enabled) {
ec372186
ML
5990 master = stream->triggered_crtc_reset.event_source;
5991 stream->triggered_crtc_reset.event =
5992 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5993 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5994 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
fa2123db
ML
5995 }
5996}
5997
5998static void set_master_stream(struct dc_stream_state *stream_set[],
5999 int stream_count)
6000{
6001 int j, highest_rfr = 0, master_stream = 0;
6002
6003 for (j = 0; j < stream_count; j++) {
6004 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6005 int refresh_rate = 0;
6006
380604e2 6007 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
6008 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6009 if (refresh_rate > highest_rfr) {
6010 highest_rfr = refresh_rate;
6011 master_stream = j;
6012 }
6013 }
6014 }
6015 for (j = 0; j < stream_count; j++) {
03736f4c 6016 if (stream_set[j])
fa2123db
ML
6017 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6018 }
6019}
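/*
 * Editor's note: refresh_rate above is pix_clk_100hz * 100 /
 * (h_total * v_total), i.e. plain Hz from the raw timing. For a
 * hypothetical 1080p mode: 1485000 * 100 / (2200 * 1125) = 60 Hz.
 * The stream with the highest rate becomes the multisync master for
 * the triggered CRTC resets.
 */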
6020
6021static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6022{
6023 int i = 0;
ec372186 6024 struct dc_stream_state *stream;
fa2123db
ML
6025
6026 if (context->stream_count < 2)
6027 return;
6028 for (i = 0; i < context->stream_count ; i++) {
6029 if (!context->streams[i])
6030 continue;
1f6010a9
DF
6031 /*
6032 * TODO: add a function to read AMD VSDB bits and set
fa2123db 6033 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 6034 * For now it's set to false
fa2123db 6035 */
fa2123db 6036 }
ec372186 6037
fa2123db 6038 set_master_stream(context->streams, context->stream_count);
ec372186
ML
6039
6040 for (i = 0; i < context->stream_count ; i++) {
6041 stream = context->streams[i];
6042
6043 if (!stream)
6044 continue;
6045
6046 set_multisync_trigger_params(stream);
6047 }
fa2123db
ML
6048}
6049
ea2be5c0 6050#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6051static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6052 struct dc_sink *sink, struct dc_stream_state *stream,
6053 struct dsc_dec_dpcd_caps *dsc_caps)
6054{
6055 stream->timing.flags.DSC = 0;
6056
2665f63a
ML
6057 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6058 sink->sink_signal == SIGNAL_TYPE_EDP)) {
50b1f44e
FZ
6059 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6060 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6061 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6062 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6063 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6064 dsc_caps);
998b7ad2
FZ
6065 }
6066}
6067
2665f63a
ML
6068static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6069 struct dc_sink *sink, struct dc_stream_state *stream,
6070 struct dsc_dec_dpcd_caps *dsc_caps,
6071 uint32_t max_dsc_target_bpp_limit_override)
6072{
6073 const struct dc_link_settings *verified_link_cap = NULL;
6074 uint32_t link_bw_in_kbps;
6075 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6076 struct dc *dc = sink->ctx->dc;
6077 struct dc_dsc_bw_range bw_range = {0};
6078 struct dc_dsc_config dsc_cfg = {0};
6079
6080 verified_link_cap = dc_link_get_link_cap(stream->link);
6081 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6082 edp_min_bpp_x16 = 8 * 16;
6083 edp_max_bpp_x16 = 8 * 16;
6084
6085 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6086 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6087
6088 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6089 edp_min_bpp_x16 = edp_max_bpp_x16;
6090
6091 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6092 dc->debug.dsc_min_slice_height_override,
6093 edp_min_bpp_x16, edp_max_bpp_x16,
6094 dsc_caps,
6095 &stream->timing,
6096 &bw_range)) {
6097
6098 if (bw_range.max_kbps < link_bw_in_kbps) {
6099 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6100 dsc_caps,
6101 dc->debug.dsc_min_slice_height_override,
6102 max_dsc_target_bpp_limit_override,
6103 0,
6104 &stream->timing,
6105 &dsc_cfg)) {
6106 stream->timing.dsc_cfg = dsc_cfg;
6107 stream->timing.flags.DSC = 1;
6108 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6109 }
6110 return;
6111 }
6112 }
6113
6114 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6115 dsc_caps,
6116 dc->debug.dsc_min_slice_height_override,
6117 max_dsc_target_bpp_limit_override,
6118 link_bw_in_kbps,
6119 &stream->timing,
6120 &dsc_cfg)) {
6121 stream->timing.dsc_cfg = dsc_cfg;
6122 stream->timing.flags.DSC = 1;
6123 }
6124}
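/*
 * Editor's note on the eDP policy above: if the stream's bandwidth at
 * the maximum allowed bpp (bw_range.max_kbps) already fits within the
 * link, DSC is enabled with bits_per_pixel pinned at edp_max_bpp_x16;
 * otherwise dc_dsc_compute_config() is asked to fit the stream into
 * link_bw_in_kbps instead.
 */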
6125
998b7ad2
FZ
6126static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6127 struct dc_sink *sink, struct dc_stream_state *stream,
6128 struct dsc_dec_dpcd_caps *dsc_caps)
6129{
6130 struct drm_connector *drm_connector = &aconnector->base;
6131 uint32_t link_bandwidth_kbps;
f1c1a982 6132 uint32_t max_dsc_target_bpp_limit_override = 0;
2665f63a 6133 struct dc *dc = sink->ctx->dc;
50b1f44e
FZ
6134 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6135 uint32_t dsc_max_supported_bw_in_kbps;
998b7ad2
FZ
6136
6137 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6138 dc_link_get_link_cap(aconnector->dc_link));
f1c1a982
RL
6139
6140 if (stream->link && stream->link->local_sink)
6141 max_dsc_target_bpp_limit_override =
6142 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6143
998b7ad2
FZ
6144 /* Set DSC policy according to dsc_clock_en */
6145 dc_dsc_policy_set_enable_dsc_when_not_needed(
6146 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6147
2665f63a
ML
6148 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6149 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6150
6151 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6152
6153 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
50b1f44e
FZ
6154 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6155 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
998b7ad2
FZ
6156 dsc_caps,
6157 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
f1c1a982 6158 max_dsc_target_bpp_limit_override,
998b7ad2
FZ
6159 link_bandwidth_kbps,
6160 &stream->timing,
6161 &stream->timing.dsc_cfg)) {
50b1f44e
FZ
6162 stream->timing.flags.DSC = 1;
6163 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6164 __func__, drm_connector->name);
6165 }
6166 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6167 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6168 max_supported_bw_in_kbps = link_bandwidth_kbps;
6169 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6170
6171 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6172 max_supported_bw_in_kbps > 0 &&
6173 dsc_max_supported_bw_in_kbps > 0)
6174 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6175 dsc_caps,
6176 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6177 max_dsc_target_bpp_limit_override,
6178 dsc_max_supported_bw_in_kbps,
6179 &stream->timing,
6180 &stream->timing.dsc_cfg)) {
6181 stream->timing.flags.DSC = 1;
6182 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6183 __func__, drm_connector->name);
6184 }
998b7ad2
FZ
6185 }
6186 }
6187
6188 /* Overwrite the stream flag if DSC is enabled through debugfs */
6189 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6190 stream->timing.flags.DSC = 1;
6191
6192 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6193 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6194
6195 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6196 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6197
6198 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6199 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
998b7ad2 6200}
433e5dec 6201#endif /* CONFIG_DRM_AMD_DC_DCN */
998b7ad2 6202
5fd953a3
RS
6203/**
6204 * DOC: FreeSync Video
6205 *
6206 * When a userspace application wants to play a video, the content follows a
6207 * standard format definition that usually specifies the FPS for that format.
6208 * The list below illustrates some common video formats and their
6209 * expected FPS:
6210 *
6211 * - TV/NTSC (23.976 FPS)
6212 * - Cinema (24 FPS)
6213 * - TV/PAL (25 FPS)
6214 * - TV/NTSC (29.97 FPS)
6215 * - TV/NTSC (30 FPS)
6216 * - Cinema HFR (48 FPS)
6217 * - TV/PAL (50 FPS)
6218 * - Commonly used (60 FPS)
12cdff6b 6219 * - Multiples of 24 (48,72,96,120 FPS)
5fd953a3
RS
6220 *
6221 * The list of standard video formats is not huge and can be added to the
6222 * connector's modeset list beforehand. With that, userspace can leverage
6223 * FreeSync to extend the front porch in order to attain the target refresh
6224 * rate. Such a switch will happen seamlessly, without screen blanking or
6225 * reprogramming of the output in any other way. If the userspace requests a
6226 * modesetting change compatible with FreeSync modes that only differ in the
6227 * refresh rate, DC will skip the full update and avoid blink during the
6228 * transition. For example, the video player can change the modesetting from
6229 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6230 * causing any display blink. This same concept can be applied to a mode
6231 * setting change.
6232 */
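/*
 * Editor's sketch of the front-porch math described above; a
 * hypothetical helper, not part of the driver. With the pixel clock and
 * htotal held constant, only vtotal (via the front porch) changes per
 * target rate.
 */
#if 0 /* illustration only */
static u32 stretched_vtotal_for_refresh(u32 pix_clk_khz, u32 htotal,
					u32 target_refresh_hz)
{
	/* refresh = pix_clk / (htotal * vtotal), solved for vtotal */
	return (pix_clk_khz * 1000u) / (htotal * target_refresh_hz);
}
/* e.g. 148500 kHz, htotal 2200: 60 Hz -> 1125 lines; 24 Hz -> 2812 lines */
#endif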
a85ba005
NC
6233static struct drm_display_mode *
6234get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6235 bool use_probed_modes)
6236{
6237 struct drm_display_mode *m, *m_pref = NULL;
6238 u16 current_refresh, highest_refresh;
6239 struct list_head *list_head = use_probed_modes ?
6240 &aconnector->base.probed_modes :
6241 &aconnector->base.modes;
6242
6243 if (aconnector->freesync_vid_base.clock != 0)
6244 return &aconnector->freesync_vid_base;
6245
6246 /* Find the preferred mode */
6247 list_for_each_entry (m, list_head, head) {
6248 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6249 m_pref = m;
6250 break;
6251 }
6252 }
6253
6254 if (!m_pref) {
6255 /* Probably an EDID with no preferred mode. Fall back to the first entry */
6256 m_pref = list_first_entry_or_null(
6257 &aconnector->base.modes, struct drm_display_mode, head);
6258 if (!m_pref) {
6259 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6260 return NULL;
6261 }
6262 }
6263
6264 highest_refresh = drm_mode_vrefresh(m_pref);
6265
6266 /*
6267 * Find the mode with highest refresh rate with same resolution.
6268 * For some monitors, preferred mode is not the mode with highest
6269 * supported refresh rate.
6270 */
6271 list_for_each_entry (m, list_head, head) {
6272 current_refresh = drm_mode_vrefresh(m);
6273
6274 if (m->hdisplay == m_pref->hdisplay &&
6275 m->vdisplay == m_pref->vdisplay &&
6276 highest_refresh < current_refresh) {
6277 highest_refresh = current_refresh;
6278 m_pref = m;
6279 }
6280 }
6281
6282 aconnector->freesync_vid_base = *m_pref;
6283 return m_pref;
6284}
6285
fe8858bb 6286static bool is_freesync_video_mode(const struct drm_display_mode *mode,
a85ba005
NC
6287 struct amdgpu_dm_connector *aconnector)
6288{
6289 struct drm_display_mode *high_mode;
6290 int timing_diff;
6291
6292 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6293 if (!high_mode || !mode)
6294 return false;
6295
6296 timing_diff = high_mode->vtotal - mode->vtotal;
6297
6298 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6299 high_mode->hdisplay != mode->hdisplay ||
6300 high_mode->vdisplay != mode->vdisplay ||
6301 high_mode->hsync_start != mode->hsync_start ||
6302 high_mode->hsync_end != mode->hsync_end ||
6303 high_mode->htotal != mode->htotal ||
6304 high_mode->hskew != mode->hskew ||
6305 high_mode->vscan != mode->vscan ||
6306 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6307 high_mode->vsync_end - mode->vsync_end != timing_diff)
6308 return false;
6309 else
6310 return true;
6311}
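/*
 * Editor's illustration of the check above (hypothetical numbers): a
 * 1080p base mode with vtotal 1125 and a stretched 24 Hz variant with
 * vtotal 2812 qualify when clock, hdisplay/vdisplay and all horizontal
 * timings match and vsync_start/vsync_end shift by exactly the vtotal
 * difference, i.e. all of the added lines sit in the front porch.
 */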
6312
3ee6b26b
AD
6313static struct dc_stream_state *
6314create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6315 const struct drm_display_mode *drm_mode,
b333730d 6316 const struct dm_connector_state *dm_state,
cbd14ae7
SW
6317 const struct dc_stream_state *old_stream,
6318 int requested_bpc)
e7b07cee
HW
6319{
6320 struct drm_display_mode *preferred_mode = NULL;
391ef035 6321 struct drm_connector *drm_connector;
42ba01fc
NK
6322 const struct drm_connector_state *con_state =
6323 dm_state ? &dm_state->base : NULL;
0971c40e 6324 struct dc_stream_state *stream = NULL;
e7b07cee 6325 struct drm_display_mode mode = *drm_mode;
a85ba005
NC
6326 struct drm_display_mode saved_mode;
6327 struct drm_display_mode *freesync_mode = NULL;
e7b07cee 6328 bool native_mode_found = false;
b0781603
NK
6329 bool recalculate_timing = false;
6330 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
b333730d 6331 int mode_refresh;
58124bf8 6332 int preferred_refresh = 0;
defeb878 6333#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 6334 struct dsc_dec_dpcd_caps dsc_caps;
7c431455 6335#endif
aed15309 6336 struct dc_sink *sink = NULL;
a85ba005
NC
6337
6338 memset(&saved_mode, 0, sizeof(saved_mode));
6339
b830ebc9 6340 if (aconnector == NULL) {
e7b07cee 6341 DRM_ERROR("aconnector is NULL!\n");
64245fa7 6342 return stream;
e7b07cee
HW
6343 }
6344
e7b07cee 6345 drm_connector = &aconnector->base;
2e0ac3d6 6346
f4ac176e 6347 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
6348 sink = create_fake_sink(aconnector);
6349 if (!sink)
6350 return stream;
aed15309
ML
6351 } else {
6352 sink = aconnector->dc_sink;
dcd5fb82 6353 dc_sink_retain(sink);
f4ac176e 6354 }
2e0ac3d6 6355
aed15309 6356 stream = dc_create_stream_for_sink(sink);
4562236b 6357
b830ebc9 6358 if (stream == NULL) {
e7b07cee 6359 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 6360 goto finish;
e7b07cee
HW
6361 }
6362
ceb3dbb4
JL
6363 stream->dm_stream_context = aconnector;
6364
4a36fcba
WL
6365 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6366 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6367
e7b07cee
HW
6368 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6369 /* Search for preferred mode */
6370 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6371 native_mode_found = true;
6372 break;
6373 }
6374 }
6375 if (!native_mode_found)
6376 preferred_mode = list_first_entry_or_null(
6377 &aconnector->base.modes,
6378 struct drm_display_mode,
6379 head);
6380
b333730d
BL
6381 mode_refresh = drm_mode_vrefresh(&mode);
6382
b830ebc9 6383 if (preferred_mode == NULL) {
1f6010a9
DF
6384 /*
6385 * This may not be an error: the use case is when we have no
e7b07cee
HW
6386 * usermode calls to reset and set mode upon hotplug. In this
6387 * case, we call set mode ourselves to restore the previous mode,
6388 * and the mode list may not be filled in yet.
6389 */
f1ad2f5e 6390 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee 6391 } else {
b0781603 6392 recalculate_timing = amdgpu_freesync_vid_mode &&
a85ba005
NC
6393 is_freesync_video_mode(&mode, aconnector);
6394 if (recalculate_timing) {
6395 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6396 saved_mode = mode;
6397 mode = *freesync_mode;
6398 } else {
6399 decide_crtc_timing_for_drm_display_mode(
b0781603 6400 &mode, preferred_mode, scale);
a85ba005 6401
b0781603
NK
6402 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6403 }
e7b07cee
HW
6404 }
6405
a85ba005
NC
6406 if (recalculate_timing)
6407 drm_mode_set_crtcinfo(&saved_mode, 0);
fe8858bb 6408 else if (!dm_state)
f783577c
JFZ
6409 drm_mode_set_crtcinfo(&mode, 0);
6410
a85ba005 6411 /*
b333730d
BL
6412 * If scaling is enabled and refresh rate didn't change
6413 * we copy the vic and polarities of the old timings
6414 */
b0781603 6415 if (!scale || mode_refresh != preferred_refresh)
a85ba005
NC
6416 fill_stream_properties_from_drm_display_mode(
6417 stream, &mode, &aconnector->base, con_state, NULL,
6418 requested_bpc);
b333730d 6419 else
a85ba005
NC
6420 fill_stream_properties_from_drm_display_mode(
6421 stream, &mode, &aconnector->base, con_state, old_stream,
6422 requested_bpc);
b333730d 6423
defeb878 6424#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6425 /* SST DSC determination policy: apply DSC when the sink supports it and it is not force-disabled */
6426 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6427 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6428 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
39a4eb85
WL
6429#endif
6430
e7b07cee
HW
6431 update_stream_scaling_settings(&mode, dm_state, stream);
6432
6433 fill_audio_info(
6434 &stream->audio_info,
6435 drm_connector,
aed15309 6436 sink);
e7b07cee 6437
ceb3dbb4 6438 update_stream_signal(stream, sink);
9182b4cb 6439
d832fc3b 6440 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
6441 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6442
8a488f5d
RL
6443 if (stream->link->psr_settings.psr_feature_enabled) {
6444 /*
6445 * Decide whether the stream supports VSC SDP colorimetry
6446 * before building the VSC info packet.
6447 */
6448 stream->use_vsc_sdp_for_colorimetry = false;
6449 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6450 stream->use_vsc_sdp_for_colorimetry =
6451 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6452 } else {
6453 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6454 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 6455 }
8a488f5d 6456 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
1a365683
RL
6457 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6458
8c322309 6459 }
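/*
 * AMDGPU_DM_PSR_ENTRY_DELAY above appears intended to let that many
 * page flips complete before PSR is actually enabled, so the panel
 * is not dropped into self-refresh in the middle of a flip burst.
 */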
aed15309 6460finish:
dcd5fb82 6461 dc_sink_release(sink);
9e3efe3e 6462
e7b07cee
HW
6463 return stream;
6464}
6465
7578ecda 6466static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
6467{
6468 drm_crtc_cleanup(crtc);
6469 kfree(crtc);
6470}
6471
6472static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 6473 struct drm_crtc_state *state)
e7b07cee
HW
6474{
6475 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6476
6477 /* TODO: Destroy dc_stream objects once the stream object is flattened */
6478 if (cur->stream)
6479 dc_stream_release(cur->stream);
6480
6481
6482 __drm_atomic_helper_crtc_destroy_state(state);
6483
6484
6485 kfree(state);
6486}
6487
6488static void dm_crtc_reset_state(struct drm_crtc *crtc)
6489{
6490 struct dm_crtc_state *state;
6491
6492 if (crtc->state)
6493 dm_crtc_destroy_state(crtc, crtc->state);
6494
6495 state = kzalloc(sizeof(*state), GFP_KERNEL);
6496 if (WARN_ON(!state))
6497 return;
6498
1f8a52ec 6499 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
6500}
6501
6502static struct drm_crtc_state *
6503dm_crtc_duplicate_state(struct drm_crtc *crtc)
6504{
6505 struct dm_crtc_state *state, *cur;
6506
6507 if (WARN_ON(!crtc->state))
6508 return NULL;
6509
6510 cur = to_dm_crtc_state(crtc->state);
6511
2004f45e 6512 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
6513 if (!state)
6514 return NULL;
e7b07cee
HW
6515
6516 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6517
6518 if (cur->stream) {
6519 state->stream = cur->stream;
6520 dc_stream_retain(state->stream);
6521 }
6522
d6ef9b41 6523 state->active_planes = cur->active_planes;
98e6436d 6524 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 6525 state->abm_level = cur->abm_level;
bb47de73
NK
6526 state->vrr_supported = cur->vrr_supported;
6527 state->freesync_config = cur->freesync_config;
cf020d49
NK
6528 state->cm_has_degamma = cur->cm_has_degamma;
6529 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
035f5496 6530 state->force_dpms_off = cur->force_dpms_off;
e7b07cee
HW
6531 /* TODO: Duplicate dc_stream once the stream object is flattened */
6532
6533 return &state->base;
6534}
6535
86bc2219 6536#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
e69231c4 6537static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
86bc2219
WL
6538{
6539 crtc_debugfs_init(crtc);
6540
6541 return 0;
6542}
6543#endif
6544
d2574c33
MK
6545static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6546{
6547 enum dc_irq_source irq_source;
6548 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6549 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
6550 int rc;
6551
6552 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6553
6554 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6555
4711c033
LT
6556 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6557 acrtc->crtc_id, enable ? "en" : "dis", rc);
d2574c33
MK
6558 return rc;
6559}
589d2739
HW
6560
6561static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6562{
6563 enum dc_irq_source irq_source;
6564 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6565 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 6566 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
ea3b4242 6567#if defined(CONFIG_DRM_AMD_DC_DCN)
71338cb4 6568 struct amdgpu_display_manager *dm = &adev->dm;
09a5df6c 6569 struct vblank_control_work *work;
ea3b4242 6570#endif
d2574c33
MK
6571 int rc = 0;
6572
6573 if (enable) {
6574 /* vblank irq on -> Only need vupdate irq in vrr mode */
6575 if (amdgpu_dm_vrr_active(acrtc_state))
6576 rc = dm_set_vupdate_irq(crtc, true);
6577 } else {
6578 /* vblank irq off -> vupdate irq off */
6579 rc = dm_set_vupdate_irq(crtc, false);
6580 }
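/*
 * The pairing above follows from how VRR works: with a variable
 * front porch, VBLANK fires too early for accurate flip timing, so
 * core vblank handling is driven from VUPDATE (end of front porch)
 * while VRR is active, and the extra interrupt is off otherwise.
 */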
6581
6582 if (rc)
6583 return rc;
589d2739
HW
6584
6585 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
6586
6587 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6588 return -EBUSY;
6589
98ab5f35
BL
6590 if (amdgpu_in_reset(adev))
6591 return 0;
6592
4928b480 6593#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
6594 if (dm->vblank_control_workqueue) {
6595 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6596 if (!work)
6597 return -ENOMEM;
09a5df6c 6598
06dd1888
NK
6599 INIT_WORK(&work->work, vblank_control_worker);
6600 work->dm = dm;
6601 work->acrtc = acrtc;
6602 work->enable = enable;
09a5df6c 6603
06dd1888
NK
6604 if (acrtc_state->stream) {
6605 dc_stream_retain(acrtc_state->stream);
6606 work->stream = acrtc_state->stream;
6607 }
58aa1c50 6608
06dd1888
NK
6609 queue_work(dm->vblank_control_workqueue, &work->work);
6610 }
4928b480 6611#endif
71338cb4 6612
71338cb4 6613 return 0;
589d2739
HW
6614}
6615
6616static int dm_enable_vblank(struct drm_crtc *crtc)
6617{
6618 return dm_set_vblank(crtc, true);
6619}
6620
6621static void dm_disable_vblank(struct drm_crtc *crtc)
6622{
6623 dm_set_vblank(crtc, false);
6624}
6625
e7b07cee
HW
6626/* Implemented only the options currently availible for the driver */
6627static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6628 .reset = dm_crtc_reset_state,
6629 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
6630 .set_config = drm_atomic_helper_set_config,
6631 .page_flip = drm_atomic_helper_page_flip,
6632 .atomic_duplicate_state = dm_crtc_duplicate_state,
6633 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 6634 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 6635 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 6636 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 6637 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
6638 .enable_vblank = dm_enable_vblank,
6639 .disable_vblank = dm_disable_vblank,
e3eff4b5 6640 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
86bc2219
WL
6641#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6642 .late_register = amdgpu_dm_crtc_late_register,
6643#endif
e7b07cee
HW
6644};
6645
6646static enum drm_connector_status
6647amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6648{
6649 bool connected;
c84dec2f 6650 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6651
1f6010a9
DF
6652 /*
6653 * Notes:
e7b07cee
HW
6654 * 1. This interface is NOT called in context of HPD irq.
6655 * 2. This interface *is* called in the context of a user-mode ioctl,
6656 * which makes it a bad place for *any* MST-related activity.
6657 */
e7b07cee 6658
8580d60b
HW
6659 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6660 !aconnector->fake_enable)
e7b07cee
HW
6661 connected = (aconnector->dc_sink != NULL);
6662 else
6663 connected = (aconnector->base.force == DRM_FORCE_ON);
6664
0f877894
OV
6665 update_subconnector_property(aconnector);
6666
e7b07cee
HW
6667 return (connected ? connector_status_connected :
6668 connector_status_disconnected);
6669}
6670
3ee6b26b
AD
6671int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6672 struct drm_connector_state *connector_state,
6673 struct drm_property *property,
6674 uint64_t val)
e7b07cee
HW
6675{
6676 struct drm_device *dev = connector->dev;
1348969a 6677 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6678 struct dm_connector_state *dm_old_state =
6679 to_dm_connector_state(connector->state);
6680 struct dm_connector_state *dm_new_state =
6681 to_dm_connector_state(connector_state);
6682
6683 int ret = -EINVAL;
6684
6685 if (property == dev->mode_config.scaling_mode_property) {
6686 enum amdgpu_rmx_type rmx_type;
6687
6688 switch (val) {
6689 case DRM_MODE_SCALE_CENTER:
6690 rmx_type = RMX_CENTER;
6691 break;
6692 case DRM_MODE_SCALE_ASPECT:
6693 rmx_type = RMX_ASPECT;
6694 break;
6695 case DRM_MODE_SCALE_FULLSCREEN:
6696 rmx_type = RMX_FULL;
6697 break;
6698 case DRM_MODE_SCALE_NONE:
6699 default:
6700 rmx_type = RMX_OFF;
6701 break;
6702 }
6703
6704 if (dm_old_state->scaling == rmx_type)
6705 return 0;
6706
6707 dm_new_state->scaling = rmx_type;
6708 ret = 0;
6709 } else if (property == adev->mode_info.underscan_hborder_property) {
6710 dm_new_state->underscan_hborder = val;
6711 ret = 0;
6712 } else if (property == adev->mode_info.underscan_vborder_property) {
6713 dm_new_state->underscan_vborder = val;
6714 ret = 0;
6715 } else if (property == adev->mode_info.underscan_property) {
6716 dm_new_state->underscan_enable = val;
6717 ret = 0;
c1ee92f9
DF
6718 } else if (property == adev->mode_info.abm_level_property) {
6719 dm_new_state->abm_level = val;
6720 ret = 0;
e7b07cee
HW
6721 }
6722
6723 return ret;
6724}
6725
3ee6b26b
AD
6726int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6727 const struct drm_connector_state *state,
6728 struct drm_property *property,
6729 uint64_t *val)
e7b07cee
HW
6730{
6731 struct drm_device *dev = connector->dev;
1348969a 6732 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6733 struct dm_connector_state *dm_state =
6734 to_dm_connector_state(state);
6735 int ret = -EINVAL;
6736
6737 if (property == dev->mode_config.scaling_mode_property) {
6738 switch (dm_state->scaling) {
6739 case RMX_CENTER:
6740 *val = DRM_MODE_SCALE_CENTER;
6741 break;
6742 case RMX_ASPECT:
6743 *val = DRM_MODE_SCALE_ASPECT;
6744 break;
6745 case RMX_FULL:
6746 *val = DRM_MODE_SCALE_FULLSCREEN;
6747 break;
6748 case RMX_OFF:
6749 default:
6750 *val = DRM_MODE_SCALE_NONE;
6751 break;
6752 }
6753 ret = 0;
6754 } else if (property == adev->mode_info.underscan_hborder_property) {
6755 *val = dm_state->underscan_hborder;
6756 ret = 0;
6757 } else if (property == adev->mode_info.underscan_vborder_property) {
6758 *val = dm_state->underscan_vborder;
6759 ret = 0;
6760 } else if (property == adev->mode_info.underscan_property) {
6761 *val = dm_state->underscan_enable;
6762 ret = 0;
c1ee92f9
DF
6763 } else if (property == adev->mode_info.abm_level_property) {
6764 *val = dm_state->abm_level;
6765 ret = 0;
e7b07cee 6766 }
c1ee92f9 6767
e7b07cee
HW
6768 return ret;
6769}
6770
526c654a
ED
6771static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6772{
6773 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6774
6775 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6776}
6777
7578ecda 6778static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 6779{
c84dec2f 6780 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6781 const struct dc_link *link = aconnector->dc_link;
1348969a 6782 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 6783 struct amdgpu_display_manager *dm = &adev->dm;
7fd13bae 6784 int i;
ada8ce15 6785
5dff80bd
AG
6786 /*
6787 * Call only if mst_mgr was initialized before, since that is not done
6788 * for all connector types.
6789 */
6790 if (aconnector->mst_mgr.dev)
6791 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6792
e7b07cee
HW
6793#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6794 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
7fd13bae
AD
6795 for (i = 0; i < dm->num_of_edps; i++) {
6796 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6797 backlight_device_unregister(dm->backlight_dev[i]);
6798 dm->backlight_dev[i] = NULL;
6799 }
e7b07cee
HW
6800 }
6801#endif
dcd5fb82
MF
6802
6803 if (aconnector->dc_em_sink)
6804 dc_sink_release(aconnector->dc_em_sink);
6805 aconnector->dc_em_sink = NULL;
6806 if (aconnector->dc_sink)
6807 dc_sink_release(aconnector->dc_sink);
6808 aconnector->dc_sink = NULL;
6809
e86e8947 6810 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
6811 drm_connector_unregister(connector);
6812 drm_connector_cleanup(connector);
526c654a
ED
6813 if (aconnector->i2c) {
6814 i2c_del_adapter(&aconnector->i2c->base);
6815 kfree(aconnector->i2c);
6816 }
7daec99f 6817 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 6818
e7b07cee
HW
6819 kfree(connector);
6820}
6821
6822void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6823{
6824 struct dm_connector_state *state =
6825 to_dm_connector_state(connector->state);
6826
df099b9b
LSL
6827 if (connector->state)
6828 __drm_atomic_helper_connector_destroy_state(connector->state);
6829
e7b07cee
HW
6830 kfree(state);
6831
6832 state = kzalloc(sizeof(*state), GFP_KERNEL);
6833
6834 if (state) {
6835 state->scaling = RMX_OFF;
6836 state->underscan_enable = false;
6837 state->underscan_hborder = 0;
6838 state->underscan_vborder = 0;
01933ba4 6839 state->base.max_requested_bpc = 8;
3261e013
ML
6840 state->vcpi_slots = 0;
6841 state->pbn = 0;
c3e50f89
NK
6842 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6843 state->abm_level = amdgpu_dm_abm_level;
6844
df099b9b 6845 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
6846 }
6847}
6848
3ee6b26b
AD
6849struct drm_connector_state *
6850amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
6851{
6852 struct dm_connector_state *state =
6853 to_dm_connector_state(connector->state);
6854
6855 struct dm_connector_state *new_state =
6856 kmemdup(state, sizeof(*state), GFP_KERNEL);
6857
98e6436d
AK
6858 if (!new_state)
6859 return NULL;
e7b07cee 6860
98e6436d
AK
6861 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6862
6863 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6864 new_state->abm_level = state->abm_level;
922454c2
NK
6865 new_state->scaling = state->scaling;
6866 new_state->underscan_enable = state->underscan_enable;
6867 new_state->underscan_hborder = state->underscan_hborder;
6868 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
6869 new_state->vcpi_slots = state->vcpi_slots;
6870 new_state->pbn = state->pbn;
98e6436d 6871 return &new_state->base;
e7b07cee
HW
6872}
6873
14f04fa4
AD
6874static int
6875amdgpu_dm_connector_late_register(struct drm_connector *connector)
6876{
6877 struct amdgpu_dm_connector *amdgpu_dm_connector =
6878 to_amdgpu_dm_connector(connector);
00a8037e 6879 int r;
14f04fa4 6880
00a8037e
AD
6881 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6882 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6883 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6884 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6885 if (r)
6886 return r;
6887 }
6888
6889#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
6890 connector_debugfs_init(amdgpu_dm_connector);
6891#endif
6892
6893 return 0;
6894}
6895
e7b07cee
HW
6896static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6897 .reset = amdgpu_dm_connector_funcs_reset,
6898 .detect = amdgpu_dm_connector_detect,
6899 .fill_modes = drm_helper_probe_single_connector_modes,
6900 .destroy = amdgpu_dm_connector_destroy,
6901 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6902 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6903 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6904 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6905 .late_register = amdgpu_dm_connector_late_register,
526c654a 6906 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
6907};
6908
e7b07cee
HW
6909static int get_modes(struct drm_connector *connector)
6910{
6911 return amdgpu_dm_connector_get_modes(connector);
6912}
6913
c84dec2f 6914static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6915{
6916 struct dc_sink_init_data init_params = {
6917 .link = aconnector->dc_link,
6918 .sink_signal = SIGNAL_TYPE_VIRTUAL
6919 };
70e8ffc5 6920 struct edid *edid;
e7b07cee 6921
a89ff457 6922 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
6923 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6924 aconnector->base.name);
6925
6926 aconnector->base.force = DRM_FORCE_OFF;
6927 aconnector->base.override_edid = false;
6928 return;
6929 }
6930
70e8ffc5
HW
6931 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6932
e7b07cee
HW
6933 aconnector->edid = edid;
6934
6935 aconnector->dc_em_sink = dc_link_add_remote_sink(
6936 aconnector->dc_link,
6937 (uint8_t *)edid,
6938 (edid->extensions + 1) * EDID_LENGTH,
6939 &init_params);
6940
dcd5fb82 6941 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
6942 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6943 aconnector->dc_link->local_sink :
6944 aconnector->dc_em_sink;
dcd5fb82
MF
6945 dc_sink_retain(aconnector->dc_sink);
6946 }
e7b07cee
HW
6947}
6948
c84dec2f 6949static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6950{
6951 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6952
1f6010a9
DF
6953 /*
6954 * In case of headless boot with force on for a DP managed connector,
6955 * those settings have to be != 0 to get an initial modeset.
6956 */
6957 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6958 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6959 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6960 }
6961
6962
6963 aconnector->base.override_edid = true;
6964 create_eml_sink(aconnector);
6965}
6966
cbd14ae7
SW
6967static struct dc_stream_state *
6968create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6969 const struct drm_display_mode *drm_mode,
6970 const struct dm_connector_state *dm_state,
6971 const struct dc_stream_state *old_stream)
6972{
6973 struct drm_connector *connector = &aconnector->base;
1348969a 6974 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 6975 struct dc_stream_state *stream;
4b7da34b
SW
6976 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6977 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
6978 enum dc_status dc_result = DC_OK;
6979
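/*
 * Validation strategy, summarizing the loop below: start at the
 * connector's max_requested_bpc and drop 2 bpc per failed attempt
 * down to a floor of 6 bpc; if the encoder still rejects the stream
 * (DC_FAIL_ENC_VALIDATE), retry the whole ladder once with YCbCr420
 * output forced.
 */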
6980 do {
6981 stream = create_stream_for_sink(aconnector, drm_mode,
6982 dm_state, old_stream,
6983 requested_bpc);
6984 if (stream == NULL) {
6985 DRM_ERROR("Failed to create stream for sink!\n");
6986 break;
6987 }
6988
6989 dc_result = dc_validate_stream(adev->dm.dc, stream);
6990
6991 if (dc_result != DC_OK) {
74a16675 6992 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
6993 drm_mode->hdisplay,
6994 drm_mode->vdisplay,
6995 drm_mode->clock,
74a16675
RS
6996 dc_result,
6997 dc_status_to_str(dc_result));
cbd14ae7
SW
6998
6999 dc_stream_release(stream);
7000 stream = NULL;
7001 requested_bpc -= 2; /* lower bpc to retry validation */
7002 }
7003
7004 } while (stream == NULL && requested_bpc >= 6);
7005
68eb3ae3
WS
7006 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7007 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7008
7009 aconnector->force_yuv420_output = true;
7010 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7011 dm_state, old_stream);
7012 aconnector->force_yuv420_output = false;
7013 }
7014
cbd14ae7
SW
7015 return stream;
7016}
7017
ba9ca088 7018enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 7019 struct drm_display_mode *mode)
e7b07cee
HW
7020{
7021 int result = MODE_ERROR;
7022 struct dc_sink *dc_sink;
e7b07cee 7023 /* TODO: Unhardcode stream count */
0971c40e 7024 struct dc_stream_state *stream;
c84dec2f 7025 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
7026
7027 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7028 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7029 return result;
7030
1f6010a9
DF
7031 /*
7032 * Only run this the first time mode_valid is called, to initialize
7033 * EDID mgmt.
7034 */
7035 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7036 !aconnector->dc_em_sink)
7037 handle_edid_mgmt(aconnector);
7038
c84dec2f 7039 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 7040
ad975f44
VL
7041 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7042 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
7043 DRM_ERROR("dc_sink is NULL!\n");
7044 goto fail;
7045 }
7046
cbd14ae7
SW
7047 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7048 if (stream) {
7049 dc_stream_release(stream);
e7b07cee 7050 result = MODE_OK;
cbd14ae7 7051 }
e7b07cee
HW
7052
7053fail:
7054 /* TODO: error handling */
7055 return result;
7056}
7057
88694af9
NK
7058static int fill_hdr_info_packet(const struct drm_connector_state *state,
7059 struct dc_info_packet *out)
7060{
7061 struct hdmi_drm_infoframe frame;
7062 unsigned char buf[30]; /* 26 + 4 */
7063 ssize_t len;
7064 int ret, i;
7065
7066 memset(out, 0, sizeof(*out));
7067
7068 if (!state->hdr_output_metadata)
7069 return 0;
7070
7071 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7072 if (ret)
7073 return ret;
7074
7075 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7076 if (len < 0)
7077 return (int)len;
7078
7079 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7080 if (len != 30)
7081 return -EINVAL;
7082
7083 /* Prepare the infopacket for DC. */
7084 switch (state->connector->connector_type) {
7085 case DRM_MODE_CONNECTOR_HDMIA:
7086 out->hb0 = 0x87; /* type */
7087 out->hb1 = 0x01; /* version */
7088 out->hb2 = 0x1A; /* length */
7089 out->sb[0] = buf[3]; /* checksum */
7090 i = 1;
7091 break;
7092
7093 case DRM_MODE_CONNECTOR_DisplayPort:
7094 case DRM_MODE_CONNECTOR_eDP:
7095 out->hb0 = 0x00; /* sdp id, zero */
7096 out->hb1 = 0x87; /* type */
7097 out->hb2 = 0x1D; /* payload len - 1 */
7098 out->hb3 = (0x13 << 2); /* sdp version */
7099 out->sb[0] = 0x01; /* version */
7100 out->sb[1] = 0x1A; /* length */
7101 i = 2;
7102 break;
7103
7104 default:
7105 return -EINVAL;
7106 }
7107
7108 memcpy(&out->sb[i], &buf[4], 26);
7109 out->valid = true;
7110
7111 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7112 sizeof(out->sb), false);
7113
7114 return 0;
7115}
7116
88694af9
NK
7117static int
7118amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 7119 struct drm_atomic_state *state)
88694af9 7120{
51e857af
SP
7121 struct drm_connector_state *new_con_state =
7122 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
7123 struct drm_connector_state *old_con_state =
7124 drm_atomic_get_old_connector_state(state, conn);
7125 struct drm_crtc *crtc = new_con_state->crtc;
7126 struct drm_crtc_state *new_crtc_state;
7127 int ret;
7128
e8a98235
RS
7129 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7130
88694af9
NK
7131 if (!crtc)
7132 return 0;
7133
72921cdf 7134 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
7135 struct dc_info_packet hdr_infopacket;
7136
7137 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7138 if (ret)
7139 return ret;
7140
7141 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7142 if (IS_ERR(new_crtc_state))
7143 return PTR_ERR(new_crtc_state);
7144
7145 /*
7146 * DC considers the stream backends changed if the
7147 * static metadata changes. Forcing the modeset also
7148 * gives a simple way for userspace to switch from
b232d4ed
NK
7149 * 8bpc to 10bpc when setting the metadata to enter
7150 * or exit HDR.
7151 *
7152 * Changing the static metadata after it's been
7153 * set is permissible, however. So only force a
7154 * modeset if we're entering or exiting HDR.
88694af9 7155 */
b232d4ed
NK
7156 new_crtc_state->mode_changed =
7157 !old_con_state->hdr_output_metadata ||
7158 !new_con_state->hdr_output_metadata;
88694af9
NK
7159 }
7160
7161 return 0;
7162}
7163
e7b07cee
HW
7164static const struct drm_connector_helper_funcs
7165amdgpu_dm_connector_helper_funcs = {
7166 /*
1f6010a9 7167 * If hotplugging a second, bigger display in FB console mode, bigger-resolution
b830ebc9 7168 * modes will be filtered out by drm_mode_validate_size(), and those modes
1f6010a9 7169 * are missing after the user starts lightdm. So we need to renew the modes
7170 * list in the get_modes callback, not just return the modes count.
7171 */
e7b07cee
HW
7172 .get_modes = get_modes,
7173 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 7174 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
7175};
7176
7177static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7178{
7179}
7180
d6ef9b41 7181static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
7182{
7183 struct drm_atomic_state *state = new_crtc_state->state;
7184 struct drm_plane *plane;
7185 int num_active = 0;
7186
7187 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7188 struct drm_plane_state *new_plane_state;
7189
7190 /* Cursor planes are "fake". */
7191 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7192 continue;
7193
7194 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7195
7196 if (!new_plane_state) {
7197 /*
7198 * The plane is enabled on the CRTC and hasn't changed
7199 * state. This means that it previously passed
7200 * validation and is therefore enabled.
7201 */
7202 num_active += 1;
7203 continue;
7204 }
7205
7206 /* We need a framebuffer to be considered enabled. */
7207 num_active += (new_plane_state->fb != NULL);
7208 }
7209
d6ef9b41
NK
7210 return num_active;
7211}
7212
8fe684e9
NK
7213static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7214 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
7215{
7216 struct dm_crtc_state *dm_new_crtc_state =
7217 to_dm_crtc_state(new_crtc_state);
7218
7219 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
7220
7221 if (!dm_new_crtc_state->stream)
7222 return;
7223
7224 dm_new_crtc_state->active_planes =
7225 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
7226}
7227
3ee6b26b 7228static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 7229 struct drm_atomic_state *state)
e7b07cee 7230{
29b77ad7
MR
7231 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7232 crtc);
1348969a 7233 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 7234 struct dc *dc = adev->dm.dc;
29b77ad7 7235 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
7236 int ret = -EINVAL;
7237
5b8c5969 7238 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 7239
29b77ad7 7240 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 7241
bcd74374
ND
7242 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7243 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
e7b07cee
HW
7244 return ret;
7245 }
7246
bc92c065 7247 /*
b836a274
MD
7248 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7249 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7250 * planes are disabled, which is not supported by the hardware. And there is legacy
7251 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 7252 */
29b77ad7 7253 if (crtc_state->enable &&
ea9522f5
SS
7254 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7255 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 7256 return -EINVAL;
ea9522f5 7257 }
c14a005c 7258
b836a274
MD
7259 /* In some use cases, like reset, no stream is attached */
7260 if (!dm_crtc_state->stream)
7261 return 0;
7262
62c933f9 7263 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
7264 return 0;
7265
ea9522f5 7266 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
7267 return ret;
7268}
7269
3ee6b26b
AD
7270static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7271 const struct drm_display_mode *mode,
7272 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
7273{
7274 return true;
7275}
7276
7277static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7278 .disable = dm_crtc_helper_disable,
7279 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
7280 .mode_fixup = dm_crtc_helper_mode_fixup,
7281 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
7282};
7283
7284static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7285{
7286
7287}
7288
3261e013
ML
7289 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7290{
7291 switch (display_color_depth) {
7292 case COLOR_DEPTH_666:
7293 return 6;
7294 case COLOR_DEPTH_888:
7295 return 8;
7296 case COLOR_DEPTH_101010:
7297 return 10;
7298 case COLOR_DEPTH_121212:
7299 return 12;
7300 case COLOR_DEPTH_141414:
7301 return 14;
7302 case COLOR_DEPTH_161616:
7303 return 16;
7304 default:
7305 break;
7306 }
7307 return 0;
7308}
7309
3ee6b26b
AD
7310static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7311 struct drm_crtc_state *crtc_state,
7312 struct drm_connector_state *conn_state)
e7b07cee 7313{
3261e013
ML
7314 struct drm_atomic_state *state = crtc_state->state;
7315 struct drm_connector *connector = conn_state->connector;
7316 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7317 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7318 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7319 struct drm_dp_mst_topology_mgr *mst_mgr;
7320 struct drm_dp_mst_port *mst_port;
7321 enum dc_color_depth color_depth;
7322 int clock, bpp = 0;
1bc22f20 7323 bool is_y420 = false;
3261e013
ML
7324
7325 if (!aconnector->port || !aconnector->dc_sink)
7326 return 0;
7327
7328 mst_port = aconnector->port;
7329 mst_mgr = &aconnector->mst_port->mst_mgr;
7330
7331 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7332 return 0;
7333
7334 if (!state->duplicated) {
cbd14ae7 7335 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
7336 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7337 aconnector->force_yuv420_output;
cbd14ae7
SW
7338 color_depth = convert_color_depth_from_display_info(connector,
7339 is_y420,
7340 max_bpc);
3261e013
ML
7341 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7342 clock = adjusted_mode->clock;
dc48529f 7343 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
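/*
 * Rough worked example (figures approximate): a mode with a
 * 594 MHz pixel clock at 8 bpc gives bpp = 24, and
 * drm_dp_calc_pbn_mode(594000, 24, false) yields about 2125 PBN:
 * 1782 MB/s expressed in 54/64 MB/s units plus a 0.6% margin.
 */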
3261e013
ML
7344 }
7345 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7346 mst_mgr,
7347 mst_port,
1c6c1cb5 7348 dm_new_connector_state->pbn,
03ca9600 7349 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
7350 if (dm_new_connector_state->vcpi_slots < 0) {
7351 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7352 return dm_new_connector_state->vcpi_slots;
7353 }
e7b07cee
HW
7354 return 0;
7355}
7356
7357const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7358 .disable = dm_encoder_helper_disable,
7359 .atomic_check = dm_encoder_helper_atomic_check
7360};
7361
d9fe1a4c 7362#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74 7363static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6513104b
HW
7364 struct dc_state *dc_state,
7365 struct dsc_mst_fairness_vars *vars)
29b9ba74
ML
7366{
7367 struct dc_stream_state *stream = NULL;
7368 struct drm_connector *connector;
5760dcb9 7369 struct drm_connector_state *new_con_state;
29b9ba74
ML
7370 struct amdgpu_dm_connector *aconnector;
7371 struct dm_connector_state *dm_conn_state;
a550bb16
HW
7372 int i, j;
7373 int vcpi, pbn_div, pbn, slot_num = 0;
29b9ba74 7374
5760dcb9 7375 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
7376
7377 aconnector = to_amdgpu_dm_connector(connector);
7378
7379 if (!aconnector->port)
7380 continue;
7381
7382 if (!new_con_state || !new_con_state->crtc)
7383 continue;
7384
7385 dm_conn_state = to_dm_connector_state(new_con_state);
7386
7387 for (j = 0; j < dc_state->stream_count; j++) {
7388 stream = dc_state->streams[j];
7389 if (!stream)
7390 continue;
7391
7392 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7393 break;
7394
7395 stream = NULL;
7396 }
7397
7398 if (!stream)
7399 continue;
7400
29b9ba74 7401 pbn_div = dm_mst_get_pbn_divider(stream->link);
6513104b
HW
7402 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7403 for (j = 0; j < dc_state->stream_count; j++) {
7404 if (vars[j].aconnector == aconnector) {
7405 pbn = vars[j].pbn;
7406 break;
7407 }
7408 }
7409
a550bb16
HW
7410 if (j == dc_state->stream_count)
7411 continue;
7412
7413 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7414
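/*
 * Example, assuming an HBR2 x4 link: dm_mst_get_pbn_divider() would
 * come to about 40 PBN per time slot (2560 PBN across 64 slots), so
 * a 2125 PBN stream needs DIV_ROUND_UP(2125, 40) = 54 slots.
 */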
7415 if (stream->timing.flags.DSC != 1) {
7416 dm_conn_state->pbn = pbn;
7417 dm_conn_state->vcpi_slots = slot_num;
7418
7419 drm_dp_mst_atomic_enable_dsc(state,
7420 aconnector->port,
7421 dm_conn_state->pbn,
7422 0,
7423 false);
7424 continue;
7425 }
7426
29b9ba74
ML
7427 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7428 aconnector->port,
7429 pbn, pbn_div,
7430 true);
7431 if (vcpi < 0)
7432 return vcpi;
7433
7434 dm_conn_state->pbn = pbn;
7435 dm_conn_state->vcpi_slots = vcpi;
7436 }
7437 return 0;
7438}
d9fe1a4c 7439#endif
29b9ba74 7440
e7b07cee
HW
7441static void dm_drm_plane_reset(struct drm_plane *plane)
7442{
7443 struct dm_plane_state *amdgpu_state = NULL;
7444
7445 if (plane->state)
7446 plane->funcs->atomic_destroy_state(plane, plane->state);
7447
7448 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 7449 WARN_ON(amdgpu_state == NULL);
1f6010a9 7450
7ddaef96
NK
7451 if (amdgpu_state)
7452 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
7453}
7454
7455static struct drm_plane_state *
7456dm_drm_plane_duplicate_state(struct drm_plane *plane)
7457{
7458 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7459
7460 old_dm_plane_state = to_dm_plane_state(plane->state);
7461 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7462 if (!dm_plane_state)
7463 return NULL;
7464
7465 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7466
3be5262e
HW
7467 if (old_dm_plane_state->dc_state) {
7468 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7469 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
7470 }
7471
7472 return &dm_plane_state->base;
7473}
7474
dfd84d90 7475static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 7476 struct drm_plane_state *state)
e7b07cee
HW
7477{
7478 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7479
3be5262e
HW
7480 if (dm_plane_state->dc_state)
7481 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 7482
0627bbd3 7483 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
7484}
7485
7486static const struct drm_plane_funcs dm_plane_funcs = {
7487 .update_plane = drm_atomic_helper_update_plane,
7488 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 7489 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
7490 .reset = dm_drm_plane_reset,
7491 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7492 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 7493 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
7494};
7495
3ee6b26b
AD
7496static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7497 struct drm_plane_state *new_state)
e7b07cee
HW
7498{
7499 struct amdgpu_framebuffer *afb;
7500 struct drm_gem_object *obj;
5d43be0c 7501 struct amdgpu_device *adev;
e7b07cee 7502 struct amdgpu_bo *rbo;
e7b07cee 7503 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
7504 struct list_head list;
7505 struct ttm_validate_buffer tv;
7506 struct ww_acquire_ctx ticket;
5d43be0c
CK
7507 uint32_t domain;
7508 int r;
e7b07cee
HW
7509
7510 if (!new_state->fb) {
4711c033 7511 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
7512 return 0;
7513 }
7514
7515 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 7516 obj = new_state->fb->obj[0];
e7b07cee 7517 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 7518 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
7519 INIT_LIST_HEAD(&list);
7520
7521 tv.bo = &rbo->tbo;
7522 tv.num_shared = 1;
7523 list_add(&tv.head, &list);
7524
9165fb87 7525 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
7526 if (r) {
7527 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 7528 return r;
0f257b09 7529 }
e7b07cee 7530
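/*
 * From here the flow is: pick a pin domain (cursor planes must live
 * in VRAM, other planes take whatever
 * amdgpu_display_supported_domains() allows), pin the BO, make sure
 * it is bound in GART, then back off the reservation; afb->address
 * is then a stable scanout address for DC.
 */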
5d43be0c 7531 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 7532 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
7533 else
7534 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 7535
7b7c6c81 7536 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 7537 if (unlikely(r != 0)) {
30b7c614
HW
7538 if (r != -ERESTARTSYS)
7539 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 7540 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
7541 return r;
7542 }
7543
bb812f1e
JZ
7544 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7545 if (unlikely(r != 0)) {
7546 amdgpu_bo_unpin(rbo);
0f257b09 7547 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7548 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
7549 return r;
7550 }
7df7e505 7551
0f257b09 7552 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7553
7b7c6c81 7554 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
7555
7556 amdgpu_bo_ref(rbo);
7557
cf322b49
NK
7558 /**
7559 * We don't do surface updates on planes that have been newly created,
7560 * but we also don't have the afb->address during atomic check.
7561 *
7562 * Fill in buffer attributes depending on the address here, but only on
7563 * newly created planes since they're not being used by DC yet and this
7564 * won't modify global state.
7565 */
7566 dm_plane_state_old = to_dm_plane_state(plane->state);
7567 dm_plane_state_new = to_dm_plane_state(new_state);
7568
3be5262e 7569 if (dm_plane_state_new->dc_state &&
cf322b49
NK
7570 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7571 struct dc_plane_state *plane_state =
7572 dm_plane_state_new->dc_state;
7573 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 7574
320932bf 7575 fill_plane_buffer_attributes(
695af5f9 7576 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 7577 afb->tiling_flags,
cf322b49
NK
7578 &plane_state->tiling_info, &plane_state->plane_size,
7579 &plane_state->dcc, &plane_state->address,
6eed95b0 7580 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
7581 }
7582
e7b07cee
HW
7583 return 0;
7584}
7585
3ee6b26b
AD
7586static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7587 struct drm_plane_state *old_state)
e7b07cee
HW
7588{
7589 struct amdgpu_bo *rbo;
e7b07cee
HW
7590 int r;
7591
7592 if (!old_state->fb)
7593 return;
7594
e68d14dd 7595 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
7596 r = amdgpu_bo_reserve(rbo, false);
7597 if (unlikely(r)) {
7598 DRM_ERROR("failed to reserve rbo before unpin\n");
7599 return;
b830ebc9
HW
7600 }
7601
7602 amdgpu_bo_unpin(rbo);
7603 amdgpu_bo_unreserve(rbo);
7604 amdgpu_bo_unref(&rbo);
e7b07cee
HW
7605}
7606
8c44515b
AP
7607static int dm_plane_helper_check_state(struct drm_plane_state *state,
7608 struct drm_crtc_state *new_crtc_state)
7609{
6300b3bd
MK
7610 struct drm_framebuffer *fb = state->fb;
7611 int min_downscale, max_upscale;
7612 int min_scale = 0;
7613 int max_scale = INT_MAX;
7614
40d916a2 7615 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 7616 if (fb && state->crtc) {
40d916a2
NC
7617 /* Validate viewport to cover the case when only the position changes */
7618 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7619 int viewport_width = state->crtc_w;
7620 int viewport_height = state->crtc_h;
7621
7622 if (state->crtc_x < 0)
7623 viewport_width += state->crtc_x;
7624 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7625 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7626
7627 if (state->crtc_y < 0)
7628 viewport_height += state->crtc_y;
7629 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7630 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7631
4abdb72b
NC
7632 if (viewport_width < 0 || viewport_height < 0) {
7633 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7634 return -EINVAL;
7635 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7636 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 7637 return -EINVAL;
4abdb72b
NC
7638 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7639 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 7640 return -EINVAL;
4abdb72b
NC
7641 }
7642
40d916a2
NC
7643 }
7644
7645 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
7646 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7647 &min_downscale, &max_upscale);
7648 /*
7649 * Convert to drm convention: 16.16 fixed point, instead of dc's
7650 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7651 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7652 */
7653 min_scale = (1000 << 16) / max_upscale;
7654 max_scale = (1000 << 16) / min_downscale;
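/*
 * Worked example, assuming DC reported max_upscale = 16000 (16x in
 * DC's 1000-based convention) and min_downscale = 250 (0.25x):
 * min_scale = (1000 << 16) / 16000 = 0x1000 (1/16 in 16.16) and
 * max_scale = (1000 << 16) / 250 = 0x40000 (4.0), i.e. the source
 * may be between 1/16th and 4 times the destination size.
 */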
7655 }
8c44515b 7656
8c44515b 7657 return drm_atomic_helper_check_plane_state(
6300b3bd 7658 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
7659}
7660
7578ecda 7661static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 7662 struct drm_atomic_state *state)
cbd19488 7663{
7c11b99a
MR
7664 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7665 plane);
1348969a 7666 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 7667 struct dc *dc = adev->dm.dc;
78171832 7668 struct dm_plane_state *dm_plane_state;
695af5f9 7669 struct dc_scaling_info scaling_info;
8c44515b 7670 struct drm_crtc_state *new_crtc_state;
695af5f9 7671 int ret;
78171832 7672
ba5c1649 7673 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 7674
ba5c1649 7675 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 7676
3be5262e 7677 if (!dm_plane_state->dc_state)
9a3329b1 7678 return 0;
cbd19488 7679
8c44515b 7680 new_crtc_state =
dec92020 7681 drm_atomic_get_new_crtc_state(state,
ba5c1649 7682 new_plane_state->crtc);
8c44515b
AP
7683 if (!new_crtc_state)
7684 return -EINVAL;
7685
ba5c1649 7686 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
7687 if (ret)
7688 return ret;
7689
4375d625 7690 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
695af5f9
NK
7691 if (ret)
7692 return ret;
a05bcff1 7693
62c933f9 7694 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
7695 return 0;
7696
7697 return -EINVAL;
7698}
7699
674e78ac 7700static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7701 struct drm_atomic_state *state)
674e78ac
NK
7702{
7703 /* Only support async updates on cursor planes. */
7704 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7705 return -EINVAL;
7706
7707 return 0;
7708}
7709
7710static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7711 struct drm_atomic_state *state)
674e78ac 7712{
5ddb0bd4
MR
7713 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7714 plane);
674e78ac 7715 struct drm_plane_state *old_state =
5ddb0bd4 7716 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7717
e8a98235
RS
7718 trace_amdgpu_dm_atomic_update_cursor(new_state);
7719
332af874 7720 swap(plane->state->fb, new_state->fb);
674e78ac
NK
7721
7722 plane->state->src_x = new_state->src_x;
7723 plane->state->src_y = new_state->src_y;
7724 plane->state->src_w = new_state->src_w;
7725 plane->state->src_h = new_state->src_h;
7726 plane->state->crtc_x = new_state->crtc_x;
7727 plane->state->crtc_y = new_state->crtc_y;
7728 plane->state->crtc_w = new_state->crtc_w;
7729 plane->state->crtc_h = new_state->crtc_h;
7730
7731 handle_cursor_update(plane, old_state);
7732}
7733
e7b07cee
HW
7734static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7735 .prepare_fb = dm_plane_helper_prepare_fb,
7736 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7737 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
7738 .atomic_async_check = dm_plane_atomic_async_check,
7739 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
7740};
7741
7742/*
7743 * TODO: these are currently initialized to rgb formats only.
7744 * For future use cases we should either initialize them dynamically based on
7745 * plane capabilities, or initialize this array to all formats, so internal drm
1f6010a9 7746 * check will succeed, and let DC implement proper check
e7b07cee 7747 */
d90371b0 7748static const uint32_t rgb_formats[] = {
e7b07cee
HW
7749 DRM_FORMAT_XRGB8888,
7750 DRM_FORMAT_ARGB8888,
7751 DRM_FORMAT_RGBA8888,
7752 DRM_FORMAT_XRGB2101010,
7753 DRM_FORMAT_XBGR2101010,
7754 DRM_FORMAT_ARGB2101010,
7755 DRM_FORMAT_ABGR2101010,
58020403
MK
7756 DRM_FORMAT_XRGB16161616,
7757 DRM_FORMAT_XBGR16161616,
7758 DRM_FORMAT_ARGB16161616,
7759 DRM_FORMAT_ABGR16161616,
bcd47f60
MR
7760 DRM_FORMAT_XBGR8888,
7761 DRM_FORMAT_ABGR8888,
46dd9ff7 7762 DRM_FORMAT_RGB565,
e7b07cee
HW
7763};
7764
0d579c7e
NK
7765static const uint32_t overlay_formats[] = {
7766 DRM_FORMAT_XRGB8888,
7767 DRM_FORMAT_ARGB8888,
7768 DRM_FORMAT_RGBA8888,
7769 DRM_FORMAT_XBGR8888,
7770 DRM_FORMAT_ABGR8888,
7267a1a9 7771 DRM_FORMAT_RGB565
e7b07cee
HW
7772};
7773
7774static const u32 cursor_formats[] = {
7775 DRM_FORMAT_ARGB8888
7776};
7777
37c6a93b
NK
7778static int get_plane_formats(const struct drm_plane *plane,
7779 const struct dc_plane_cap *plane_cap,
7780 uint32_t *formats, int max_formats)
e7b07cee 7781{
37c6a93b
NK
7782 int i, num_formats = 0;
7783
7784 /*
7785 * TODO: Query support for each group of formats directly from
7786 * DC plane caps. This will require adding more formats to the
7787 * caps list.
7788 */
e7b07cee 7789
f180b4bc 7790 switch (plane->type) {
e7b07cee 7791 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
7792 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7793 if (num_formats >= max_formats)
7794 break;
7795
7796 formats[num_formats++] = rgb_formats[i];
7797 }
7798
ea36ad34 7799 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 7800 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
7801 if (plane_cap && plane_cap->pixel_format_support.p010)
7802 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
7803 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7804 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7805 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
7806 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7807 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 7808 }
e7b07cee 7809 break;
37c6a93b 7810
e7b07cee 7811 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
7812 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7813 if (num_formats >= max_formats)
7814 break;
7815
7816 formats[num_formats++] = overlay_formats[i];
7817 }
e7b07cee 7818 break;
37c6a93b 7819
e7b07cee 7820 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
7821 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7822 if (num_formats >= max_formats)
7823 break;
7824
7825 formats[num_formats++] = cursor_formats[i];
7826 }
e7b07cee
HW
7827 break;
7828 }
7829
37c6a93b
NK
7830 return num_formats;
7831}
7832
7833static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7834 struct drm_plane *plane,
7835 unsigned long possible_crtcs,
7836 const struct dc_plane_cap *plane_cap)
7837{
7838 uint32_t formats[32];
7839 int num_formats;
7840 int res = -EPERM;
ecc874a6 7841 unsigned int supported_rotations;
faa37f54 7842 uint64_t *modifiers = NULL;
37c6a93b
NK
7843
7844 num_formats = get_plane_formats(plane, plane_cap, formats,
7845 ARRAY_SIZE(formats));
7846
faa37f54
BN
7847 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7848 if (res)
7849 return res;
7850
4a580877 7851 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7852 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
7853 modifiers, plane->type, NULL);
7854 kfree(modifiers);
37c6a93b
NK
7855 if (res)
7856 return res;
7857
cc1fec57
NK
7858 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7859 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
7860 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7861 BIT(DRM_MODE_BLEND_PREMULTI);
7862
7863 drm_plane_create_alpha_property(plane);
7864 drm_plane_create_blend_mode_property(plane, blend_caps);
7865 }
7866
fc8e5230 7867 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
7868 plane_cap &&
7869 (plane_cap->pixel_format_support.nv12 ||
7870 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
7871 /* This only affects YUV formats. */
7872 drm_plane_create_color_properties(
7873 plane,
7874 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
7875 BIT(DRM_COLOR_YCBCR_BT709) |
7876 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
7877 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7878 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7879 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7880 }
7881
ecc874a6
PLG
7882 supported_rotations =
7883 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7884 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7885
1347385f
SS
7886 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7887 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
7888 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7889 supported_rotations);
ecc874a6 7890
f180b4bc 7891 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7892
96719c54 7893 /* Create (reset) the plane state */
f180b4bc
HW
7894 if (plane->funcs->reset)
7895 plane->funcs->reset(plane);
96719c54 7896
37c6a93b 7897 return 0;
e7b07cee
HW
7898}
7899
7578ecda
AD
7900static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7901 struct drm_plane *plane,
7902 uint32_t crtc_index)
e7b07cee
HW
7903{
7904 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7905 struct drm_plane *cursor_plane;
e7b07cee
HW
7906
7907 int res = -ENOMEM;
7908
7909 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7910 if (!cursor_plane)
7911 goto fail;
7912
f180b4bc 7913 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7914 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
7915
7916 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7917 if (!acrtc)
7918 goto fail;
7919
7920 res = drm_crtc_init_with_planes(
7921 dm->ddev,
7922 &acrtc->base,
7923 plane,
f180b4bc 7924 cursor_plane,
e7b07cee
HW
7925 &amdgpu_dm_crtc_funcs, NULL);
7926
7927 if (res)
7928 goto fail;
7929
7930 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7931
96719c54
HW
7932 /* Create (reset) the plane state */
7933 if (acrtc->base.funcs->reset)
7934 acrtc->base.funcs->reset(&acrtc->base);
7935
e7b07cee
HW
7936 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7937 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7938
7939 acrtc->crtc_id = crtc_index;
7940 acrtc->base.enabled = false;
c37e2d29 7941 acrtc->otg_inst = -1;
e7b07cee
HW
7942
7943 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
7944 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7945 true, MAX_COLOR_LUT_ENTRIES);
086247a4 7946 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 7947
e7b07cee
HW
7948 return 0;
7949
7950fail:
b830ebc9
HW
7951 kfree(acrtc);
7952 kfree(cursor_plane);
e7b07cee
HW
7953 return res;
7954}
7955
7956
7957static int to_drm_connector_type(enum signal_type st)
7958{
7959 switch (st) {
7960 case SIGNAL_TYPE_HDMI_TYPE_A:
7961 return DRM_MODE_CONNECTOR_HDMIA;
7962 case SIGNAL_TYPE_EDP:
7963 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
7964 case SIGNAL_TYPE_LVDS:
7965 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
7966 case SIGNAL_TYPE_RGB:
7967 return DRM_MODE_CONNECTOR_VGA;
7968 case SIGNAL_TYPE_DISPLAY_PORT:
7969 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7970 return DRM_MODE_CONNECTOR_DisplayPort;
7971 case SIGNAL_TYPE_DVI_DUAL_LINK:
7972 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7973 return DRM_MODE_CONNECTOR_DVID;
7974 case SIGNAL_TYPE_VIRTUAL:
7975 return DRM_MODE_CONNECTOR_VIRTUAL;
7976
7977 default:
7978 return DRM_MODE_CONNECTOR_Unknown;
7979 }
7980}
7981
2b4c1c05
DV
7982static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7983{
62afb4ad
JRS
7984 struct drm_encoder *encoder;
7985
7986 /* There is only one encoder per connector */
7987 drm_connector_for_each_possible_encoder(connector, encoder)
7988 return encoder;
7989
7990 return NULL;
2b4c1c05
DV
7991}
7992
e7b07cee
HW
7993static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7994{
e7b07cee
HW
7995 struct drm_encoder *encoder;
7996 struct amdgpu_encoder *amdgpu_encoder;
7997
2b4c1c05 7998 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
7999
8000 if (encoder == NULL)
8001 return;
8002
8003 amdgpu_encoder = to_amdgpu_encoder(encoder);
8004
8005 amdgpu_encoder->native_mode.clock = 0;
8006
8007 if (!list_empty(&connector->probed_modes)) {
8008 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 8009
e7b07cee 8010 list_for_each_entry(preferred_mode,
b830ebc9
HW
8011 &connector->probed_modes,
8012 head) {
8013 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8014 amdgpu_encoder->native_mode = *preferred_mode;
8015
e7b07cee
HW
8016 break;
8017 }
8018
8019 }
8020}
8021
3ee6b26b
AD
8022static struct drm_display_mode *
8023amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8024 char *name,
8025 int hdisplay, int vdisplay)
e7b07cee
HW
8026{
8027 struct drm_device *dev = encoder->dev;
8028 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8029 struct drm_display_mode *mode = NULL;
8030 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8031
8032 mode = drm_mode_duplicate(dev, native_mode);
8033
b830ebc9 8034 if (mode == NULL)
e7b07cee
HW
8035 return NULL;
8036
8037 mode->hdisplay = hdisplay;
8038 mode->vdisplay = vdisplay;
8039 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 8040 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
8041
8042 return mode;
8043
8044}
8045
8046static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 8047 struct drm_connector *connector)
e7b07cee
HW
8048{
8049 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8050 struct drm_display_mode *mode = NULL;
8051 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
8052 struct amdgpu_dm_connector *amdgpu_dm_connector =
8053 to_amdgpu_dm_connector(connector);
e7b07cee
HW
8054 int i;
8055 int n;
8056 struct mode_size {
8057 char name[DRM_DISPLAY_MODE_LEN];
8058 int w;
8059 int h;
b830ebc9 8060 } common_modes[] = {
e7b07cee
HW
8061 { "640x480", 640, 480},
8062 { "800x600", 800, 600},
8063 { "1024x768", 1024, 768},
8064 { "1280x720", 1280, 720},
8065 { "1280x800", 1280, 800},
8066 {"1280x1024", 1280, 1024},
8067 { "1440x900", 1440, 900},
8068 {"1680x1050", 1680, 1050},
8069 {"1600x1200", 1600, 1200},
8070 {"1920x1080", 1920, 1080},
8071 {"1920x1200", 1920, 1200}
8072 };
8073
b830ebc9 8074 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
8075
8076 for (i = 0; i < n; i++) {
8077 struct drm_display_mode *curmode = NULL;
8078 bool mode_existed = false;
8079
8080 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
8081 common_modes[i].h > native_mode->vdisplay ||
8082 (common_modes[i].w == native_mode->hdisplay &&
8083 common_modes[i].h == native_mode->vdisplay))
8084 continue;
e7b07cee
HW
8085
8086 list_for_each_entry(curmode, &connector->probed_modes, head) {
8087 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 8088 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
8089 mode_existed = true;
8090 break;
8091 }
8092 }
8093
8094 if (mode_existed)
8095 continue;
8096
8097 mode = amdgpu_dm_create_common_mode(encoder,
8098 common_modes[i].name, common_modes[i].w,
8099 common_modes[i].h);
8100 drm_mode_probed_add(connector, mode);
c84dec2f 8101 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
8102 }
8103}
8104
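/*
 * Illustrative note (not upstream code): for a panel whose native mode
 * is 1920x1080, the filter above adds every common_modes[] entry that
 * fits inside the native resolution and is not the native mode itself --
 * e.g. 1680x1050 and 1280x720 are added, while 1920x1200 and 1600x1200
 * are skipped (too tall) and 1920x1080 is skipped (equal to native).
 * Entries already present in connector->probed_modes are skipped too.
 */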
d77de788
SS
8105static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8106{
8107 struct drm_encoder *encoder;
8108 struct amdgpu_encoder *amdgpu_encoder;
8109 const struct drm_display_mode *native_mode;
8110
8111 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8112 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8113 return;
8114
8115 encoder = amdgpu_dm_connector_to_encoder(connector);
8116 if (!encoder)
8117 return;
8118
8119 amdgpu_encoder = to_amdgpu_encoder(encoder);
8120
8121 native_mode = &amdgpu_encoder->native_mode;
8122 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8123 return;
8124
8125 drm_connector_set_panel_orientation_with_quirk(connector,
8126 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8127 native_mode->hdisplay,
8128 native_mode->vdisplay);
8129}
8130
3ee6b26b
AD
8131static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8132 struct edid *edid)
e7b07cee 8133{
c84dec2f
HW
8134 struct amdgpu_dm_connector *amdgpu_dm_connector =
8135 to_amdgpu_dm_connector(connector);
e7b07cee
HW
8136
8137 if (edid) {
8138 /* empty probed_modes */
8139 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 8140 amdgpu_dm_connector->num_modes =
e7b07cee
HW
8141 drm_add_edid_modes(connector, edid);
8142
f1e5e913
YMM
8143 /* Sort the probed modes before calling
8144 * amdgpu_dm_get_native_mode(), since an EDID can have
8145 * more than one preferred mode. Modes later in the
8146 * probed mode list could be of a higher preferred
8147 * resolution: for example, 3840x2160 in the base EDID
8148 * preferred timing and 4096x2160 as the preferred
8149 * resolution in a DID extension block later.
8150 */
8151 drm_mode_sort(&connector->probed_modes);
e7b07cee 8152 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
8153
8154 /* Freesync capabilities are reset by calling
8155 * drm_add_edid_modes() and need to be
8156 * restored here.
8157 */
8158 amdgpu_dm_update_freesync_caps(connector, edid);
d77de788
SS
8159
8160 amdgpu_set_panel_orientation(connector);
a8d8d3dc 8161 } else {
c84dec2f 8162 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 8163 }
e7b07cee
HW
8164}
8165
a85ba005
NC
8166static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8167 struct drm_display_mode *mode)
8168{
8169 struct drm_display_mode *m;
8170
8171 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8172 if (drm_mode_equal(m, mode))
8173 return true;
8174 }
8175
8176 return false;
8177}
8178
8179static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8180{
8181 const struct drm_display_mode *m;
8182 struct drm_display_mode *new_mode;
8183 uint i;
8184 uint32_t new_modes_count = 0;
8185
8186 /* Standard FPS values
8187 *
12cdff6b
SC
8188 * 23.976 - TV/NTSC
8189 * 24 - Cinema
8190 * 25 - TV/PAL
8191 * 29.97 - TV/NTSC
8192 * 30 - TV/NTSC
8193 * 48 - Cinema HFR
8194 * 50 - TV/PAL
8195 * 60 - Commonly used
8196 * 48,72,96,120 - Multiples of 24
a85ba005 8197 */
9ce5ed6e
CIK
8198 static const uint32_t common_rates[] = {
8199 23976, 24000, 25000, 29970, 30000,
12cdff6b 8200 48000, 50000, 60000, 72000, 96000, 120000
9ce5ed6e 8201 };
a85ba005
NC
8202
8203 /*
8204 * Find the mode with the highest refresh rate at the same resolution
8205 * as the preferred mode. Some monitors report a preferred mode with a
8206 * lower refresh rate than the highest one they support.
8207 */
8208
8209 m = get_highest_refresh_rate_mode(aconnector, true);
8210 if (!m)
8211 return 0;
8212
8213 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8214 uint64_t target_vtotal, target_vtotal_diff;
8215 uint64_t num, den;
8216
8217 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8218 continue;
8219
8220 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8221 common_rates[i] > aconnector->max_vfreq * 1000)
8222 continue;
8223
8224 num = (unsigned long long)m->clock * 1000 * 1000;
8225 den = common_rates[i] * (unsigned long long)m->htotal;
8226 target_vtotal = div_u64(num, den);
8227 target_vtotal_diff = target_vtotal - m->vtotal;
8228
8229 /* Check for illegal modes */
8230 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8231 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8232 m->vtotal + target_vtotal_diff < m->vsync_end)
8233 continue;
8234
8235 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8236 if (!new_mode)
8237 goto out;
8238
8239 new_mode->vtotal += (u16)target_vtotal_diff;
8240 new_mode->vsync_start += (u16)target_vtotal_diff;
8241 new_mode->vsync_end += (u16)target_vtotal_diff;
8242 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8243 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8244
8245 if (!is_duplicate_mode(aconnector, new_mode)) {
8246 drm_mode_probed_add(&aconnector->base, new_mode);
8247 new_modes_count += 1;
8248 } else
8249 drm_mode_destroy(aconnector->base.dev, new_mode);
8250 }
8251 out:
8252 return new_modes_count;
8253}
8254
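/*
 * Worked example (illustrative, not upstream code): take a 1920x1080
 * mode with a 148500 kHz pixel clock, htotal = 2200 and vtotal = 1125
 * (~60 Hz). For the 48000 (48 Hz) entry of common_rates[], assuming
 * 48 Hz lies within the panel's VRR range:
 *
 *   target_vtotal      = 148500 * 1000 * 1000 / (48000 * 2200) = 1406
 *   target_vtotal_diff = 1406 - 1125 = 281
 *
 * so the mode is duplicated with vtotal, vsync_start and vsync_end each
 * shifted up by 281 lines, stretching the vertical blank so the same
 * pixel clock scans out at roughly 48 Hz for the freesync video-mode
 * feature (amdgpu_freesync_vid_mode).
 */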
8255static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8256 struct edid *edid)
8257{
8258 struct amdgpu_dm_connector *amdgpu_dm_connector =
8259 to_amdgpu_dm_connector(connector);
8260
8261 if (!(amdgpu_freesync_vid_mode && edid))
8262 return;
fe8858bb 8263
a85ba005
NC
8264 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8265 amdgpu_dm_connector->num_modes +=
8266 add_fs_modes(amdgpu_dm_connector);
8267}
8268
7578ecda 8269static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 8270{
c84dec2f
HW
8271 struct amdgpu_dm_connector *amdgpu_dm_connector =
8272 to_amdgpu_dm_connector(connector);
e7b07cee 8273 struct drm_encoder *encoder;
c84dec2f 8274 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 8275
2b4c1c05 8276 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 8277
5c0e6840 8278 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
8279 amdgpu_dm_connector->num_modes =
8280 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
8281 } else {
8282 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8283 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 8284 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 8285 }
3e332d3a 8286 amdgpu_dm_fbc_init(connector);
5099114b 8287
c84dec2f 8288 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
8289}
8290
3ee6b26b
AD
8291void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8292 struct amdgpu_dm_connector *aconnector,
8293 int connector_type,
8294 struct dc_link *link,
8295 int link_index)
e7b07cee 8296{
1348969a 8297 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 8298
f04bee34
NK
8299 /*
8300 * Some of the properties below require access to state, like bpc.
8301 * Allocate some default initial connector state with our reset helper.
8302 */
8303 if (aconnector->base.funcs->reset)
8304 aconnector->base.funcs->reset(&aconnector->base);
8305
e7b07cee
HW
8306 aconnector->connector_id = link_index;
8307 aconnector->dc_link = link;
8308 aconnector->base.interlace_allowed = false;
8309 aconnector->base.doublescan_allowed = false;
8310 aconnector->base.stereo_allowed = false;
8311 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8312 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 8313 aconnector->audio_inst = -1;
e7b07cee
HW
8314 mutex_init(&aconnector->hpd_lock);
8315
1f6010a9
DF
8316 /*
8317 * Configure HPD hot-plug support: the connector->polled default value is 0,
b830ebc9
HW
8318 * which means HPD hot plug is not supported.
8319 */
e7b07cee
HW
8320 switch (connector_type) {
8321 case DRM_MODE_CONNECTOR_HDMIA:
8322 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 8323 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8324 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
8325 break;
8326 case DRM_MODE_CONNECTOR_DisplayPort:
8327 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
f6e03f80
JS
8328 if (link->is_dig_mapping_flexible &&
8329 link->dc->res_pool->funcs->link_encs_assign) {
8330 link->link_enc =
8331 link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8332 if (!link->link_enc)
8333 link->link_enc =
8334 link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8335 }
8336
8337 if (link->link_enc)
8338 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8339 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
8340 break;
8341 case DRM_MODE_CONNECTOR_DVID:
8342 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8343 break;
8344 default:
8345 break;
8346 }
8347
8348 drm_object_attach_property(&aconnector->base.base,
8349 dm->ddev->mode_config.scaling_mode_property,
8350 DRM_MODE_SCALE_NONE);
8351
8352 drm_object_attach_property(&aconnector->base.base,
8353 adev->mode_info.underscan_property,
8354 UNDERSCAN_OFF);
8355 drm_object_attach_property(&aconnector->base.base,
8356 adev->mode_info.underscan_hborder_property,
8357 0);
8358 drm_object_attach_property(&aconnector->base.base,
8359 adev->mode_info.underscan_vborder_property,
8360 0);
1825fd34 8361
8c61b31e
JFZ
8362 if (!aconnector->mst_port)
8363 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 8364
4a8ca46b
RL
8365 /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
8366 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8367 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 8368
c1ee92f9 8369 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 8370 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
8371 drm_object_attach_property(&aconnector->base.base,
8372 adev->mode_info.abm_level_property, 0);
8373 }
bb47de73
NK
8374
8375 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
8376 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8377 connector_type == DRM_MODE_CONNECTOR_eDP) {
e057b52c 8378 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
88694af9 8379
8c61b31e
JFZ
8380 if (!aconnector->mst_port)
8381 drm_connector_attach_vrr_capable_property(&aconnector->base);
8382
0c8620d6 8383#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 8384 if (adev->dm.hdcp_workqueue)
53e108aa 8385 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 8386#endif
bb47de73 8387 }
e7b07cee
HW
8388}
8389
7578ecda
AD
8390static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8391 struct i2c_msg *msgs, int num)
e7b07cee
HW
8392{
8393 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8394 struct ddc_service *ddc_service = i2c->ddc_service;
8395 struct i2c_command cmd;
8396 int i;
8397 int result = -EIO;
8398
b830ebc9 8399 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
8400
8401 if (!cmd.payloads)
8402 return result;
8403
8404 cmd.number_of_payloads = num;
8405 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8406 cmd.speed = 100;
8407
8408 for (i = 0; i < num; i++) {
8409 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8410 cmd.payloads[i].address = msgs[i].addr;
8411 cmd.payloads[i].length = msgs[i].len;
8412 cmd.payloads[i].data = msgs[i].buf;
8413 }
8414
c85e6e54
DF
8415 if (dc_submit_i2c(
8416 ddc_service->ctx->dc,
8417 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
8418 &cmd))
8419 result = num;
8420
8421 kfree(cmd.payloads);
8422 return result;
8423}
8424
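/*
 * Illustrative sketch (assumed usage, not upstream code): a typical
 * EDID read reaches amdgpu_dm_i2c_xfer() as two i2c_msg entries, which
 * the loop above translates 1:1 into dc i2c_payload entries submitted
 * as a single i2c_command:
 *
 *   struct i2c_msg msgs[] = {
 *       { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset  },
 *       { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid_buf },
 *   };
 *   i2c_transfer(&i2c->base, msgs, 2);
 *
 * payload[0] becomes a write of the EDID offset and payload[1] a
 * 128-byte read; dc_submit_i2c() executes both in one transaction.
 */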
7578ecda 8425static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
8426{
8427 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8428}
8429
8430static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8431 .master_xfer = amdgpu_dm_i2c_xfer,
8432 .functionality = amdgpu_dm_i2c_func,
8433};
8434
3ee6b26b
AD
8435static struct amdgpu_i2c_adapter *
8436create_i2c(struct ddc_service *ddc_service,
8437 int link_index,
8438 int *res)
e7b07cee
HW
8439{
8440 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8441 struct amdgpu_i2c_adapter *i2c;
8442
b830ebc9 8443 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
8444 if (!i2c)
8445 return NULL;
e7b07cee
HW
8446 i2c->base.owner = THIS_MODULE;
8447 i2c->base.class = I2C_CLASS_DDC;
8448 i2c->base.dev.parent = &adev->pdev->dev;
8449 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 8450 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
8451 i2c_set_adapdata(&i2c->base, i2c);
8452 i2c->ddc_service = ddc_service;
f6e03f80
JS
8453 if (i2c->ddc_service->ddc_pin)
8454 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
8455
8456 return i2c;
8457}
8458
89fc8d4e 8459
1f6010a9
DF
8460/*
8461 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
8462 * dc_link which will be represented by this aconnector.
8463 */
7578ecda
AD
8464static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8465 struct amdgpu_dm_connector *aconnector,
8466 uint32_t link_index,
8467 struct amdgpu_encoder *aencoder)
e7b07cee
HW
8468{
8469 int res = 0;
8470 int connector_type;
8471 struct dc *dc = dm->dc;
8472 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8473 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
8474
8475 link->priv = aconnector;
e7b07cee 8476
f1ad2f5e 8477 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
8478
8479 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
8480 if (!i2c) {
8481 DRM_ERROR("Failed to create i2c adapter data\n");
8482 return -ENOMEM;
8483 }
8484
e7b07cee
HW
8485 aconnector->i2c = i2c;
8486 res = i2c_add_adapter(&i2c->base);
8487
8488 if (res) {
8489 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8490 goto out_free;
8491 }
8492
8493 connector_type = to_drm_connector_type(link->connector_signal);
8494
17165de2 8495 res = drm_connector_init_with_ddc(
e7b07cee
HW
8496 dm->ddev,
8497 &aconnector->base,
8498 &amdgpu_dm_connector_funcs,
17165de2
AP
8499 connector_type,
8500 &i2c->base);
e7b07cee
HW
8501
8502 if (res) {
8503 DRM_ERROR("connector_init failed\n");
8504 aconnector->connector_id = -1;
8505 goto out_free;
8506 }
8507
8508 drm_connector_helper_add(
8509 &aconnector->base,
8510 &amdgpu_dm_connector_helper_funcs);
8511
8512 amdgpu_dm_connector_init_helper(
8513 dm,
8514 aconnector,
8515 connector_type,
8516 link,
8517 link_index);
8518
cde4c44d 8519 drm_connector_attach_encoder(
e7b07cee
HW
8520 &aconnector->base, &aencoder->base);
8521
e7b07cee
HW
8522 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8523 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 8524 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 8525
e7b07cee
HW
8526out_free:
8527 if (res) {
8528 kfree(i2c);
8529 aconnector->i2c = NULL;
8530 }
8531 return res;
8532}
8533
8534int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8535{
8536 switch (adev->mode_info.num_crtc) {
8537 case 1:
8538 return 0x1;
8539 case 2:
8540 return 0x3;
8541 case 3:
8542 return 0x7;
8543 case 4:
8544 return 0xf;
8545 case 5:
8546 return 0x1f;
8547 case 6:
8548 default:
8549 return 0x3f;
8550 }
8551}
8552
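/*
 * Illustrative note (not upstream code): for the one-to-six CRTC cases
 * the switch above is equivalent to
 *
 *   (1u << min(adev->mode_info.num_crtc, 6)) - 1
 *
 * i.e. one possible_crtcs bit per CRTC, clamped to six CRTCs.
 */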
7578ecda
AD
8553static int amdgpu_dm_encoder_init(struct drm_device *dev,
8554 struct amdgpu_encoder *aencoder,
8555 uint32_t link_index)
e7b07cee 8556{
1348969a 8557 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8558
8559 int res = drm_encoder_init(dev,
8560 &aencoder->base,
8561 &amdgpu_dm_encoder_funcs,
8562 DRM_MODE_ENCODER_TMDS,
8563 NULL);
8564
8565 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8566
8567 if (!res)
8568 aencoder->encoder_id = link_index;
8569 else
8570 aencoder->encoder_id = -1;
8571
8572 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8573
8574 return res;
8575}
8576
3ee6b26b
AD
8577static void manage_dm_interrupts(struct amdgpu_device *adev,
8578 struct amdgpu_crtc *acrtc,
8579 bool enable)
e7b07cee
HW
8580{
8581 /*
8fe684e9
NK
8582 * We have no guarantee that the frontend index maps to the same
8583 * backend index - some even map to more than one.
8584 *
8585 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
8586 */
8587 int irq_type =
734dd01d 8588 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
8589 adev,
8590 acrtc->crtc_id);
8591
8592 if (enable) {
8593 drm_crtc_vblank_on(&acrtc->base);
8594 amdgpu_irq_get(
8595 adev,
8596 &adev->pageflip_irq,
8597 irq_type);
86bc2219
WL
8598#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8599 amdgpu_irq_get(
8600 adev,
8601 &adev->vline0_irq,
8602 irq_type);
8603#endif
e7b07cee 8604 } else {
86bc2219
WL
8605#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8606 amdgpu_irq_put(
8607 adev,
8608 &adev->vline0_irq,
8609 irq_type);
8610#endif
e7b07cee
HW
8611 amdgpu_irq_put(
8612 adev,
8613 &adev->pageflip_irq,
8614 irq_type);
8615 drm_crtc_vblank_off(&acrtc->base);
8616 }
8617}
8618
8fe684e9
NK
8619static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8620 struct amdgpu_crtc *acrtc)
8621{
8622 int irq_type =
8623 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8624
8625 /**
8626 * This reads the current state for the IRQ and forcibly reapplies
8627 * the setting to the hardware.
8628 */
8629 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8630}
8631
3ee6b26b
AD
8632static bool
8633is_scaling_state_different(const struct dm_connector_state *dm_state,
8634 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
8635{
8636 if (dm_state->scaling != old_dm_state->scaling)
8637 return true;
8638 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8639 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8640 return true;
8641 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8642 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8643 return true;
b830ebc9
HW
8644 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8645 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8646 return true;
e7b07cee
HW
8647 return false;
8648}
8649
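/*
 * Illustrative note (not upstream code): a scaling update is reported
 * when the scaling mode itself changes, when underscan is toggled while
 * non-zero borders are (or were) programmed, or when the border sizes
 * change with the underscan enable state unchanged. For example,
 * turning underscan off while hborder = 16 and vborder = 9 were set
 * returns true, so the full-size timing gets reprogrammed.
 */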
0c8620d6
BL
8650#ifdef CONFIG_DRM_AMD_DC_HDCP
8651static bool is_content_protection_different(struct drm_connector_state *state,
8652 const struct drm_connector_state *old_state,
8653 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8654{
8655 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 8656 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 8657
31c0ed90 8658 /* Handle: Type0/1 change */
53e108aa
BL
8659 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8660 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8661 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8662 return true;
8663 }
8664
31c0ed90
BL
8665 /* CP is being re-enabled; ignore this transition.
8666 *
8667 * Handles: ENABLED -> DESIRED
8668 */
0c8620d6
BL
8669 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8670 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8671 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8672 return false;
8673 }
8674
31c0ed90
BL
8675 /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8676 *
8677 * Handles: UNDESIRED -> ENABLED
8678 */
0c8620d6
BL
8679 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8680 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8681 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8682
0d9a947b
QZ
8683 /* Stream removed and re-enabled
8684 *
8685 * Can sometimes overlap with the HPD case,
8686 * thus set update_hdcp to false to avoid
8687 * setting HDCP multiple times.
8688 *
8689 * Handles: DESIRED -> DESIRED (Special case)
8690 */
8691 if (!(old_state->crtc && old_state->crtc->enabled) &&
8692 state->crtc && state->crtc->enabled &&
8693 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8694 dm_con_state->update_hdcp = false;
8695 return true;
8696 }
8697
8698 /* Hot-plug, headless S3, DPMS
8699 *
8700 * Only start HDCP if the display is connected/enabled.
8701 * update_hdcp flag will be set to false until the next
8702 * HPD comes in.
31c0ed90
BL
8703 *
8704 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 8705 */
97f6c917
BL
8706 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8707 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8708 dm_con_state->update_hdcp = false;
0c8620d6 8709 return true;
97f6c917 8710 }
0c8620d6 8711
31c0ed90
BL
8712 /*
8713 * Handles: UNDESIRED -> UNDESIRED
8714 * DESIRED -> DESIRED
8715 * ENABLED -> ENABLED
8716 */
0c8620d6
BL
8717 if (old_state->content_protection == state->content_protection)
8718 return false;
8719
31c0ed90
BL
8720 /*
8721 * Handles: UNDESIRED -> DESIRED
8722 * DESIRED -> UNDESIRED
8723 * ENABLED -> UNDESIRED
8724 */
97f6c917 8725 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
8726 return true;
8727
31c0ed90
BL
8728 /*
8729 * Handles: DESIRED -> ENABLED
8730 */
0c8620d6
BL
8731 return false;
8732}
8733
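/*
 * Illustrative summary (not upstream code) of the returns above, where
 * "true" means the caller will (re)program HDCP via hdcp_update_display():
 *
 *   - HDCP content type changed while CP is not UNDESIRED:    true
 *   - ENABLED -> DESIRED (CP being re-enabled):               false
 *   - UNDESIRED -> ENABLED (S3 restore): demoted to DESIRED,
 *     then evaluated like any other transition
 *   - DESIRED -> DESIRED with the stream or sink coming back: true
 *   - no state change:                                        false
 *   - any other change not ending in ENABLED:                 true
 *   - DESIRED -> ENABLED (set by the driver itself):          false
 */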
0c8620d6 8734#endif
3ee6b26b
AD
8735static void remove_stream(struct amdgpu_device *adev,
8736 struct amdgpu_crtc *acrtc,
8737 struct dc_stream_state *stream)
e7b07cee
HW
8738{
8739 /* this is the update mode case */
e7b07cee
HW
8740
8741 acrtc->otg_inst = -1;
8742 acrtc->enabled = false;
8743}
8744
7578ecda
AD
8745static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8746 struct dc_cursor_position *position)
2a8f6ccb 8747{
f4c2cc43 8748 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
8749 int x, y;
8750 int xorigin = 0, yorigin = 0;
8751
e371e19c 8752 if (!crtc || !plane->state->fb)
2a8f6ccb 8753 return 0;
2a8f6ccb
HW
8754
8755 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8756 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8757 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8758 __func__,
8759 plane->state->crtc_w,
8760 plane->state->crtc_h);
8761 return -EINVAL;
8762 }
8763
8764 x = plane->state->crtc_x;
8765 y = plane->state->crtc_y;
c14a005c 8766
e371e19c
NK
8767 if (x <= -amdgpu_crtc->max_cursor_width ||
8768 y <= -amdgpu_crtc->max_cursor_height)
8769 return 0;
8770
2a8f6ccb
HW
8771 if (x < 0) {
8772 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8773 x = 0;
8774 }
8775 if (y < 0) {
8776 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8777 y = 0;
8778 }
8779 position->enable = true;
d243b6ff 8780 position->translate_by_source = true;
2a8f6ccb
HW
8781 position->x = x;
8782 position->y = y;
8783 position->x_hotspot = xorigin;
8784 position->y_hotspot = yorigin;
8785
8786 return 0;
8787}
8788
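/*
 * Worked example (illustrative, not upstream code): with a 64x64 cursor
 * dragged to crtc_x = -10, crtc_y = 20, the clamping above yields
 * x = 0, xorigin = 10, y = 20, yorigin = 0: the cursor plane is pinned
 * to the left edge and the hotspot shifted so the visible part still
 * tracks the pointer. A cursor entirely off the left/top edge
 * (x <= -max_cursor_width) returns early with position->enable unset.
 */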
3ee6b26b
AD
8789static void handle_cursor_update(struct drm_plane *plane,
8790 struct drm_plane_state *old_plane_state)
e7b07cee 8791{
1348969a 8792 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
8793 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8794 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8795 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8796 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8797 uint64_t address = afb ? afb->address : 0;
6a30a929 8798 struct dc_cursor_position position = {0};
2a8f6ccb
HW
8799 struct dc_cursor_attributes attributes;
8800 int ret;
8801
e7b07cee
HW
8802 if (!plane->state->fb && !old_plane_state->fb)
8803 return;
8804
cb2318b7 8805 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
4711c033
LT
8806 __func__,
8807 amdgpu_crtc->crtc_id,
8808 plane->state->crtc_w,
8809 plane->state->crtc_h);
2a8f6ccb
HW
8810
8811 ret = get_cursor_position(plane, crtc, &position);
8812 if (ret)
8813 return;
8814
8815 if (!position.enable) {
8816 /* turn off cursor */
674e78ac
NK
8817 if (crtc_state && crtc_state->stream) {
8818 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
8819 dc_stream_set_cursor_position(crtc_state->stream,
8820 &position);
674e78ac
NK
8821 mutex_unlock(&adev->dm.dc_lock);
8822 }
2a8f6ccb 8823 return;
e7b07cee 8824 }
e7b07cee 8825
2a8f6ccb
HW
8826 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8827 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8828
c1cefe11 8829 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
8830 attributes.address.high_part = upper_32_bits(address);
8831 attributes.address.low_part = lower_32_bits(address);
8832 attributes.width = plane->state->crtc_w;
8833 attributes.height = plane->state->crtc_h;
8834 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8835 attributes.rotation_angle = 0;
8836 attributes.attribute_flags.value = 0;
8837
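/*
 * Illustrative note (not upstream code): dc expects the cursor pitch in
 * pixels, not bytes -- e.g. a 64-wide ARGB8888 cursor FB has
 * pitches[0] = 256 bytes and cpp[0] = 4, giving a pitch of 64 below.
 */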
03a66367 8838 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 8839
886daac9 8840 if (crtc_state->stream) {
674e78ac 8841 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
8842 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8843 &attributes))
8844 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 8845
2a8f6ccb
HW
8846 if (!dc_stream_set_cursor_position(crtc_state->stream,
8847 &position))
8848 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 8849 mutex_unlock(&adev->dm.dc_lock);
886daac9 8850 }
2a8f6ccb 8851}
e7b07cee
HW
8852
8853static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8854{
8855
8856 assert_spin_locked(&acrtc->base.dev->event_lock);
8857 WARN_ON(acrtc->event);
8858
8859 acrtc->event = acrtc->base.state->event;
8860
8861 /* Set the flip status */
8862 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8863
8864 /* Mark this event as consumed */
8865 acrtc->base.state->event = NULL;
8866
cb2318b7
VL
8867 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8868 acrtc->crtc_id);
e7b07cee
HW
8869}
8870
bb47de73
NK
8871static void update_freesync_state_on_stream(
8872 struct amdgpu_display_manager *dm,
8873 struct dm_crtc_state *new_crtc_state,
180db303
NK
8874 struct dc_stream_state *new_stream,
8875 struct dc_plane_state *surface,
8876 u32 flip_timestamp_in_us)
bb47de73 8877{
09aef2c4 8878 struct mod_vrr_params vrr_params;
bb47de73 8879 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 8880 struct amdgpu_device *adev = dm->adev;
585d450c 8881 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8882 unsigned long flags;
4cda3243 8883 bool pack_sdp_v1_3 = false;
bb47de73
NK
8884
8885 if (!new_stream)
8886 return;
8887
8888 /*
8889 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8890 * For now it's sufficient to just guard against these conditions.
8891 */
8892
8893 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8894 return;
8895
4a580877 8896 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8897 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8898
180db303
NK
8899 if (surface) {
8900 mod_freesync_handle_preflip(
8901 dm->freesync_module,
8902 surface,
8903 new_stream,
8904 flip_timestamp_in_us,
8905 &vrr_params);
09aef2c4
MK
8906
8907 if (adev->family < AMDGPU_FAMILY_AI &&
8908 amdgpu_dm_vrr_active(new_crtc_state)) {
8909 mod_freesync_handle_v_update(dm->freesync_module,
8910 new_stream, &vrr_params);
e63e2491
EB
8911
8912 /* Need to call this before the frame ends. */
8913 dc_stream_adjust_vmin_vmax(dm->dc,
8914 new_crtc_state->stream,
8915 &vrr_params.adjust);
09aef2c4 8916 }
180db303 8917 }
bb47de73
NK
8918
8919 mod_freesync_build_vrr_infopacket(
8920 dm->freesync_module,
8921 new_stream,
180db303 8922 &vrr_params,
ecd0136b
HT
8923 PACKET_TYPE_VRR,
8924 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
8925 &vrr_infopacket,
8926 pack_sdp_v1_3);
bb47de73 8927
8a48b44c 8928 new_crtc_state->freesync_timing_changed |=
585d450c 8929 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
8930 &vrr_params.adjust,
8931 sizeof(vrr_params.adjust)) != 0);
bb47de73 8932
8a48b44c 8933 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
8934 (memcmp(&new_crtc_state->vrr_infopacket,
8935 &vrr_infopacket,
8936 sizeof(vrr_infopacket)) != 0);
8937
585d450c 8938 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
8939 new_crtc_state->vrr_infopacket = vrr_infopacket;
8940
585d450c 8941 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
8942 new_stream->vrr_infopacket = vrr_infopacket;
8943
8944 if (new_crtc_state->freesync_vrr_info_changed)
8945 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8946 new_crtc_state->base.crtc->base.id,
8947 (int)new_crtc_state->base.vrr_enabled,
180db303 8948 (int)vrr_params.state);
09aef2c4 8949
4a580877 8950 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
8951}
8952
585d450c 8953static void update_stream_irq_parameters(
e854194c
MK
8954 struct amdgpu_display_manager *dm,
8955 struct dm_crtc_state *new_crtc_state)
8956{
8957 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 8958 struct mod_vrr_params vrr_params;
e854194c 8959 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 8960 struct amdgpu_device *adev = dm->adev;
585d450c 8961 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8962 unsigned long flags;
e854194c
MK
8963
8964 if (!new_stream)
8965 return;
8966
8967 /*
8968 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8969 * For now it's sufficient to just guard against these conditions.
8970 */
8971 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8972 return;
8973
4a580877 8974 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8975 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8976
e854194c
MK
8977 if (new_crtc_state->vrr_supported &&
8978 config.min_refresh_in_uhz &&
8979 config.max_refresh_in_uhz) {
a85ba005
NC
8980 /*
8981 * if freesync compatible mode was set, config.state will be set
8982 * in atomic check
8983 */
8984 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8985 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8986 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8987 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8988 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8989 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8990 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8991 } else {
8992 config.state = new_crtc_state->base.vrr_enabled ?
8993 VRR_STATE_ACTIVE_VARIABLE :
8994 VRR_STATE_INACTIVE;
8995 }
e854194c
MK
8996 } else {
8997 config.state = VRR_STATE_UNSUPPORTED;
8998 }
8999
9000 mod_freesync_build_vrr_params(dm->freesync_module,
9001 new_stream,
9002 &config, &vrr_params);
9003
9004 new_crtc_state->freesync_timing_changed |=
585d450c
AP
9005 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9006 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 9007
585d450c
AP
9008 new_crtc_state->freesync_config = config;
9009 /* Copy state for access from DM IRQ handler */
9010 acrtc->dm_irq_params.freesync_config = config;
9011 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9012 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 9013 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
9014}
9015
66b0c973
MK
9016static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9017 struct dm_crtc_state *new_state)
9018{
9019 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9020 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9021
9022 if (!old_vrr_active && new_vrr_active) {
9023 /* Transition VRR inactive -> active:
9024 * While VRR is active, we must not disable vblank irq, as a
9025 * re-enable after a disable would compute bogus vblank/pflip
9026 * timestamps if it happened inside the display front-porch.
d2574c33
MK
9027 *
9028 * We also need vupdate irq for the actual core vblank handling
9029 * at end of vblank.
66b0c973 9030 */
d2574c33 9031 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
9032 drm_crtc_vblank_get(new_state->base.crtc);
9033 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9034 __func__, new_state->base.crtc->base.id);
9035 } else if (old_vrr_active && !new_vrr_active) {
9036 /* Transition VRR active -> inactive:
9037 * Allow vblank irq disable again for fixed refresh rate.
9038 */
d2574c33 9039 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
9040 drm_crtc_vblank_put(new_state->base.crtc);
9041 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9042 __func__, new_state->base.crtc->base.id);
9043 }
9044}
9045
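/*
 * Illustrative note (not upstream code): the vblank reference taken on
 * the inactive -> active transition above is what keeps the vblank IRQ
 * from being disabled for the whole time VRR is active; the active ->
 * inactive transition drops that same reference, restoring normal
 * on-demand vblank IRQ handling for fixed refresh rates.
 */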
8ad27806
NK
9046static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9047{
9048 struct drm_plane *plane;
5760dcb9 9049 struct drm_plane_state *old_plane_state;
8ad27806
NK
9050 int i;
9051
9052 /*
9053 * TODO: Make this per-stream so we don't issue redundant updates for
9054 * commits with multiple streams.
9055 */
5760dcb9 9056 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8ad27806
NK
9057 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9058 handle_cursor_update(plane, old_plane_state);
9059}
9060
3be5262e 9061static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 9062 struct dc_state *dc_state,
3ee6b26b
AD
9063 struct drm_device *dev,
9064 struct amdgpu_display_manager *dm,
9065 struct drm_crtc *pcrtc,
420cd472 9066 bool wait_for_vblank)
e7b07cee 9067{
efc8278e 9068 uint32_t i;
8a48b44c 9069 uint64_t timestamp_ns;
e7b07cee 9070 struct drm_plane *plane;
0bc9706d 9071 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 9072 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
9073 struct drm_crtc_state *new_pcrtc_state =
9074 drm_atomic_get_new_crtc_state(state, pcrtc);
9075 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
9076 struct dm_crtc_state *dm_old_crtc_state =
9077 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 9078 int planes_count = 0, vpos, hpos;
570c91d5 9079 long r;
e7b07cee 9080 unsigned long flags;
8a48b44c 9081 struct amdgpu_bo *abo;
fdd1fe57
MK
9082 uint32_t target_vblank, last_flip_vblank;
9083 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 9084 bool pflip_present = false;
bc7f670e
DF
9085 struct {
9086 struct dc_surface_update surface_updates[MAX_SURFACES];
9087 struct dc_plane_info plane_infos[MAX_SURFACES];
9088 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 9089 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 9090 struct dc_stream_update stream_update;
74aa7bd4 9091 } *bundle;
bc7f670e 9092
74aa7bd4 9093 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 9094
74aa7bd4
DF
9095 if (!bundle) {
9096 dm_error("Failed to allocate update bundle\n");
4b510503
NK
9097 goto cleanup;
9098 }
e7b07cee 9099
8ad27806
NK
9100 /*
9101 * Disable the cursor first if we're disabling all the planes.
9102 * It'll remain on the screen after the planes are re-enabled
9103 * if we don't.
9104 */
9105 if (acrtc_state->active_planes == 0)
9106 amdgpu_dm_commit_cursors(state);
9107
e7b07cee 9108 /* update planes when needed */
efc8278e 9109 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 9110 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 9111 struct drm_crtc_state *new_crtc_state;
0bc9706d 9112 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 9113 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 9114 bool plane_needs_flip;
c7af5f77 9115 struct dc_plane_state *dc_plane;
54d76575 9116 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 9117
80c218d5
NK
9118 /* Cursor plane is handled after stream updates */
9119 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 9120 continue;
e7b07cee 9121
f5ba60fe
DD
9122 if (!fb || !crtc || pcrtc != crtc)
9123 continue;
9124
9125 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9126 if (!new_crtc_state->active)
e7b07cee
HW
9127 continue;
9128
bc7f670e 9129 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 9130
74aa7bd4 9131 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 9132 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
9133 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9134 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 9135 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 9136 }
8a48b44c 9137
4375d625 9138 fill_dc_scaling_info(dm->adev, new_plane_state,
695af5f9 9139 &bundle->scaling_infos[planes_count]);
8a48b44c 9140
695af5f9
NK
9141 bundle->surface_updates[planes_count].scaling_info =
9142 &bundle->scaling_infos[planes_count];
8a48b44c 9143
f5031000 9144 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 9145
f5031000 9146 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 9147
f5031000
DF
9148 if (!plane_needs_flip) {
9149 planes_count += 1;
9150 continue;
9151 }
8a48b44c 9152
2fac0f53
CK
9153 abo = gem_to_amdgpu_bo(fb->obj[0]);
9154
f8308898
AG
9155 /*
9156 * Wait for all fences on this FB. Do limited wait to avoid
9157 * deadlock during GPU reset when this fence will not signal
9158 * but we hold reservation lock for the BO.
9159 */
d3fae3b3
CK
9160 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9161 msecs_to_jiffies(5000));
f8308898 9162 if (unlikely(r <= 0))
ed8a5fb2 9163 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 9164
695af5f9 9165 fill_dc_plane_info_and_addr(
8ce5d842 9166 dm->adev, new_plane_state,
6eed95b0 9167 afb->tiling_flags,
695af5f9 9168 &bundle->plane_infos[planes_count],
87b7ebc2 9169 &bundle->flip_addrs[planes_count].address,
6eed95b0 9170 afb->tmz_surface, false);
87b7ebc2 9171
4711c033 9172 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
9173 new_plane_state->plane->index,
9174 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
9175
9176 bundle->surface_updates[planes_count].plane_info =
9177 &bundle->plane_infos[planes_count];
8a48b44c 9178
caff0e66
NK
9179 /*
9180 * Only allow immediate flips for fast updates that don't
9181 * change FB pitch, DCC state, rotation or mirroring.
9182 */
f5031000 9183 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 9184 crtc->state->async_flip &&
caff0e66 9185 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 9186
f5031000
DF
9187 timestamp_ns = ktime_get_ns();
9188 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9189 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9190 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 9191
f5031000
DF
9192 if (!bundle->surface_updates[planes_count].surface) {
9193 DRM_ERROR("No surface for CRTC: id=%d\n",
9194 acrtc_attach->crtc_id);
9195 continue;
bc7f670e
DF
9196 }
9197
f5031000
DF
9198 if (plane == pcrtc->primary)
9199 update_freesync_state_on_stream(
9200 dm,
9201 acrtc_state,
9202 acrtc_state->stream,
9203 dc_plane,
9204 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 9205
4711c033 9206 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
9207 __func__,
9208 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9209 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
9210
9211 planes_count += 1;
9212
8a48b44c
DF
9213 }
9214
74aa7bd4 9215 if (pflip_present) {
634092b1
MK
9216 if (!vrr_active) {
9217 /* Use old throttling in non-vrr fixed refresh rate mode
9218 * to keep flip scheduling based on target vblank counts
9219 * working in a backwards compatible way, e.g., for
9220 * clients using the GLX_OML_sync_control extension or
9221 * DRI3/Present extension with defined target_msc.
9222 */
e3eff4b5 9223 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
9224 }
9225 else {
9226 /* For variable refresh rate mode only:
9227 * Get vblank of last completed flip to avoid > 1 vrr
9228 * flips per video frame by use of throttling, but allow
9229 * flip programming anywhere in the possibly large
9230 * variable vrr vblank interval for fine-grained flip
9231 * timing control and more opportunity to avoid stutter
9232 * on late submission of flips.
9233 */
9234 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 9235 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
9236 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9237 }
9238
fdd1fe57 9239 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
9240
9241 /*
9242 * Wait until we're out of the vertical blank period before the one
9243 * targeted by the flip
9244 */
9245 while ((acrtc_attach->enabled &&
9246 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9247 0, &vpos, &hpos, NULL,
9248 NULL, &pcrtc->hwmode)
9249 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9250 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9251 (int)(target_vblank -
e3eff4b5 9252 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
9253 usleep_range(1000, 1100);
9254 }
9255
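/*
 * Illustrative note (not upstream code): the poll above sleeps in
 * ~1 ms steps while the scanout is still inside the vblank period
 * before the targeted one, throttling flips to at most one per frame
 * (fixed refresh) or one per completed flip (VRR).
 */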
8fe684e9
NK
9256 /**
9257 * Prepare the flip event for the pageflip interrupt to handle.
9258 *
9259 * This only works in the case where we've already turned on the
9260 * appropriate hardware blocks (eg. HUBP) so in the transition case
9261 * from 0 -> n planes we have to skip a hardware generated event
9262 * and rely on sending it from software.
9263 */
9264 if (acrtc_attach->base.state->event &&
035f5496
AP
9265 acrtc_state->active_planes > 0 &&
9266 !acrtc_state->force_dpms_off) {
8a48b44c
DF
9267 drm_crtc_vblank_get(pcrtc);
9268
9269 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9270
9271 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9272 prepare_flip_isr(acrtc_attach);
9273
9274 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9275 }
9276
9277 if (acrtc_state->stream) {
8a48b44c 9278 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 9279 bundle->stream_update.vrr_infopacket =
8a48b44c 9280 &acrtc_state->stream->vrr_infopacket;
e7b07cee 9281 }
e7b07cee
HW
9282 }
9283
bc92c065 9284 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
9285 if ((planes_count || acrtc_state->active_planes == 0) &&
9286 acrtc_state->stream) {
96160687 9287#if defined(CONFIG_DRM_AMD_DC_DCN)
58aa1c50
NK
9288 /*
9289 * If PSR or idle optimizations are enabled then flush out
9290 * any pending work before hardware programming.
9291 */
06dd1888
NK
9292 if (dm->vblank_control_workqueue)
9293 flush_workqueue(dm->vblank_control_workqueue);
96160687 9294#endif
58aa1c50 9295
b6e881c9 9296 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 9297 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
9298 bundle->stream_update.src = acrtc_state->stream->src;
9299 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
9300 }
9301
cf020d49
NK
9302 if (new_pcrtc_state->color_mgmt_changed) {
9303 /*
9304 * TODO: This isn't fully correct since we've actually
9305 * already modified the stream in place.
9306 */
9307 bundle->stream_update.gamut_remap =
9308 &acrtc_state->stream->gamut_remap_matrix;
9309 bundle->stream_update.output_csc_transform =
9310 &acrtc_state->stream->csc_color_matrix;
9311 bundle->stream_update.out_transfer_func =
9312 acrtc_state->stream->out_transfer_func;
9313 }
bc7f670e 9314
8a48b44c 9315 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 9316 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 9317 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 9318
e63e2491
EB
9319 /*
9320 * If FreeSync state on the stream has changed then we need to
9321 * re-adjust the min/max bounds now that DC doesn't handle this
9322 * as part of commit.
9323 */
a85ba005 9324 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
9325 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9326 dc_stream_adjust_vmin_vmax(
9327 dm->dc, acrtc_state->stream,
585d450c 9328 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
9329 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9330 }
bc7f670e 9331 mutex_lock(&dm->dc_lock);
8c322309 9332 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 9333 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
9334 amdgpu_dm_psr_disable(acrtc_state->stream);
9335
bc7f670e 9336 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 9337 bundle->surface_updates,
bc7f670e
DF
9338 planes_count,
9339 acrtc_state->stream,
efc8278e
AJ
9340 &bundle->stream_update,
9341 dc_state);
8c322309 9342
8fe684e9
NK
9343 /**
9344 * Enable or disable the interrupts on the backend.
9345 *
9346 * Most pipes are put into power gating when unused.
9347 *
9348 * When power gating is enabled on a pipe we lose the
9349 * interrupt enablement state when power gating is disabled.
9350 *
9351 * So we need to update the IRQ control state in hardware
9352 * whenever the pipe turns on (since it could be previously
9353 * power gated) or off (since some pipes can't be power gated
9354 * on some ASICs).
9355 */
9356 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
9357 dm_update_pflip_irq_state(drm_to_adev(dev),
9358 acrtc_attach);
8fe684e9 9359
8c322309 9360 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 9361 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 9362 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309 9363 amdgpu_dm_link_setup_psr(acrtc_state->stream);
58aa1c50
NK
9364
9365 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9366 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9367 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9368 struct amdgpu_dm_connector *aconn =
9369 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
1a365683
RL
9370
9371 if (aconn->psr_skip_count > 0)
9372 aconn->psr_skip_count--;
58aa1c50
NK
9373
9374 /* Allow PSR when skip count is 0. */
9375 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9376 } else {
9377 acrtc_attach->dm_irq_params.allow_psr_entry = false;
8c322309
RL
9378 }
9379
bc7f670e 9380 mutex_unlock(&dm->dc_lock);
e7b07cee 9381 }
4b510503 9382
8ad27806
NK
9383 /*
9384 * Update cursor state *after* programming all the planes.
9385 * This avoids redundant programming in the case where we're going
9386 * to be disabling a single plane - those pipes are being disabled.
9387 */
9388 if (acrtc_state->active_planes)
9389 amdgpu_dm_commit_cursors(state);
80c218d5 9390
4b510503 9391cleanup:
74aa7bd4 9392 kfree(bundle);
e7b07cee
HW
9393}
9394
6ce8f316
NK
9395static void amdgpu_dm_commit_audio(struct drm_device *dev,
9396 struct drm_atomic_state *state)
9397{
1348969a 9398 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
9399 struct amdgpu_dm_connector *aconnector;
9400 struct drm_connector *connector;
9401 struct drm_connector_state *old_con_state, *new_con_state;
9402 struct drm_crtc_state *new_crtc_state;
9403 struct dm_crtc_state *new_dm_crtc_state;
9404 const struct dc_stream_status *status;
9405 int i, inst;
9406
9407 /* Notify device removals. */
9408 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9409 if (old_con_state->crtc != new_con_state->crtc) {
9410 /* CRTC changes require notification. */
9411 goto notify;
9412 }
9413
9414 if (!new_con_state->crtc)
9415 continue;
9416
9417 new_crtc_state = drm_atomic_get_new_crtc_state(
9418 state, new_con_state->crtc);
9419
9420 if (!new_crtc_state)
9421 continue;
9422
9423 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9424 continue;
9425
9426 notify:
9427 aconnector = to_amdgpu_dm_connector(connector);
9428
9429 mutex_lock(&adev->dm.audio_lock);
9430 inst = aconnector->audio_inst;
9431 aconnector->audio_inst = -1;
9432 mutex_unlock(&adev->dm.audio_lock);
9433
9434 amdgpu_dm_audio_eld_notify(adev, inst);
9435 }
9436
9437 /* Notify audio device additions. */
9438 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9439 if (!new_con_state->crtc)
9440 continue;
9441
9442 new_crtc_state = drm_atomic_get_new_crtc_state(
9443 state, new_con_state->crtc);
9444
9445 if (!new_crtc_state)
9446 continue;
9447
9448 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9449 continue;
9450
9451 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9452 if (!new_dm_crtc_state->stream)
9453 continue;
9454
9455 status = dc_stream_get_status(new_dm_crtc_state->stream);
9456 if (!status)
9457 continue;
9458
9459 aconnector = to_amdgpu_dm_connector(connector);
9460
9461 mutex_lock(&adev->dm.audio_lock);
9462 inst = status->audio_inst;
9463 aconnector->audio_inst = inst;
9464 mutex_unlock(&adev->dm.audio_lock);
9465
9466 amdgpu_dm_audio_eld_notify(adev, inst);
9467 }
9468}
9469
1f6010a9 9470/*
27b3f4fc
LSL
9471 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9472 * @crtc_state: the DRM CRTC state
9473 * @stream_state: the DC stream state.
9474 *
9475 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9476 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9477 */
9478static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9479 struct dc_stream_state *stream_state)
9480{
b9952f93 9481 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 9482}
e7b07cee 9483
b8592b48
LL
9484/**
9485 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9486 * @state: The atomic state to commit
9487 *
9488 * This will tell DC to commit the constructed DC state from atomic_check,
9489 * programming the hardware. Any failure here implies a hardware failure, since
9490 * atomic check should have filtered anything non-kosher.
9491 */
7578ecda 9492static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
9493{
9494 struct drm_device *dev = state->dev;
1348969a 9495 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
9496 struct amdgpu_display_manager *dm = &adev->dm;
9497 struct dm_atomic_state *dm_state;
eb3dc897 9498 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 9499 uint32_t i, j;
5cc6dcbd 9500 struct drm_crtc *crtc;
0bc9706d 9501 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
9502 unsigned long flags;
9503 bool wait_for_vblank = true;
9504 struct drm_connector *connector;
c2cea706 9505 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 9506 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 9507 int crtc_disable_count = 0;
6ee90e88 9508 bool mode_set_reset_required = false;
e7b07cee 9509
e8a98235
RS
9510 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9511
e7b07cee
HW
9512 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9513
eb3dc897
NK
9514 dm_state = dm_atomic_get_new_state(state);
9515 if (dm_state && dm_state->context) {
9516 dc_state = dm_state->context;
9517 } else {
9518 /* No state changes, retain current state. */
813d20dc 9519 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
9520 ASSERT(dc_state_temp);
9521 dc_state = dc_state_temp;
9522 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9523 }
e7b07cee 9524
6d90a208
AP
9525 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9526 new_crtc_state, i) {
9527 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9528
9529 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9530
9531 if (old_crtc_state->active &&
9532 (!new_crtc_state->active ||
9533 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9534 manage_dm_interrupts(adev, acrtc, false);
9535 dc_stream_release(dm_old_crtc_state->stream);
9536 }
9537 }
9538
8976f73b
RS
9539 drm_atomic_helper_calc_timestamping_constants(state);
9540
e7b07cee 9541 /* update changed items */
0bc9706d 9542 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 9543 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9544
54d76575
LSL
9545 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9546 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 9547
4711c033 9548 DRM_DEBUG_ATOMIC(
e7b07cee
HW
9549 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9550 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9551 "connectors_changed:%d\n",
9552 acrtc->crtc_id,
0bc9706d
LSL
9553 new_crtc_state->enable,
9554 new_crtc_state->active,
9555 new_crtc_state->planes_changed,
9556 new_crtc_state->mode_changed,
9557 new_crtc_state->active_changed,
9558 new_crtc_state->connectors_changed);
e7b07cee 9559
5c68c652
VL
9560 /* Disable cursor if disabling crtc */
9561 if (old_crtc_state->active && !new_crtc_state->active) {
9562 struct dc_cursor_position position;
9563
9564 memset(&position, 0, sizeof(position));
9565 mutex_lock(&dm->dc_lock);
9566 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9567 mutex_unlock(&dm->dc_lock);
9568 }
9569
27b3f4fc
LSL
9570 /* Copy all transient state flags into dc state */
9571 if (dm_new_crtc_state->stream) {
9572 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9573 dm_new_crtc_state->stream);
9574 }
9575
e7b07cee
HW
9576 /* handles headless hotplug case, updating new_state and
9577 * aconnector as needed
9578 */
9579
54d76575 9580 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 9581
4711c033 9582 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9583
54d76575 9584 if (!dm_new_crtc_state->stream) {
e7b07cee 9585 /*
b830ebc9
HW
9586 * This could happen because of issues with
9587 * userspace notification delivery.
9588 * In this case userspace tries to set a mode on a
1f6010a9
DF
9589 * display which is in fact disconnected.
9590 * dc_sink is NULL in this case on the aconnector.
b830ebc9
HW
9591 * We expect a mode-reset request to come soon.
9592 *
9593 * This can also happen when an unplug occurs
9594 * during the resume sequence.
9595 *
9596 * In this case, we want to pretend we still
9597 * have a sink to keep the pipe running so that
9598 * hw state is consistent with the sw state.
9599 */
f1ad2f5e 9600 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
9601 __func__, acrtc->base.base.id);
9602 continue;
9603 }
9604
54d76575
LSL
9605 if (dm_old_crtc_state->stream)
9606 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 9607
97028037
LP
9608 pm_runtime_get_noresume(dev->dev);
9609
e7b07cee 9610 acrtc->enabled = true;
0bc9706d
LSL
9611 acrtc->hw_mode = new_crtc_state->mode;
9612 crtc->hwmode = new_crtc_state->mode;
6ee90e88 9613 mode_set_reset_required = true;
0bc9706d 9614 } else if (modereset_required(new_crtc_state)) {
4711c033 9615 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9616 /* i.e. reset mode */
6ee90e88 9617 if (dm_old_crtc_state->stream)
54d76575 9618 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 9619
6ee90e88 9620 mode_set_reset_required = true;
e7b07cee
HW
9621 }
9622 } /* for_each_crtc_in_state() */
9623
eb3dc897 9624 if (dc_state) {
6ee90e88 9625 /* if there was a mode set or reset, disable eDP PSR */
58aa1c50 9626 if (mode_set_reset_required) {
96160687 9627#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
9628 if (dm->vblank_control_workqueue)
9629 flush_workqueue(dm->vblank_control_workqueue);
96160687 9630#endif
6ee90e88 9631 amdgpu_dm_psr_disable_all(dm);
58aa1c50 9632 }
6ee90e88 9633
eb3dc897 9634 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 9635 mutex_lock(&dm->dc_lock);
eb3dc897 9636 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5af50b0b
BR
9637#if defined(CONFIG_DRM_AMD_DC_DCN)
9638 /* Allow idle optimization when vblank count is 0 for display off */
9639 if (dm->active_vblank_irq_count == 0)
9640 dc_allow_idle_optimizations(dm->dc, true);
9641#endif
674e78ac 9642 mutex_unlock(&dm->dc_lock);
fa2123db 9643 }
fe8858bb 9644
0bc9706d 9645 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9646 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9647
54d76575 9648 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9649
54d76575 9650 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 9651 const struct dc_stream_status *status =
54d76575 9652 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 9653
eb3dc897 9654 if (!status)
09f609c3
LL
9655 status = dc_stream_get_status_from_state(dc_state,
9656 dm_new_crtc_state->stream);
e7b07cee 9657 if (!status)
54d76575 9658 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
9659 else
9660 acrtc->otg_inst = status->primary_otg_inst;
9661 }
9662 }
0c8620d6
BL
9663#ifdef CONFIG_DRM_AMD_DC_HDCP
9664 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9665 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9666 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9667 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9668
9669 new_crtc_state = NULL;
9670
9671 if (acrtc)
9672 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9673
9674 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9675
9676 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9677 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9678 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9679 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 9680 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
9681 continue;
9682 }
9683
9684 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
9685 hdcp_update_display(
9686 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 9687 new_con_state->hdcp_content_type,
0e86d3d4 9688 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
0c8620d6
BL
9689 }
9690#endif
e7b07cee 9691
02d6a6fc 9692 /* Handle connector state changes */
c2cea706 9693 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9694 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9695 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9696 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 9697 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 9698 struct dc_stream_update stream_update;
b232d4ed 9699 struct dc_info_packet hdr_packet;
e7b07cee 9700 struct dc_stream_status *status = NULL;
b232d4ed 9701 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 9702
efc8278e 9703 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
9704 memset(&stream_update, 0, sizeof(stream_update));
9705
44d09c6a 9706 if (acrtc) {
0bc9706d 9707 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
9708 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9709 }
0bc9706d 9710
e7b07cee 9711 /* Skip any modesets/resets */
0bc9706d 9712 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
9713 continue;
9714
54d76575 9715 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
9716 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9717
b232d4ed
NK
9718 scaling_changed = is_scaling_state_different(dm_new_con_state,
9719 dm_old_con_state);
9720
9721 abm_changed = dm_new_crtc_state->abm_level !=
9722 dm_old_crtc_state->abm_level;
9723
9724 hdr_changed =
72921cdf 9725 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
b232d4ed
NK
9726
9727 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 9728 continue;
e7b07cee 9729
b6e881c9 9730 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 9731 if (scaling_changed) {
02d6a6fc 9732 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 9733 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 9734
02d6a6fc
DF
9735 stream_update.src = dm_new_crtc_state->stream->src;
9736 stream_update.dst = dm_new_crtc_state->stream->dst;
9737 }
9738
b232d4ed 9739 if (abm_changed) {
02d6a6fc
DF
9740 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9741
9742 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9743 }
70e8ffc5 9744
b232d4ed
NK
9745 if (hdr_changed) {
9746 fill_hdr_info_packet(new_con_state, &hdr_packet);
9747 stream_update.hdr_static_metadata = &hdr_packet;
9748 }
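 /*
 * Note on the pattern above: dc_stream_update members are pointers that
 * are left NULL for untouched properties, so only the fields set in the
 * scaling/ABM/HDR branches are applied when the update is committed
 * below; everything else on the stream is left as-is.
 */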
9749
54d76575 9750 status = dc_stream_get_status(dm_new_crtc_state->stream);
57738ae4
ND
9751
9752 if (WARN_ON(!status))
9753 continue;
9754
3be5262e 9755 WARN_ON(!status->plane_count);
e7b07cee 9756
02d6a6fc
DF
9757 /*
9758 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9759 * Here we create an empty update on each plane.
9760 * To fix this, DC should permit updating only stream properties.
9761 */
9762 for (j = 0; j < status->plane_count; j++)
efc8278e 9763 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
9764
9765
9766 mutex_lock(&dm->dc_lock);
9767 dc_commit_updates_for_stream(dm->dc,
efc8278e 9768 dummy_updates,
02d6a6fc
DF
9769 status->plane_count,
9770 dm_new_crtc_state->stream,
efc8278e
AJ
9771 &stream_update,
9772 dc_state);
02d6a6fc 9773 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
9774 }
9775
b5e83f6f 9776 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 9777 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 9778 new_crtc_state, i) {
fe2a1965
LP
9779 if (old_crtc_state->active && !new_crtc_state->active)
9780 crtc_disable_count++;
9781
54d76575 9782 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 9783 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 9784
585d450c
AP
 9785 /* Update the freesync config on the crtc state and the parameters used by the irq handlers */
9786 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 9787
66b0c973
MK
9788 /* Handle vrr on->off / off->on transitions */
9789 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9790 dm_new_crtc_state);
e7b07cee
HW
9791 }
9792
8fe684e9
NK
 9793 /*
9794 * Enable interrupts for CRTCs that are newly enabled or went through
9795 * a modeset. It was intentionally deferred until after the front end
9796 * state was modified to wait until the OTG was on and so the IRQ
9797 * handlers didn't access stale or invalid state.
9798 */
9799 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9800 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 9801#ifdef CONFIG_DEBUG_FS
86bc2219 9802 bool configure_crc = false;
8e7b6fee 9803 enum amdgpu_dm_pipe_crc_source cur_crc_src;
d98af272
WL
9804#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9805 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9806#endif
9807 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9808 cur_crc_src = acrtc->dm_irq_params.crc_src;
9809 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 9810#endif
585d450c
AP
9811 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9812
8fe684e9
NK
9813 if (new_crtc_state->active &&
9814 (!old_crtc_state->active ||
9815 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
9816 dc_stream_retain(dm_new_crtc_state->stream);
9817 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 9818 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 9819
24eb9374 9820#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
 9821 /*
9822 * Frontend may have changed so reapply the CRC capture
9823 * settings for the stream.
9824 */
9825 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 9826
8e7b6fee 9827 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
9828 configure_crc = true;
9829#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
9830 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9831 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9832 acrtc->dm_irq_params.crc_window.update_win = true;
9833 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9834 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9835 crc_rd_wrk->crtc = crtc;
9836 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9837 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9838 }
86bc2219 9839#endif
e2881d6d 9840 }
c920888c 9841
86bc2219 9842 if (configure_crc)
bbc49fc0
WL
9843 if (amdgpu_dm_crtc_configure_crc_source(
9844 crtc, dm_new_crtc_state, cur_crc_src))
 9845 DRM_DEBUG_DRIVER("Failed to configure crc source\n");
24eb9374 9846#endif
8fe684e9
NK
9847 }
9848 }
e7b07cee 9849
420cd472 9850 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 9851 if (new_crtc_state->async_flip)
420cd472
DF
9852 wait_for_vblank = false;
9853
e7b07cee 9854 /* update planes when needed per crtc*/
5cc6dcbd 9855 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 9856 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9857
54d76575 9858 if (dm_new_crtc_state->stream)
eb3dc897 9859 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 9860 dm, crtc, wait_for_vblank);
e7b07cee
HW
9861 }
9862
6ce8f316
NK
9863 /* Update audio instances for each connector. */
9864 amdgpu_dm_commit_audio(dev, state);
9865
7230362c
AD
9866#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9867 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9868 /* restore the backlight level */
7fd13bae
AD
9869 for (i = 0; i < dm->num_of_edps; i++) {
9870 if (dm->backlight_dev[i] &&
9871 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9872 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9873 }
7230362c 9874#endif
e7b07cee
HW
9875 /*
 9876 * Send a vblank event for all events not handled in the flip and
 9877 * mark the consumed events for drm_atomic_helper_commit_hw_done().
9878 */
4a580877 9879 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 9880 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9881
0bc9706d
LSL
9882 if (new_crtc_state->event)
9883 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 9884
0bc9706d 9885 new_crtc_state->event = NULL;
e7b07cee 9886 }
4a580877 9887 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 9888
29c8f234
LL
9889 /* Signal HW programming completion */
9890 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
9891
9892 if (wait_for_vblank)
320a1274 9893 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
9894
9895 drm_atomic_helper_cleanup_planes(dev, state);
97028037 9896
5f6fab24
AD
9897 /* return the stolen vga memory back to VRAM */
9898 if (!adev->mman.keep_stolen_vga_memory)
9899 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9900 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9901
1f6010a9
DF
9902 /*
9903 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
9904 * so we can put the GPU into runtime suspend if we're not driving any
9905 * displays anymore
9906 */
fe2a1965
LP
9907 for (i = 0; i < crtc_disable_count; i++)
9908 pm_runtime_put_autosuspend(dev->dev);
97028037 9909 pm_runtime_mark_last_busy(dev->dev);
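 /*
 * Refcount sketch: a reference was taken with pm_runtime_get_noresume()
 * when each CRTC was enabled (possibly in an earlier commit), and
 * crtc_disable_count counted the active -> inactive transitions in this
 * commit, so the loop above drops exactly one reference per CRTC that
 * was just switched off, allowing runtime suspend once no display holds
 * a reference.
 */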
eb3dc897
NK
9910
9911 if (dc_state_temp)
9912 dc_release_state(dc_state_temp);
e7b07cee
HW
9913}
9914
9915
9916static int dm_force_atomic_commit(struct drm_connector *connector)
9917{
9918 int ret = 0;
9919 struct drm_device *ddev = connector->dev;
9920 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9921 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9922 struct drm_plane *plane = disconnected_acrtc->base.primary;
9923 struct drm_connector_state *conn_state;
9924 struct drm_crtc_state *crtc_state;
9925 struct drm_plane_state *plane_state;
9926
9927 if (!state)
9928 return -ENOMEM;
9929
9930 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9931
9932 /* Construct an atomic state to restore previous display setting */
9933
9934 /*
9935 * Attach connectors to drm_atomic_state
9936 */
9937 conn_state = drm_atomic_get_connector_state(state, connector);
9938
9939 ret = PTR_ERR_OR_ZERO(conn_state);
9940 if (ret)
2dc39051 9941 goto out;
e7b07cee
HW
9942
9943 /* Attach crtc to drm_atomic_state*/
9944 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9945
9946 ret = PTR_ERR_OR_ZERO(crtc_state);
9947 if (ret)
2dc39051 9948 goto out;
e7b07cee
HW
9949
9950 /* force a restore */
9951 crtc_state->mode_changed = true;
9952
9953 /* Attach plane to drm_atomic_state */
9954 plane_state = drm_atomic_get_plane_state(state, plane);
9955
9956 ret = PTR_ERR_OR_ZERO(plane_state);
9957 if (ret)
2dc39051 9958 goto out;
e7b07cee
HW
9959
9960 /* Call commit internally with the state we just constructed */
9961 ret = drm_atomic_commit(state);
e7b07cee 9962
2dc39051 9963out:
e7b07cee 9964 drm_atomic_state_put(state);
2dc39051
VL
9965 if (ret)
9966 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
9967
9968 return ret;
9969}
9970
9971/*
1f6010a9
DF
 9972 * This function handles all cases where a mode set does not come after a hotplug.
 9973 * This includes when a display is unplugged and then plugged back into the
 9974 * same port, and when running without usermode desktop manager support.
e7b07cee 9975 */
3ee6b26b
AD
9976void dm_restore_drm_connector_state(struct drm_device *dev,
9977 struct drm_connector *connector)
e7b07cee 9978{
c84dec2f 9979 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
9980 struct amdgpu_crtc *disconnected_acrtc;
9981 struct dm_crtc_state *acrtc_state;
9982
9983 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9984 return;
9985
9986 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
9987 if (!disconnected_acrtc)
9988 return;
e7b07cee 9989
70e8ffc5
HW
9990 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9991 if (!acrtc_state->stream)
e7b07cee
HW
9992 return;
9993
9994 /*
 9995 * If the previous sink is not released and is different from the current
 9996 * one, we deduce we are in a state where we cannot rely on a usermode
 9997 * call to turn on the display, so we do it here.
9998 */
9999 if (acrtc_state->stream->sink != aconnector->dc_sink)
10000 dm_force_atomic_commit(&aconnector->base);
10001}
10002
1f6010a9 10003/*
e7b07cee
HW
 10004 * Grabs all modesetting locks to serialize against any blocking commits,
 10005 * and waits for completion of all non-blocking commits.
10006 */
3ee6b26b
AD
10007static int do_aquire_global_lock(struct drm_device *dev,
10008 struct drm_atomic_state *state)
e7b07cee
HW
10009{
10010 struct drm_crtc *crtc;
10011 struct drm_crtc_commit *commit;
10012 long ret;
10013
1f6010a9
DF
10014 /*
 10015 * Adding all modeset locks to acquire_ctx will
 10016 * ensure that when the framework releases it, the
 10017 * extra locks we are taking here will get released too.
10018 */
10019 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10020 if (ret)
10021 return ret;
10022
10023 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10024 spin_lock(&crtc->commit_lock);
10025 commit = list_first_entry_or_null(&crtc->commit_list,
10026 struct drm_crtc_commit, commit_entry);
10027 if (commit)
10028 drm_crtc_commit_get(commit);
10029 spin_unlock(&crtc->commit_lock);
10030
10031 if (!commit)
10032 continue;
10033
1f6010a9
DF
10034 /*
10035 * Make sure all pending HW programming completed and
e7b07cee
HW
10036 * page flips done
10037 */
10038 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10039
10040 if (ret > 0)
10041 ret = wait_for_completion_interruptible_timeout(
10042 &commit->flip_done, 10*HZ);
10043
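 /*
 * Note: wait_for_completion_interruptible_timeout() returns the
 * remaining jiffies (> 0) on completion, 0 on timeout, and a negative
 * errno if interrupted. That is why the flip_done wait only runs when
 * ret > 0, ret == 0 logs a timeout below, and a negative value
 * propagates out of the function.
 */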
10044 if (ret == 0)
 10045 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
 10046 crtc->base.id, crtc->name);
e7b07cee
HW
10047
10048 drm_crtc_commit_put(commit);
10049 }
10050
10051 return ret < 0 ? ret : 0;
10052}
10053
bb47de73
NK
10054static void get_freesync_config_for_crtc(
10055 struct dm_crtc_state *new_crtc_state,
10056 struct dm_connector_state *new_con_state)
98e6436d
AK
10057{
10058 struct mod_freesync_config config = {0};
98e6436d
AK
10059 struct amdgpu_dm_connector *aconnector =
10060 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 10061 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 10062 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 10063 bool fs_vid_mode = false;
98e6436d 10064
a057ec46 10065 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
10066 vrefresh >= aconnector->min_vfreq &&
10067 vrefresh <= aconnector->max_vfreq;
bb47de73 10068
a057ec46
IB
10069 if (new_crtc_state->vrr_supported) {
10070 new_crtc_state->stream->ignore_msa_timing_param = true;
a85ba005
NC
10071 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10072
10073 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10074 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 10075 config.vsif_supported = true;
180db303 10076 config.btr = true;
98e6436d 10077
a85ba005
NC
10078 if (fs_vid_mode) {
10079 config.state = VRR_STATE_ACTIVE_FIXED;
10080 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10081 goto out;
10082 } else if (new_crtc_state->base.vrr_enabled) {
10083 config.state = VRR_STATE_ACTIVE_VARIABLE;
10084 } else {
10085 config.state = VRR_STATE_INACTIVE;
10086 }
10087 }
10088out:
bb47de73
NK
10089 new_crtc_state->freesync_config = config;
10090}
98e6436d 10091
bb47de73
NK
10092static void reset_freesync_config_for_crtc(
10093 struct dm_crtc_state *new_crtc_state)
10094{
10095 new_crtc_state->vrr_supported = false;
98e6436d 10096
bb47de73
NK
10097 memset(&new_crtc_state->vrr_infopacket, 0,
10098 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
10099}
10100
a85ba005
NC
10101static bool
10102is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10103 struct drm_crtc_state *new_crtc_state)
10104{
10105 struct drm_display_mode old_mode, new_mode;
10106
10107 if (!old_crtc_state || !new_crtc_state)
10108 return false;
10109
10110 old_mode = old_crtc_state->mode;
10111 new_mode = new_crtc_state->mode;
10112
10113 if (old_mode.clock == new_mode.clock &&
10114 old_mode.hdisplay == new_mode.hdisplay &&
10115 old_mode.vdisplay == new_mode.vdisplay &&
10116 old_mode.htotal == new_mode.htotal &&
10117 old_mode.vtotal != new_mode.vtotal &&
10118 old_mode.hsync_start == new_mode.hsync_start &&
10119 old_mode.vsync_start != new_mode.vsync_start &&
10120 old_mode.hsync_end == new_mode.hsync_end &&
10121 old_mode.vsync_end != new_mode.vsync_end &&
10122 old_mode.hskew == new_mode.hskew &&
10123 old_mode.vscan == new_mode.vscan &&
10124 (old_mode.vsync_end - old_mode.vsync_start) ==
10125 (new_mode.vsync_end - new_mode.vsync_start))
10126 return true;
10127
10128 return false;
10129}
10130
 10131 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
 {
10132 uint64_t num, den, res;
10133 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10134
10135 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10136
10137 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10138 den = (unsigned long long)new_crtc_state->mode.htotal *
10139 (unsigned long long)new_crtc_state->mode.vtotal;
10140
10141 res = div_u64(num, den);
10142 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10143}
10144
4b9674e5
LL
10145static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10146 struct drm_atomic_state *state,
10147 struct drm_crtc *crtc,
10148 struct drm_crtc_state *old_crtc_state,
10149 struct drm_crtc_state *new_crtc_state,
10150 bool enable,
10151 bool *lock_and_validation_needed)
e7b07cee 10152{
eb3dc897 10153 struct dm_atomic_state *dm_state = NULL;
54d76575 10154 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 10155 struct dc_stream_state *new_stream;
62f55537 10156 int ret = 0;
d4d4a645 10157
1f6010a9
DF
10158 /*
10159 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10160 * update changed items
10161 */
4b9674e5
LL
10162 struct amdgpu_crtc *acrtc = NULL;
10163 struct amdgpu_dm_connector *aconnector = NULL;
10164 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10165 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 10166
4b9674e5 10167 new_stream = NULL;
9635b754 10168
4b9674e5
LL
10169 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10170 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10171 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 10172 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 10173
4b9674e5
LL
10174 /* TODO This hack should go away */
10175 if (aconnector && enable) {
10176 /* Make sure fake sink is created in plug-in scenario */
10177 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10178 &aconnector->base);
10179 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10180 &aconnector->base);
19f89e23 10181
4b9674e5
LL
10182 if (IS_ERR(drm_new_conn_state)) {
10183 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10184 goto fail;
10185 }
19f89e23 10186
4b9674e5
LL
10187 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10188 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 10189
02d35a67
JFZ
10190 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10191 goto skip_modeset;
10192
cbd14ae7
SW
10193 new_stream = create_validate_stream_for_sink(aconnector,
10194 &new_crtc_state->mode,
10195 dm_new_conn_state,
10196 dm_old_crtc_state->stream);
19f89e23 10197
4b9674e5
LL
10198 /*
 10199 * We can have no stream on ACTION_SET if a display
 10200 * was disconnected during S3. In this case it is not an
 10201 * error: the OS will be updated after detection and
 10202 * will do the right thing on the next atomic commit.
10203 */
19f89e23 10204
4b9674e5
LL
10205 if (!new_stream) {
10206 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10207 __func__, acrtc->base.base.id);
10208 ret = -ENOMEM;
10209 goto fail;
10210 }
e7b07cee 10211
3d4e52d0
VL
10212 /*
10213 * TODO: Check VSDB bits to decide whether this should
10214 * be enabled or not.
10215 */
10216 new_stream->triggered_crtc_reset.enabled =
10217 dm->force_timing_sync;
10218
4b9674e5 10219 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 10220
88694af9
NK
10221 ret = fill_hdr_info_packet(drm_new_conn_state,
10222 &new_stream->hdr_static_metadata);
10223 if (ret)
10224 goto fail;
10225
7e930949
NK
10226 /*
10227 * If we already removed the old stream from the context
10228 * (and set the new stream to NULL) then we can't reuse
10229 * the old stream even if the stream and scaling are unchanged.
 10230 * We'll hit the BUG_ON and end up with a black screen.
10231 *
10232 * TODO: Refactor this function to allow this check to work
10233 * in all conditions.
10234 */
a85ba005
NC
10235 if (amdgpu_freesync_vid_mode &&
10236 dm_new_crtc_state->stream &&
10237 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10238 goto skip_modeset;
10239
7e930949
NK
10240 if (dm_new_crtc_state->stream &&
10241 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
10242 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10243 new_crtc_state->mode_changed = false;
 10244 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10245 new_crtc_state->mode_changed);
62f55537 10246 }
4b9674e5 10247 }
b830ebc9 10248
02d35a67 10249 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
10250 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10251 goto skip_modeset;
e7b07cee 10252
4711c033 10253 DRM_DEBUG_ATOMIC(
4b9674e5
LL
10254 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10255 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10256 "connectors_changed:%d\n",
10257 acrtc->crtc_id,
10258 new_crtc_state->enable,
10259 new_crtc_state->active,
10260 new_crtc_state->planes_changed,
10261 new_crtc_state->mode_changed,
10262 new_crtc_state->active_changed,
10263 new_crtc_state->connectors_changed);
62f55537 10264
4b9674e5
LL
10265 /* Remove stream for any changed/disabled CRTC */
10266 if (!enable) {
62f55537 10267
4b9674e5
LL
10268 if (!dm_old_crtc_state->stream)
10269 goto skip_modeset;
eb3dc897 10270
a85ba005
NC
10271 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10272 is_timing_unchanged_for_freesync(new_crtc_state,
10273 old_crtc_state)) {
10274 new_crtc_state->mode_changed = false;
10275 DRM_DEBUG_DRIVER(
10276 "Mode change not required for front porch change, "
10277 "setting mode_changed to %d",
10278 new_crtc_state->mode_changed);
10279
10280 set_freesync_fixed_config(dm_new_crtc_state);
10281
10282 goto skip_modeset;
10283 } else if (amdgpu_freesync_vid_mode && aconnector &&
10284 is_freesync_video_mode(&new_crtc_state->mode,
10285 aconnector)) {
e88ebd83
SC
10286 struct drm_display_mode *high_mode;
10287
10288 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10289 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10290 set_freesync_fixed_config(dm_new_crtc_state);
10291 }
a85ba005
NC
10292 }
10293
4b9674e5
LL
10294 ret = dm_atomic_get_state(state, &dm_state);
10295 if (ret)
10296 goto fail;
e7b07cee 10297
4b9674e5
LL
10298 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10299 crtc->base.id);
62f55537 10300
4b9674e5
LL
10301 /* i.e. reset mode */
10302 if (dc_remove_stream_from_ctx(
10303 dm->dc,
10304 dm_state->context,
10305 dm_old_crtc_state->stream) != DC_OK) {
10306 ret = -EINVAL;
10307 goto fail;
10308 }
62f55537 10309
4b9674e5
LL
10310 dc_stream_release(dm_old_crtc_state->stream);
10311 dm_new_crtc_state->stream = NULL;
bb47de73 10312
4b9674e5 10313 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 10314
4b9674e5 10315 *lock_and_validation_needed = true;
62f55537 10316
4b9674e5
LL
 10317 } else { /* Add stream for any updated/enabled CRTC */
 10318 /*
 10319 * Quick fix to prevent a NULL pointer dereference on new_stream when
 10320 * newly added MST connectors are not found in the existing crtc_state in chained mode.
 10321 * TODO: need to dig out the root cause of this.
10322 */
10323 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10324 goto skip_modeset;
62f55537 10325
4b9674e5
LL
10326 if (modereset_required(new_crtc_state))
10327 goto skip_modeset;
62f55537 10328
4b9674e5
LL
10329 if (modeset_required(new_crtc_state, new_stream,
10330 dm_old_crtc_state->stream)) {
62f55537 10331
4b9674e5 10332 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 10333
4b9674e5
LL
10334 ret = dm_atomic_get_state(state, &dm_state);
10335 if (ret)
10336 goto fail;
27b3f4fc 10337
4b9674e5 10338 dm_new_crtc_state->stream = new_stream;
62f55537 10339
4b9674e5 10340 dc_stream_retain(new_stream);
1dc90497 10341
4711c033
LT
10342 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10343 crtc->base.id);
1dc90497 10344
4b9674e5
LL
10345 if (dc_add_stream_to_ctx(
10346 dm->dc,
10347 dm_state->context,
10348 dm_new_crtc_state->stream) != DC_OK) {
10349 ret = -EINVAL;
10350 goto fail;
9b690ef3
BL
10351 }
10352
4b9674e5
LL
10353 *lock_and_validation_needed = true;
10354 }
10355 }
e277adc5 10356
4b9674e5
LL
10357skip_modeset:
10358 /* Release extra reference */
10359 if (new_stream)
10360 dc_stream_release(new_stream);
e277adc5 10361
4b9674e5
LL
10362 /*
10363 * We want to do dc stream updates that do not require a
10364 * full modeset below.
10365 */
2afda735 10366 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
10367 return 0;
10368 /*
10369 * Given above conditions, the dc state cannot be NULL because:
10370 * 1. We're in the process of enabling CRTCs (just been added
10371 * to the dc context, or already is on the context)
10372 * 2. Has a valid connector attached, and
10373 * 3. Is currently active and enabled.
10374 * => The dc stream state currently exists.
10375 */
10376 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 10377
4b9674e5 10378 /* Scaling or underscan settings */
c521fc31
RL
10379 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10380 drm_atomic_crtc_needs_modeset(new_crtc_state))
4b9674e5
LL
10381 update_stream_scaling_settings(
10382 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 10383
b05e2c5e
DF
10384 /* ABM settings */
10385 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10386
4b9674e5
LL
10387 /*
10388 * Color management settings. We also update color properties
10389 * when a modeset is needed, to ensure it gets reprogrammed.
10390 */
10391 if (dm_new_crtc_state->base.color_mgmt_changed ||
10392 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 10393 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
10394 if (ret)
10395 goto fail;
62f55537 10396 }
e7b07cee 10397
4b9674e5
LL
10398 /* Update Freesync settings. */
10399 get_freesync_config_for_crtc(dm_new_crtc_state,
10400 dm_new_conn_state);
10401
62f55537 10402 return ret;
9635b754
DS
10403
10404fail:
10405 if (new_stream)
10406 dc_stream_release(new_stream);
10407 return ret;
62f55537 10408}
9b690ef3 10409
f6ff2a08
NK
10410static bool should_reset_plane(struct drm_atomic_state *state,
10411 struct drm_plane *plane,
10412 struct drm_plane_state *old_plane_state,
10413 struct drm_plane_state *new_plane_state)
10414{
10415 struct drm_plane *other;
10416 struct drm_plane_state *old_other_state, *new_other_state;
10417 struct drm_crtc_state *new_crtc_state;
10418 int i;
10419
70a1efac
NK
10420 /*
 10421 * TODO: Remove this hack once the checks below are sufficient
 10422 * to determine when we need to reset all the planes on
10423 * the stream.
10424 */
10425 if (state->allow_modeset)
10426 return true;
10427
f6ff2a08
NK
10428 /* Exit early if we know that we're adding or removing the plane. */
10429 if (old_plane_state->crtc != new_plane_state->crtc)
10430 return true;
10431
10432 /* old crtc == new_crtc == NULL, plane not in context. */
10433 if (!new_plane_state->crtc)
10434 return false;
10435
10436 new_crtc_state =
10437 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10438
10439 if (!new_crtc_state)
10440 return true;
10441
7316c4ad
NK
10442 /* CRTC Degamma changes currently require us to recreate planes. */
10443 if (new_crtc_state->color_mgmt_changed)
10444 return true;
10445
f6ff2a08
NK
10446 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10447 return true;
10448
10449 /*
10450 * If there are any new primary or overlay planes being added or
10451 * removed then the z-order can potentially change. To ensure
10452 * correct z-order and pipe acquisition the current DC architecture
10453 * requires us to remove and recreate all existing planes.
10454 *
10455 * TODO: Come up with a more elegant solution for this.
10456 */
10457 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 10458 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
10459 if (other->type == DRM_PLANE_TYPE_CURSOR)
10460 continue;
10461
10462 if (old_other_state->crtc != new_plane_state->crtc &&
10463 new_other_state->crtc != new_plane_state->crtc)
10464 continue;
10465
10466 if (old_other_state->crtc != new_other_state->crtc)
10467 return true;
10468
dc4cb30d
NK
10469 /* Src/dst size and scaling updates. */
10470 if (old_other_state->src_w != new_other_state->src_w ||
10471 old_other_state->src_h != new_other_state->src_h ||
10472 old_other_state->crtc_w != new_other_state->crtc_w ||
10473 old_other_state->crtc_h != new_other_state->crtc_h)
10474 return true;
10475
10476 /* Rotation / mirroring updates. */
10477 if (old_other_state->rotation != new_other_state->rotation)
10478 return true;
10479
10480 /* Blending updates. */
10481 if (old_other_state->pixel_blend_mode !=
10482 new_other_state->pixel_blend_mode)
10483 return true;
10484
10485 /* Alpha updates. */
10486 if (old_other_state->alpha != new_other_state->alpha)
10487 return true;
10488
10489 /* Colorspace changes. */
10490 if (old_other_state->color_range != new_other_state->color_range ||
10491 old_other_state->color_encoding != new_other_state->color_encoding)
10492 return true;
10493
9a81cc60
NK
10494 /* Framebuffer checks fall at the end. */
10495 if (!old_other_state->fb || !new_other_state->fb)
10496 continue;
10497
10498 /* Pixel format changes can require bandwidth updates. */
10499 if (old_other_state->fb->format != new_other_state->fb->format)
10500 return true;
10501
6eed95b0
BN
10502 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10503 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
10504
10505 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
10506 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10507 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
10508 return true;
10509 }
10510
10511 return false;
10512}
10513
b0455fda
SS
10514static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10515 struct drm_plane_state *new_plane_state,
10516 struct drm_framebuffer *fb)
10517{
e72868c4
SS
10518 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10519 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 10520 unsigned int pitch;
e72868c4 10521 bool linear;
b0455fda
SS
10522
10523 if (fb->width > new_acrtc->max_cursor_width ||
10524 fb->height > new_acrtc->max_cursor_height) {
10525 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10526 new_plane_state->fb->width,
10527 new_plane_state->fb->height);
10528 return -EINVAL;
10529 }
10530 if (new_plane_state->src_w != fb->width << 16 ||
10531 new_plane_state->src_h != fb->height << 16) {
10532 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10533 return -EINVAL;
10534 }
10535
10536 /* Pitch in pixels */
10537 pitch = fb->pitches[0] / fb->format->cpp[0];
10538
10539 if (fb->width != pitch) {
 10540 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10541 fb->width, pitch);
10542 return -EINVAL;
10543 }
10544
10545 switch (pitch) {
10546 case 64:
10547 case 128:
10548 case 256:
10549 /* FB pitch is supported by cursor plane */
10550 break;
10551 default:
10552 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10553 return -EINVAL;
10554 }
10555
e72868c4
SS
10556 /* Core DRM takes care of checking FB modifiers, so we only need to
10557 * check tiling flags when the FB doesn't have a modifier. */
10558 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10559 if (adev->family < AMDGPU_FAMILY_AI) {
10560 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10561 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10562 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10563 } else {
10564 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10565 }
10566 if (!linear) {
 10567 DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10568 return -EINVAL;
10569 }
10570 }
10571
b0455fda
SS
10572 return 0;
10573}
10574
9e869063
LL
10575static int dm_update_plane_state(struct dc *dc,
10576 struct drm_atomic_state *state,
10577 struct drm_plane *plane,
10578 struct drm_plane_state *old_plane_state,
10579 struct drm_plane_state *new_plane_state,
10580 bool enable,
10581 bool *lock_and_validation_needed)
62f55537 10582{
eb3dc897
NK
10583
10584 struct dm_atomic_state *dm_state = NULL;
62f55537 10585 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 10586 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 10587 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 10588 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 10589 struct amdgpu_crtc *new_acrtc;
f6ff2a08 10590 bool needs_reset;
62f55537 10591 int ret = 0;
e7b07cee 10592
9b690ef3 10593
9e869063
LL
10594 new_plane_crtc = new_plane_state->crtc;
10595 old_plane_crtc = old_plane_state->crtc;
10596 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10597 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 10598
626bf90f
SS
10599 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10600 if (!enable || !new_plane_crtc ||
10601 drm_atomic_plane_disabling(plane->state, new_plane_state))
10602 return 0;
10603
10604 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10605
5f581248
SS
10606 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10607 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10608 return -EINVAL;
10609 }
10610
24f99d2b 10611 if (new_plane_state->fb) {
b0455fda
SS
10612 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10613 new_plane_state->fb);
10614 if (ret)
10615 return ret;
24f99d2b
SS
10616 }
10617
9e869063 10618 return 0;
626bf90f 10619 }
9b690ef3 10620
f6ff2a08
NK
10621 needs_reset = should_reset_plane(state, plane, old_plane_state,
10622 new_plane_state);
10623
9e869063
LL
10624 /* Remove any changed/removed planes */
10625 if (!enable) {
f6ff2a08 10626 if (!needs_reset)
9e869063 10627 return 0;
a7b06724 10628
9e869063
LL
10629 if (!old_plane_crtc)
10630 return 0;
62f55537 10631
9e869063
LL
10632 old_crtc_state = drm_atomic_get_old_crtc_state(
10633 state, old_plane_crtc);
10634 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 10635
9e869063
LL
10636 if (!dm_old_crtc_state->stream)
10637 return 0;
62f55537 10638
9e869063
LL
10639 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10640 plane->base.id, old_plane_crtc->base.id);
9b690ef3 10641
9e869063
LL
10642 ret = dm_atomic_get_state(state, &dm_state);
10643 if (ret)
10644 return ret;
eb3dc897 10645
9e869063
LL
10646 if (!dc_remove_plane_from_context(
10647 dc,
10648 dm_old_crtc_state->stream,
10649 dm_old_plane_state->dc_state,
10650 dm_state->context)) {
62f55537 10651
c3537613 10652 return -EINVAL;
9e869063 10653 }
e7b07cee 10654
9b690ef3 10655
9e869063
LL
10656 dc_plane_state_release(dm_old_plane_state->dc_state);
10657 dm_new_plane_state->dc_state = NULL;
1dc90497 10658
9e869063 10659 *lock_and_validation_needed = true;
1dc90497 10660
9e869063
LL
10661 } else { /* Add new planes */
10662 struct dc_plane_state *dc_new_plane_state;
1dc90497 10663
9e869063
LL
10664 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10665 return 0;
e7b07cee 10666
9e869063
LL
10667 if (!new_plane_crtc)
10668 return 0;
e7b07cee 10669
9e869063
LL
10670 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10671 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 10672
9e869063
LL
10673 if (!dm_new_crtc_state->stream)
10674 return 0;
62f55537 10675
f6ff2a08 10676 if (!needs_reset)
9e869063 10677 return 0;
62f55537 10678
8c44515b
AP
10679 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10680 if (ret)
10681 return ret;
10682
9e869063 10683 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 10684
9e869063
LL
10685 dc_new_plane_state = dc_create_plane_state(dc);
10686 if (!dc_new_plane_state)
10687 return -ENOMEM;
62f55537 10688
4711c033
LT
10689 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10690 plane->base.id, new_plane_crtc->base.id);
8c45c5db 10691
695af5f9 10692 ret = fill_dc_plane_attributes(
1348969a 10693 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
10694 dc_new_plane_state,
10695 new_plane_state,
10696 new_crtc_state);
10697 if (ret) {
10698 dc_plane_state_release(dc_new_plane_state);
10699 return ret;
10700 }
62f55537 10701
9e869063
LL
10702 ret = dm_atomic_get_state(state, &dm_state);
10703 if (ret) {
10704 dc_plane_state_release(dc_new_plane_state);
10705 return ret;
10706 }
eb3dc897 10707
9e869063
LL
10708 /*
10709 * Any atomic check errors that occur after this will
10710 * not need a release. The plane state will be attached
10711 * to the stream, and therefore part of the atomic
10712 * state. It'll be released when the atomic state is
10713 * cleaned.
10714 */
10715 if (!dc_add_plane_to_context(
10716 dc,
10717 dm_new_crtc_state->stream,
10718 dc_new_plane_state,
10719 dm_state->context)) {
62f55537 10720
9e869063
LL
10721 dc_plane_state_release(dc_new_plane_state);
10722 return -EINVAL;
10723 }
8c45c5db 10724
9e869063 10725 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 10726
9e869063
LL
10727 /* Tell DC to do a full surface update every time there
10728 * is a plane change. Inefficient, but works for now.
10729 */
10730 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10731
10732 *lock_and_validation_needed = true;
62f55537 10733 }
e7b07cee
HW
10734
10735
62f55537
AG
10736 return ret;
10737}
a87fa993 10738
12f4849a
SS
10739static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10740 struct drm_crtc *crtc,
10741 struct drm_crtc_state *new_crtc_state)
10742{
d1bfbe8a
SS
10743 struct drm_plane *cursor = crtc->cursor, *underlying;
10744 struct drm_plane_state *new_cursor_state, *new_underlying_state;
10745 int i;
10746 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
12f4849a
SS
10747
10748 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10749 * cursor per pipe but it's going to inherit the scaling and
 10750 * positioning from the underlying pipe. Check that the cursor plane's
 10751 * scaling matches the underlying planes'. */
12f4849a 10752
d1bfbe8a
SS
10753 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10754 if (!new_cursor_state || !new_cursor_state->fb) {
12f4849a
SS
10755 return 0;
10756 }
10757
10758 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10759 (new_cursor_state->src_w >> 16);
10760 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10761 (new_cursor_state->src_h >> 16);
10762
d1bfbe8a
SS
10763 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10764 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10765 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10766 continue;
12f4849a 10767
d1bfbe8a
SS
10768 /* Ignore disabled planes */
10769 if (!new_underlying_state->fb)
10770 continue;
10771
10772 underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10773 (new_underlying_state->src_w >> 16);
10774 underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10775 (new_underlying_state->src_h >> 16);
10776
10777 if (cursor_scale_w != underlying_scale_w ||
10778 cursor_scale_h != underlying_scale_h) {
10779 drm_dbg_atomic(crtc->dev,
10780 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10781 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10782 return -EINVAL;
10783 }
10784
10785 /* If this plane covers the whole CRTC, no need to check planes underneath */
10786 if (new_underlying_state->crtc_x <= 0 &&
10787 new_underlying_state->crtc_y <= 0 &&
10788 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10789 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10790 break;
12f4849a
SS
10791 }
10792
10793 return 0;
10794}
10795
e10517b3 10796#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
10797static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10798{
10799 struct drm_connector *connector;
10800 struct drm_connector_state *conn_state;
10801 struct amdgpu_dm_connector *aconnector = NULL;
10802 int i;
10803 for_each_new_connector_in_state(state, connector, conn_state, i) {
10804 if (conn_state->crtc != crtc)
10805 continue;
10806
10807 aconnector = to_amdgpu_dm_connector(connector);
10808 if (!aconnector->port || !aconnector->mst_port)
10809 aconnector = NULL;
10810 else
10811 break;
10812 }
10813
10814 if (!aconnector)
10815 return 0;
10816
10817 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10818}
e10517b3 10819#endif
44be939f 10820
b8592b48
LL
10821/**
10822 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10823 * @dev: The DRM device
10824 * @state: The atomic state to commit
10825 *
10826 * Validate that the given atomic state is programmable by DC into hardware.
10827 * This involves constructing a &struct dc_state reflecting the new hardware
10828 * state we wish to commit, then querying DC to see if it is programmable. It's
10829 * important not to modify the existing DC state. Otherwise, atomic_check
10830 * may unexpectedly commit hardware changes.
10831 *
10832 * When validating the DC state, it's important that the right locks are
10833 * acquired. For full updates case which removes/adds/updates streams on one
10834 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10835 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 10836 * flip using DRMs synchronization events.
b8592b48
LL
10837 *
10838 * Note that DM adds the affected connectors for all CRTCs in state, when that
10839 * might not seem necessary. This is because DC stream creation requires the
10840 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10841 * be possible but non-trivial - a possible TODO item.
10842 *
10843 * Return: -Error code if validation failed.
10844 */
7578ecda
AD
10845static int amdgpu_dm_atomic_check(struct drm_device *dev,
10846 struct drm_atomic_state *state)
62f55537 10847{
1348969a 10848 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 10849 struct dm_atomic_state *dm_state = NULL;
62f55537 10850 struct dc *dc = adev->dm.dc;
62f55537 10851 struct drm_connector *connector;
c2cea706 10852 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 10853 struct drm_crtc *crtc;
fc9e9920 10854 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
10855 struct drm_plane *plane;
10856 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 10857 enum dc_status status;
1e88ad0a 10858 int ret, i;
62f55537 10859 bool lock_and_validation_needed = false;
886876ec 10860 struct dm_crtc_state *dm_old_crtc_state;
6513104b
HW
10861#if defined(CONFIG_DRM_AMD_DC_DCN)
10862 struct dsc_mst_fairness_vars vars[MAX_PIPES];
41724ea2
BL
10863 struct drm_dp_mst_topology_state *mst_state;
10864 struct drm_dp_mst_topology_mgr *mgr;
6513104b 10865#endif
62f55537 10866
e8a98235 10867 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 10868
62f55537 10869 ret = drm_atomic_helper_check_modeset(dev, state);
68ca1c3e
S
10870 if (ret) {
10871 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
01e28f9c 10872 goto fail;
68ca1c3e 10873 }
62f55537 10874
c5892a10
SW
10875 /* Check connector changes */
10876 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10877 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10878 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10879
10880 /* Skip connectors that are disabled or part of modeset already. */
10881 if (!old_con_state->crtc && !new_con_state->crtc)
10882 continue;
10883
10884 if (!new_con_state->crtc)
10885 continue;
10886
10887 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10888 if (IS_ERR(new_crtc_state)) {
68ca1c3e 10889 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
c5892a10
SW
10890 ret = PTR_ERR(new_crtc_state);
10891 goto fail;
10892 }
10893
10894 if (dm_old_con_state->abm_level !=
10895 dm_new_con_state->abm_level)
10896 new_crtc_state->connectors_changed = true;
10897 }
10898
e10517b3 10899#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 10900 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
10901 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10902 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10903 ret = add_affected_mst_dsc_crtcs(state, crtc);
68ca1c3e
S
10904 if (ret) {
10905 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
44be939f 10906 goto fail;
68ca1c3e 10907 }
44be939f
ML
10908 }
10909 }
10910 }
e10517b3 10911#endif
1e88ad0a 10912 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
10913 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10914
1e88ad0a 10915 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 10916 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
10917 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10918 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 10919 continue;
7bef1af3 10920
03fc4cf4 10921 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
68ca1c3e
S
10922 if (ret) {
10923 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
03fc4cf4 10924 goto fail;
68ca1c3e 10925 }
03fc4cf4 10926
1e88ad0a
S
10927 if (!new_crtc_state->enable)
10928 continue;
fc9e9920 10929
1e88ad0a 10930 ret = drm_atomic_add_affected_connectors(state, crtc);
68ca1c3e
S
10931 if (ret) {
10932 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
706bc8c5 10933 goto fail;
68ca1c3e 10934 }
fc9e9920 10935
1e88ad0a 10936 ret = drm_atomic_add_affected_planes(state, crtc);
68ca1c3e
S
10937 if (ret) {
10938 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
1e88ad0a 10939 goto fail;
68ca1c3e 10940 }
115a385c 10941
cbac53f7 10942 if (dm_old_crtc_state->dsc_force_changed)
115a385c 10943 new_crtc_state->mode_changed = true;
e7b07cee
HW
10944 }
10945
2d9e6431
NK
10946 /*
10947 * Add all primary and overlay planes on the CRTC to the state
10948 * whenever a plane is enabled to maintain correct z-ordering
10949 * and to enable fast surface updates.
10950 */
10951 drm_for_each_crtc(crtc, dev) {
10952 bool modified = false;
10953
10954 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10955 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10956 continue;
10957
10958 if (new_plane_state->crtc == crtc ||
10959 old_plane_state->crtc == crtc) {
10960 modified = true;
10961 break;
10962 }
10963 }
10964
10965 if (!modified)
10966 continue;
10967
10968 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10969 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10970 continue;
10971
10972 new_plane_state =
10973 drm_atomic_get_plane_state(state, plane);
10974
10975 if (IS_ERR(new_plane_state)) {
10976 ret = PTR_ERR(new_plane_state);
68ca1c3e 10977 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
2d9e6431
NK
10978 goto fail;
10979 }
10980 }
10981 }
10982
62f55537 10983 /* Remove existing planes if they are modified */
9e869063
LL
10984 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10985 ret = dm_update_plane_state(dc, state, plane,
10986 old_plane_state,
10987 new_plane_state,
10988 false,
10989 &lock_and_validation_needed);
68ca1c3e
S
10990 if (ret) {
10991 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 10992 goto fail;
68ca1c3e 10993 }
62f55537
AG
10994 }
10995
10996 /* Disable all crtcs which require disable */
4b9674e5
LL
10997 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10998 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10999 old_crtc_state,
11000 new_crtc_state,
11001 false,
11002 &lock_and_validation_needed);
68ca1c3e
S
11003 if (ret) {
11004 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
4b9674e5 11005 goto fail;
68ca1c3e 11006 }
62f55537
AG
11007 }
11008
11009 /* Enable all crtcs which require enable */
4b9674e5
LL
11010 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11011 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11012 old_crtc_state,
11013 new_crtc_state,
11014 true,
11015 &lock_and_validation_needed);
68ca1c3e
S
11016 if (ret) {
11017 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
4b9674e5 11018 goto fail;
68ca1c3e 11019 }
62f55537
AG
11020 }
11021
11022 /* Add new/modified planes */
9e869063
LL
11023 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11024 ret = dm_update_plane_state(dc, state, plane,
11025 old_plane_state,
11026 new_plane_state,
11027 true,
11028 &lock_and_validation_needed);
68ca1c3e
S
11029 if (ret) {
11030 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 11031 goto fail;
68ca1c3e 11032 }
62f55537
AG
11033 }
11034
b349f76e
ES
11035 /* Run this here since we want to validate the streams we created */
11036 ret = drm_atomic_helper_check_planes(dev, state);
68ca1c3e
S
11037 if (ret) {
11038 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
b349f76e 11039 goto fail;
68ca1c3e 11040 }
62f55537 11041
12f4849a
SS
11042 /* Check cursor planes scaling */
11043 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11044 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
68ca1c3e
S
11045 if (ret) {
11046 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
12f4849a 11047 goto fail;
68ca1c3e 11048 }
12f4849a
SS
11049 }
11050
43d10d30
NK
11051 if (state->legacy_cursor_update) {
11052 /*
11053 * This is a fast cursor update coming from the plane update
11054 * helper, check if it can be done asynchronously for better
11055 * performance.
11056 */
11057 state->async_update =
11058 !drm_atomic_helper_async_check(dev, state);
11059
11060 /*
11061 * Skip the remaining global validation if this is an async
11062 * update. Cursor updates can be done without affecting
11063 * state or bandwidth calcs and this avoids the performance
11064 * penalty of locking the private state object and
11065 * allocating a new dc_state.
11066 */
11067 if (state->async_update)
11068 return 0;
11069 }
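 /*
 * Note: drm_atomic_helper_async_check() returns 0 when the update can
 * be applied asynchronously and a negative errno otherwise, hence the
 * negation above when deriving state->async_update.
 */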
11070
ebdd27e1 11071 /* Check scaling and underscan changes */
1f6010a9 11072 /* TODO: Removed scaling-changes validation due to the inability to commit
 11073 * a new stream into the context w/o causing a full reset. Need to
 11074 * decide how to handle this.
11075 */
c2cea706 11076 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
11077 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11078 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11079 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
11080
11081 /* Skip any modesets/resets */
0bc9706d
LSL
11082 if (!acrtc || drm_atomic_crtc_needs_modeset(
11083 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
11084 continue;
11085
b830ebc9 11086 /* Skip anything that is not a scaling or underscan change */
54d76575 11087 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
11088 continue;
11089
11090 lock_and_validation_needed = true;
11091 }
11092
41724ea2
BL
11093#if defined(CONFIG_DRM_AMD_DC_DCN)
11094 /* set the slot info for each mst_state based on the link encoding format */
11095 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11096 struct amdgpu_dm_connector *aconnector;
11097 struct drm_connector *connector;
11098 struct drm_connector_list_iter iter;
11099 u8 link_coding_cap;
11100
 11101 if (!mgr->mst_state)
11102 continue;
11103
11104 drm_connector_list_iter_begin(dev, &iter);
11105 drm_for_each_connector_iter(connector, &iter) {
11106 int id = connector->index;
11107
11108 if (id == mst_state->mgr->conn_base_id) {
11109 aconnector = to_amdgpu_dm_connector(connector);
11110 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11111 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11112
11113 break;
11114 }
11115 }
11116 drm_connector_list_iter_end(&iter);
11117
11118 }
11119#endif
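 /*
 * Background note: with 8b/10b link encoding an MST link carries 63
 * usable time slots (slot 0 is reserved for the MTP header), while
 * 128b/132b carries 64 starting at slot 0; drm_dp_mst_update_slots()
 * records that per-topology so later VCPI allocation uses the right
 * total.
 */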
f6d7c7fa
NK
11120 /**
11121 * Streams and planes are reset when there are changes that affect
11122 * bandwidth. Anything that affects bandwidth needs to go through
11123 * DC global validation to ensure that the configuration can be applied
11124 * to hardware.
11125 *
11126 * We have to currently stall out here in atomic_check for outstanding
11127 * commits to finish in this case because our IRQ handlers reference
11128 * DRM state directly - we can end up disabling interrupts too early
11129 * if we don't.
11130 *
11131 * TODO: Remove this stall and drop DM state private objects.
a87fa993 11132 */
f6d7c7fa 11133 if (lock_and_validation_needed) {
eb3dc897 11134 ret = dm_atomic_get_state(state, &dm_state);
68ca1c3e
S
11135 if (ret) {
11136 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
eb3dc897 11137 goto fail;
68ca1c3e 11138 }
e7b07cee
HW
11139
11140 ret = do_aquire_global_lock(dev, state);
68ca1c3e
S
11141 if (ret) {
11142 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
e7b07cee 11143 goto fail;
68ca1c3e 11144 }
1dc90497 11145
d9fe1a4c 11146#if defined(CONFIG_DRM_AMD_DC_DCN)
68ca1c3e
S
11147 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11148 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
8c20a1ed 11149 goto fail;
68ca1c3e 11150 }
8c20a1ed 11151
6513104b 11152 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
68ca1c3e
S
11153 if (ret) {
11154 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
29b9ba74 11155 goto fail;
68ca1c3e 11156 }
d9fe1a4c 11157#endif
29b9ba74 11158
ded58c7b
ZL
11159		/*
11160		 * Perform validation of the MST topology in the state:
11161		 * we need to perform the MST atomic check before calling
11162		 * dc_validate_global_state(), otherwise we risk getting
11163		 * stuck in an infinite loop and eventually hanging.
11164		 */
11165 ret = drm_dp_mst_atomic_check(state);
68ca1c3e
S
11166 if (ret) {
11167 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
ded58c7b 11168 goto fail;
68ca1c3e 11169 }
85fb8bb9 11170 status = dc_validate_global_state(dc, dm_state->context, true);
74a16675 11171 if (status != DC_OK) {
68ca1c3e 11172			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
74a16675 11173 dc_status_to_str(status), status);
e7b07cee
HW
11174 ret = -EINVAL;
11175 goto fail;
11176 }
bd200d19 11177 } else {
674e78ac 11178 /*
bd200d19
NK
11179		 * The commit is a fast update. Fast updates shouldn't change
11180		 * the DC context or affect global validation, and their commit
11181		 * work can be done in parallel with other commits that don't
11182		 * touch the same resource. If we have a new DC context as part of
11183 * the DM atomic state from validation we need to free it and
11184 * retain the existing one instead.
fde9f39a
MR
11185 *
11186 * Furthermore, since the DM atomic state only contains the DC
11187 * context and can safely be annulled, we can free the state
11188 * and clear the associated private object now to free
11189 * some memory and avoid a possible use-after-free later.
674e78ac 11190 */
bd200d19 11191
fde9f39a
MR
11192 for (i = 0; i < state->num_private_objs; i++) {
11193 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 11194
fde9f39a
MR
11195 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11196 int j = state->num_private_objs-1;
bd200d19 11197
fde9f39a
MR
11198 dm_atomic_destroy_state(obj,
11199 state->private_objs[i].state);
11200
11201 /* If i is not at the end of the array then the
11202 * last element needs to be moved to where i was
11203 * before the array can safely be truncated.
11204 */
11205 if (i != j)
11206 state->private_objs[i] =
11207 state->private_objs[j];
bd200d19 11208
fde9f39a
MR
11209 state->private_objs[j].ptr = NULL;
11210 state->private_objs[j].state = NULL;
11211 state->private_objs[j].old_state = NULL;
11212 state->private_objs[j].new_state = NULL;
11213
11214 state->num_private_objs = j;
11215 break;
11216 }
bd200d19 11217 }
e7b07cee
HW
11218 }
11219
caff0e66
NK
11220 /* Store the overall update type for use later in atomic check. */
11221	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11222 struct dm_crtc_state *dm_new_crtc_state =
11223 to_dm_crtc_state(new_crtc_state);
11224
f6d7c7fa
NK
11225 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11226 UPDATE_TYPE_FULL :
11227 UPDATE_TYPE_FAST;
e7b07cee
HW
11228 }
11229
11230	/* ret must be 0 (success) at this point */
11231 WARN_ON(ret);
e8a98235
RS
11232
11233 trace_amdgpu_dm_atomic_check_finish(state, ret);
11234
e7b07cee
HW
11235 return ret;
11236
11237fail:
11238 if (ret == -EDEADLK)
01e28f9c 11239 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 11240 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 11241 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 11242 else
01e28f9c 11243		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee 11244
e8a98235
RS
11245 trace_amdgpu_dm_atomic_check_finish(state, ret);
11246
e7b07cee
HW
11247 return ret;
11248}
11249
3ee6b26b
AD
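/*
 * Check whether the sink ignores the MSA timing parameters (the
 * DP_MSA_TIMING_PAR_IGNORED bit in DP_DOWN_STREAM_PORT_COUNT), which
 * is a prerequisite for driving variable timings (FreeSync) over DP.
 */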
11250static bool is_dp_capable_without_timing_msa(struct dc *dc,
11251 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
11252{
11253 uint8_t dpcd_data;
11254 bool capable = false;
11255
c84dec2f 11256 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
11257 dm_helpers_dp_read_dpcd(
11258 NULL,
c84dec2f 11259 amdgpu_dm_connector->dc_link,
e7b07cee
HW
11260 DP_DOWN_STREAM_PORT_COUNT,
11261 &dpcd_data,
11262 sizeof(dpcd_data))) {
11263		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11264 }
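	/*
	 * Worked example: dpcd_data = 0x41 means one downstream port
	 * (bits 3:0) with DP_MSA_TIMING_PAR_IGNORED (bit 6, 0x40) set,
	 * so capable evaluates to true.
	 */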
11265
11266 return capable;
11267}
f9b4f20c 11268
46db138d
SW
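/*
 * Forward one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA
 * extension block to the DMUB firmware parser and handle its reply:
 * a per-chunk ack, or the parsed AMD VSDB data after the final chunk.
 */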
11269static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11270 unsigned int offset,
11271 unsigned int total_length,
11272 uint8_t *data,
11273 unsigned int length,
11274 struct amdgpu_hdmi_vsdb_info *vsdb)
11275{
11276 bool res;
11277 union dmub_rb_cmd cmd;
11278 struct dmub_cmd_send_edid_cea *input;
11279 struct dmub_cmd_edid_cea_output *output;
11280
11281 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11282 return false;
11283
11284 memset(&cmd, 0, sizeof(cmd));
11285
11286 input = &cmd.edid_cea.data.input;
11287
11288 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11289 cmd.edid_cea.header.sub_type = 0;
11290 cmd.edid_cea.header.payload_bytes =
11291 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11292 input->offset = offset;
11293 input->length = length;
11294 input->total_length = total_length;
11295 memcpy(input->payload, data, length);
11296
11297 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11298 if (!res) {
11299 DRM_ERROR("EDID CEA parser failed\n");
11300 return false;
11301 }
11302
11303 output = &cmd.edid_cea.data.output;
11304
11305 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11306 if (!output->ack.success) {
11307 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11308 output->ack.offset);
11309 }
11310 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11311 if (!output->amd_vsdb.vsdb_found)
11312 return false;
11313
11314 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11315 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11316 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11317 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11318 } else {
b76a8062 11319 DRM_WARN("Unknown EDID CEA parser results\n");
46db138d
SW
11320 return false;
11321 }
11322
11323 return true;
11324}
11325
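/*
 * Stream a CEA extension block to the DMCU firmware parser in 8-byte
 * chunks, expecting an ack after each chunk and the AMD VSDB parse
 * result after the last one.
 */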
11326static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
f9b4f20c
SW
11327 uint8_t *edid_ext, int len,
11328 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11329{
11330 int i;
f9b4f20c
SW
11331
11332 /* send extension block to DMCU for parsing */
11333 for (i = 0; i < len; i += 8) {
11334 bool res;
11335 int offset;
11336
11337		/* send 8 bytes at a time */
46db138d 11338 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
f9b4f20c
SW
11339 return false;
11340
11341		if (i + 8 == len) {
11342			/* EDID block fully sent, expect the parse result */
11343 int version, min_rate, max_rate;
11344
46db138d 11345 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
f9b4f20c
SW
11346 if (res) {
11347 /* amd vsdb found */
11348 vsdb_info->freesync_supported = 1;
11349 vsdb_info->amd_vsdb_version = version;
11350 vsdb_info->min_refresh_rate_hz = min_rate;
11351 vsdb_info->max_refresh_rate_hz = max_rate;
11352 return true;
11353 }
11354 /* not amd vsdb */
11355 return false;
11356 }
11357
11358		/* check for ack */
46db138d 11359 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
f9b4f20c
SW
11360 if (!res)
11361 return false;
11362 }
11363
11364 return false;
11365}
11366
46db138d
SW
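/*
 * DMUB variant of the CEA parse loop: dm_edid_parser_send_cea() fills
 * in vsdb_info once the final chunk has been processed.
 */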
11367static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11368 uint8_t *edid_ext, int len,
11369 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11370{
11371 int i;
11372
11373	/* send extension block to DMUB for parsing */
11374 for (i = 0; i < len; i += 8) {
11375		/* send 8 bytes at a time */
11376 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11377 return false;
11378 }
11379
11380 return vsdb_info->freesync_supported;
11381}
11382
11383static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11384 uint8_t *edid_ext, int len,
11385 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11386{
11387 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11388
11389 if (adev->dm.dmub_srv)
11390 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11391 else
11392 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11393}
11394
7c7dd774 11395static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
f9b4f20c
SW
11396 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11397{
11398 uint8_t *edid_ext = NULL;
11399 int i;
11400 bool valid_vsdb_found = false;
11401
11402 /*----- drm_find_cea_extension() -----*/
11403 /* No EDID or EDID extensions */
11404 if (edid == NULL || edid->extensions == 0)
7c7dd774 11405 return -ENODEV;
f9b4f20c
SW
11406
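	/*
	 * EDID extension blocks are EDID_LENGTH (128) bytes each and
	 * follow the base block, so extension i starts at offset
	 * EDID_LENGTH * (i + 1).
	 */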
11407 /* Find CEA extension */
11408 for (i = 0; i < edid->extensions; i++) {
11409 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11410 if (edid_ext[0] == CEA_EXT)
11411 break;
11412 }
11413
11414 if (i == edid->extensions)
7c7dd774 11415 return -ENODEV;
f9b4f20c
SW
11416
11417 /*----- cea_db_offsets() -----*/
11418 if (edid_ext[0] != CEA_EXT)
7c7dd774 11419 return -ENODEV;
f9b4f20c
SW
11420
11421 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
7c7dd774
AB
11422
11423 return valid_vsdb_found ? i : -ENODEV;
f9b4f20c
SW
11424}
11425
98e6436d
AK
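/*
 * Determine the connector's FreeSync/VRR capability: for DP/eDP sinks
 * parse the EDID range-limits descriptor (when MSA timing parameters
 * can be ignored), for HDMI sinks parse the AMD VSDB. The result is
 * cached in the connector state and exposed via the "vrr_capable"
 * drm property.
 */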
11426void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11427 struct edid *edid)
e7b07cee 11428{
eb0709ba 11429 int i = 0;
e7b07cee
HW
11430 struct detailed_timing *timing;
11431 struct detailed_non_pixel *data;
11432 struct detailed_data_monitor_range *range;
c84dec2f
HW
11433 struct amdgpu_dm_connector *amdgpu_dm_connector =
11434 to_amdgpu_dm_connector(connector);
bb47de73 11435 struct dm_connector_state *dm_con_state = NULL;
9ad54467 11436 struct dc_sink *sink;
e7b07cee
HW
11437
11438 struct drm_device *dev = connector->dev;
1348969a 11439 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 11440 bool freesync_capable = false;
f9b4f20c 11441 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
b830ebc9 11442
8218d7f1
HW
11443 if (!connector->state) {
11444		DRM_ERROR("%s - Connector has no state\n", __func__);
bb47de73 11445 goto update;
8218d7f1
HW
11446 }
11447
9b2fdc33
AP
11448 sink = amdgpu_dm_connector->dc_sink ?
11449 amdgpu_dm_connector->dc_sink :
11450 amdgpu_dm_connector->dc_em_sink;
11451
11452 if (!edid || !sink) {
98e6436d
AK
11453 dm_con_state = to_dm_connector_state(connector->state);
11454
11455 amdgpu_dm_connector->min_vfreq = 0;
11456 amdgpu_dm_connector->max_vfreq = 0;
11457 amdgpu_dm_connector->pixel_clock_mhz = 0;
9b2fdc33
AP
11458 connector->display_info.monitor_range.min_vfreq = 0;
11459 connector->display_info.monitor_range.max_vfreq = 0;
11460 freesync_capable = false;
98e6436d 11461
bb47de73 11462 goto update;
98e6436d
AK
11463 }
11464
8218d7f1
HW
11465 dm_con_state = to_dm_connector_state(connector->state);
11466
e7b07cee 11467 if (!adev->dm.freesync_module)
bb47de73 11468 goto update;
f9b4f20c
SW
11469
11470
9b2fdc33
AP
11471 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11472 || sink->sink_signal == SIGNAL_TYPE_EDP) {
f9b4f20c
SW
11473 bool edid_check_required = false;
11474
11475 if (edid) {
e7b07cee
HW
11476 edid_check_required = is_dp_capable_without_timing_msa(
11477 adev->dm.dc,
c84dec2f 11478 amdgpu_dm_connector);
e7b07cee 11479 }
e7b07cee 11480
f9b4f20c
SW
11481		if (edid_check_required && (edid->version > 1 ||
11482 (edid->version == 1 && edid->revision > 1))) {
11483 for (i = 0; i < 4; i++) {
e7b07cee 11484
f9b4f20c
SW
11485 timing = &edid->detailed_timings[i];
11486 data = &timing->data.other_data;
11487 range = &data->data.range;
11488 /*
11489 * Check if monitor has continuous frequency mode
11490 */
11491 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11492 continue;
11493 /*
11494				 * Check the range-limits-only flag: if flags == 1,
11495				 * the descriptor provides no additional timing
11496				 * information. Default GTF, GTF secondary curve and
11497				 * CVT are not supported.
11498 */
11499 if (range->flags != 1)
11500 continue;
a0ffc3fd 11501
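				/*
				 * The range descriptor stores the max pixel
				 * clock in 10 MHz units, hence the multiply
				 * by 10 below.
				 */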
f9b4f20c
SW
11502 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11503 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11504 amdgpu_dm_connector->pixel_clock_mhz =
11505 range->pixel_clock_mhz * 10;
a0ffc3fd 11506
f9b4f20c
SW
11507 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11508 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
e7b07cee 11509
f9b4f20c
SW
11510 break;
11511 }
98e6436d 11512
f9b4f20c
SW
11513 if (amdgpu_dm_connector->max_vfreq -
11514 amdgpu_dm_connector->min_vfreq > 10) {
98e6436d 11515
f9b4f20c
SW
11516 freesync_capable = true;
11517 }
11518 }
9b2fdc33 11519 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
7c7dd774
AB
11520 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11521 if (i >= 0 && vsdb_info.freesync_supported) {
f9b4f20c
SW
11522 timing = &edid->detailed_timings[i];
11523 data = &timing->data.other_data;
11524
11525 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11526 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11527 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11528 freesync_capable = true;
11529
11530 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11531 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
e7b07cee
HW
11532 }
11533 }
bb47de73
NK
11534
11535update:
11536 if (dm_con_state)
11537 dm_con_state->freesync_capable = freesync_capable;
11538
11539 if (connector->vrr_capable_property)
11540 drm_connector_set_vrr_capable_property(connector,
11541 freesync_capable);
e7b07cee
HW
11542}
11543
3d4e52d0
VL
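/*
 * Apply the current force_timing_sync setting to every active stream
 * and retrigger CRTC synchronization, all under the dc_lock.
 */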
11544void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11545{
1348969a 11546 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
11547 struct dc *dc = adev->dm.dc;
11548 int i;
11549
11550 mutex_lock(&adev->dm.dc_lock);
11551 if (dc->current_state) {
11552 for (i = 0; i < dc->current_state->stream_count; ++i)
11553 dc->current_state->streams[i]
11554 ->triggered_crtc_reset.enabled =
11555 adev->dm.force_timing_sync;
11556
11557 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11558 dc_trigger_sync(dc, dc->current_state);
11559 }
11560 mutex_unlock(&adev->dm.dc_lock);
11561}
9d83722d
RS
11562
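/*
 * Register access helpers used by DC: thin wrappers around CGS access
 * that also feed the amdgpu_dc_wreg/amdgpu_dc_rreg tracepoints.
 */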
11563void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11564 uint32_t value, const char *func_name)
11565{
11566#ifdef DM_CHECK_ADDR_0
11567 if (address == 0) {
11568		DC_ERR("invalid register write; address = 0\n");
11569 return;
11570 }
11571#endif
11572 cgs_write_register(ctx->cgs_device, address, value);
11573 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11574}
11575
11576uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11577 const char *func_name)
11578{
11579 uint32_t value;
11580#ifdef DM_CHECK_ADDR_0
11581 if (address == 0) {
11582 DC_ERR("invalid register read; address = 0\n");
11583 return 0;
11584 }
11585#endif
11586
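	/*
	 * Reads cannot be deferred while DMUB register-helper offload is
	 * gathering writes; treat such a read as a driver bug.
	 */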
11587 if (ctx->dmub_srv &&
11588 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11589 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11590 ASSERT(false);
11591 return 0;
11592 }
11593
11594 value = cgs_read_register(ctx->cgs_device, address);
11595
11596 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11597
11598 return value;
11599}
81927e28 11600
88f52b1f
JS
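/*
 * Translate a DMUB async-to-sync status code into the value expected
 * by the synchronous caller: the AUX reply length (or 0 for
 * SET_CONFIG) on success, otherwise -1 with *operation_result holding
 * the failure reason.
 */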
11601int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11602 uint8_t status_type, uint32_t *operation_result)
11603{
11604 struct amdgpu_device *adev = ctx->driver_context;
11605 int return_status = -1;
11606 struct dmub_notification *p_notify = adev->dm.dmub_notify;
11607
11608 if (is_cmd_aux) {
11609 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11610 return_status = p_notify->aux_reply.length;
11611 *operation_result = p_notify->result;
11612 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11613 *operation_result = AUX_RET_ERROR_TIMEOUT;
11614 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11615 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11616 } else {
11617 *operation_result = AUX_RET_ERROR_UNKNOWN;
11618 }
11619 } else {
11620 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11621 return_status = 0;
11622 *operation_result = p_notify->sc_status;
11623 } else {
11624 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11625 }
11626 }
11627
11628 return return_status;
11629}
11630
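/*
 * Issue an AUX or SET_CONFIG request to DMUB asynchronously, then wait
 * (up to 10 s) for the notification handler to signal
 * dmub_aux_transfer_done, and translate the outcome via
 * amdgpu_dm_set_dmub_async_sync_status().
 */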
11631int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11632 unsigned int link_index, void *cmd_payload, void *operation_result)
81927e28
JS
11633{
11634 struct amdgpu_device *adev = ctx->driver_context;
11635 int ret = 0;
11636
88f52b1f
JS
11637 if (is_cmd_aux) {
11638 dc_process_dmub_aux_transfer_async(ctx->dc,
11639 link_index, (struct aux_payload *)cmd_payload);
11640 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11641 (struct set_config_cmd_payload *)cmd_payload,
11642 adev->dm.dmub_notify)) {
11643 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11644 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11645 (uint32_t *)operation_result);
11646 }
11647
9e3a50d2 11648 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
81927e28 11649 if (ret == 0) {
9e3a50d2 11650		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
88f52b1f
JS
11651 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11652 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11653 (uint32_t *)operation_result);
81927e28 11654 }
81927e28 11655
88f52b1f
JS
11656 if (is_cmd_aux) {
11657 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11658 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
81927e28 11659
88f52b1f
JS
11660 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11661 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11662 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11663 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11664 adev->dm.dmub_notify->aux_reply.length);
11665 }
11666 }
81927e28
JS
11667 }
11668
88f52b1f
JS
11669 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11670 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11671 (uint32_t *)operation_result);
81927e28 11672}
1edf5ae1
ZL
11673
11674/*
11675 * Check whether seamless boot is supported.
11676 *
11677 * So far we only support seamless boot on CHIP_VANGOGH.
11678 * If everything goes well, we may consider expanding
11679 * seamless boot to other ASICs.
11680 */
11681bool check_seamless_boot_capability(struct amdgpu_device *adev)
11682{
11683 switch (adev->asic_type) {
11684 case CHIP_VANGOGH:
11685 if (!adev->mman.keep_stolen_vga_memory)
11686 return true;
11687 break;
11688 default:
11689 break;
11690 }
11691
11692 return false;
11693}
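
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): driver init code could gate its seamless-boot optimizations
 * on this check, e.g.
 *
 *	if (check_seamless_boot_capability(adev))
 *		init_data.flags.allow_seamless_boot_optimization = true;
 *
 * The allow_seamless_boot_optimization flag name is assumed here for
 * illustration only.
 */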