/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

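/*
 * Map the DP dongle type reported in the link's DPCD caps to the DRM
 * subconnector type exposed through the "subconnector" property.
 */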
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

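/*
 * Refresh the DP "subconnector" property on the connector whenever the
 * attached sink (and therefore the detected dongle type) changes.
 */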
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get the counter of vertical blanks for a CRTC.
 * @adev: desired amdgpu device
 * @crtc: which CRTC to get the counter from
 *
 * Return: counter of vertical blanks, or 0 on error.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

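/*
 * Walk the CRTC list and return the amdgpu_crtc whose OTG instance matches
 * @otg_inst; falls back to the first CRTC if the instance is invalid.
 */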
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

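/*
 * VRR-active checks: the _irq variant reads the freesync state cached in
 * dm_irq_params (safe from interrupt context), while the plain variant
 * reads it from the atomic dm_crtc_state.
 */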
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

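/*
 * Decide whether DC needs a vmin/vmax timing adjustment: either the new
 * state is fixed-rate VRR, or the VRR active state toggled between the
 * old and new CRTC states.
 */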
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

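/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles VUPDATE (end of front-porch): tracks the measured refresh rate
 * and, in VRR mode, performs core vblank handling plus BTR processing for
 * pre-DCE12 ASICs.
 */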
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread and also signals the event to wake up the thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

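/*
 * Deferred-work handler for DMUB HPD notifications queued by
 * dm_dmub_outbox1_low_irq(); dispatches the stored notification to the
 * registered callback outside interrupt context.
 */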
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
			dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);

}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox Interrupt
 * event handler.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}


	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}

}

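/*
 * Audio component glue: lets the HDA driver query the ELD for a given
 * port/pin so the audio side can match display audio endpoints.
 */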
888static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
889 int pipe, bool *enabled,
890 unsigned char *buf, int max_bytes)
891{
892 struct drm_device *dev = dev_get_drvdata(kdev);
1348969a 893 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
894 struct drm_connector *connector;
895 struct drm_connector_list_iter conn_iter;
896 struct amdgpu_dm_connector *aconnector;
897 int ret = 0;
898
899 *enabled = false;
900
901 mutex_lock(&adev->dm.audio_lock);
902
903 drm_connector_list_iter_begin(dev, &conn_iter);
904 drm_for_each_connector_iter(connector, &conn_iter) {
905 aconnector = to_amdgpu_dm_connector(connector);
906 if (aconnector->audio_inst != port)
907 continue;
908
909 *enabled = true;
910 ret = drm_eld_size(connector->eld);
911 memcpy(buf, connector->eld, min(max_bytes, ret));
912
913 break;
914 }
915 drm_connector_list_iter_end(&conn_iter);
916
917 mutex_unlock(&adev->dm.audio_lock);
918
919 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
920
921 return ret;
922}
923
924static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
925 .get_eld = amdgpu_dm_audio_component_get_eld,
926};
927
928static int amdgpu_dm_audio_component_bind(struct device *kdev,
929 struct device *hda_kdev, void *data)
930{
931 struct drm_device *dev = dev_get_drvdata(kdev);
1348969a 932 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
933 struct drm_audio_component *acomp = data;
934
935 acomp->ops = &amdgpu_dm_audio_component_ops;
936 acomp->dev = kdev;
937 adev->dm.audio_component = acomp;
938
939 return 0;
940}
941
942static void amdgpu_dm_audio_component_unbind(struct device *kdev,
943 struct device *hda_kdev, void *data)
944{
945 struct drm_device *dev = dev_get_drvdata(kdev);
1348969a 946 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
947 struct drm_audio_component *acomp = data;
948
949 acomp->ops = NULL;
950 acomp->dev = NULL;
951 adev->dm.audio_component = NULL;
952}
953
954static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
955 .bind = amdgpu_dm_audio_component_bind,
956 .unbind = amdgpu_dm_audio_component_unbind,
957};
958
959static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
960{
961 int i, ret;
962
963 if (!amdgpu_audio)
964 return 0;
965
966 adev->mode_info.audio.enabled = true;
967
968 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
969
970 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
971 adev->mode_info.audio.pin[i].channels = -1;
972 adev->mode_info.audio.pin[i].rate = -1;
973 adev->mode_info.audio.pin[i].bits_per_sample = -1;
974 adev->mode_info.audio.pin[i].status_bits = 0;
975 adev->mode_info.audio.pin[i].category_code = 0;
976 adev->mode_info.audio.pin[i].connected = false;
977 adev->mode_info.audio.pin[i].id =
978 adev->dm.dc->res_pool->audios[i]->inst;
979 adev->mode_info.audio.pin[i].offset = 0;
980 }
981
982 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
983 if (ret < 0)
984 return ret;
985
986 adev->dm.audio_registered = true;
987
988 return 0;
989}
990
991static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
992{
993 if (!amdgpu_audio)
994 return;
995
996 if (!adev->mode_info.audio.enabled)
997 return;
998
999 if (adev->dm.audio_registered) {
1000 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1001 adev->dm.audio_registered = false;
1002 }
1003
1004 /* TODO: Disable audio? */
1005
1006 adev->mode_info.audio.enabled = false;
1007}
1008
dfd84d90 1009static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
6ce8f316
NK
1010{
1011 struct drm_audio_component *acomp = adev->dm.audio_component;
1012
1013 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1014 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1015
1016 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1017 pin, -1);
1018 }
1019}
1020
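/*
 * Bring up the DMCUB firmware service: validate firmware and framebuffer
 * info, reset the hardware, copy firmware/BIOS data into the framebuffer
 * windows, then initialize the hardware and wait for auto-load.
 */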
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
		hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
#endif
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

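/*
 * On resume, skip the full DMCUB bring-up when the hardware is already
 * initialized and only wait for the firmware auto-load to complete.
 */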
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
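/*
 * Fill a dc_phy_addr_space_config from the GMC view of the system:
 * system aperture, AGP window, and GART page-table addresses that DCN
 * needs for system-memory scanout.
 */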
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;


	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
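/*
 * Worker that tracks how many CRTCs have vblank interrupts enabled; it
 * gates MALL idle optimizations and PSR entry/exit on that count.
 */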
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

#endif
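/*
 * Deferred handler for HPD RX interrupts: re-detects the sink and, if the
 * link is still connected, runs automated-test or link-loss handling
 * under the DC lock.
 */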
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}

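/*
 * Allocate one single-threaded offload workqueue per link so HPD RX
 * handling for different links never serializes against each other.
 */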
static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;


	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

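/*
 * Match the PCI identity of the device against the quirk table above;
 * entries list boards where memory stutter mode must stay disabled.
 */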
static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

7578ecda 1394static int amdgpu_dm_init(struct amdgpu_device *adev)
4562236b
HW
1395{
1396 struct dc_init_data init_data;
52704fca
BL
1397#ifdef CONFIG_DRM_AMD_DC_HDCP
1398 struct dc_callback_init init_params;
1399#endif
743b9786 1400 int r;
52704fca 1401
4a580877 1402 adev->dm.ddev = adev_to_drm(adev);
4562236b
HW
1403 adev->dm.adev = adev;
1404
4562236b
HW
1405 /* Zero all the fields */
1406 memset(&init_data, 0, sizeof(init_data));
52704fca
BL
1407#ifdef CONFIG_DRM_AMD_DC_HDCP
1408 memset(&init_params, 0, sizeof(init_params));
1409#endif
4562236b 1410
674e78ac 1411 mutex_init(&adev->dm.dc_lock);
6ce8f316 1412 mutex_init(&adev->dm.audio_lock);
ea3b4242
QZ
1413#if defined(CONFIG_DRM_AMD_DC_DCN)
1414 spin_lock_init(&adev->dm.vblank_lock);
1415#endif
674e78ac 1416
4562236b
HW
1417 if(amdgpu_dm_irq_init(adev)) {
1418 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1419 goto error;
1420 }
1421
1422 init_data.asic_id.chip_family = adev->family;
1423
2dc31ca1 1424 init_data.asic_id.pci_revision_id = adev->pdev->revision;
4562236b 1425 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
dae66a04 1426 init_data.asic_id.chip_id = adev->pdev->device;
4562236b 1427
770d13b1 1428 init_data.asic_id.vram_width = adev->gmc.vram_width;
4562236b
HW
1429 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1430 init_data.asic_id.atombios_base_address =
1431 adev->mode_info.atom_context->bios;
1432
1433 init_data.driver = adev;
1434
1435 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1436
1437 if (!adev->dm.cgs_device) {
1438 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1439 goto error;
1440 }
1441
1442 init_data.cgs_device = adev->dm.cgs_device;
1443
4562236b
HW
1444 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1445
fd546bc5
AD
1446 switch (adev->ip_versions[DCE_HWIP][0]) {
1447 case IP_VERSION(2, 1, 0):
1448 switch (adev->dm.dmcub_fw_version) {
1449 case 0: /* development */
1450 case 0x1: /* linux-firmware.git hash 6d9f399 */
1451 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1452 init_data.flags.disable_dmcu = false;
1453 break;
1454 default:
1455 init_data.flags.disable_dmcu = true;
1456 }
1457 break;
1458 case IP_VERSION(2, 0, 3):
1459 init_data.flags.disable_dmcu = true;
1460 break;
1461 default:
1462 break;
1463 }
1464
60fb100b
AD
1465 switch (adev->asic_type) {
1466 case CHIP_CARRIZO:
1467 case CHIP_STONEY:
1ebcaebd
NK
1468 init_data.flags.gpu_vm_support = true;
1469 break;
60fb100b 1470 default:
1d789535 1471 switch (adev->ip_versions[DCE_HWIP][0]) {
559f591d
AD
1472 case IP_VERSION(1, 0, 0):
1473 case IP_VERSION(1, 0, 1):
a7f520bf
AD
1474 /* enable S/G on PCO and RV2 */
1475 if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1476 (adev->apu_flags & AMD_APU_IS_PICASSO))
1477 init_data.flags.gpu_vm_support = true;
1478 break;
fd546bc5 1479 case IP_VERSION(2, 1, 0):
c08182f2
AD
1480 case IP_VERSION(3, 0, 1):
1481 case IP_VERSION(3, 1, 2):
1482 case IP_VERSION(3, 1, 3):
b5b8ed44 1483 case IP_VERSION(3, 1, 5):
0fe382fb 1484 case IP_VERSION(3, 1, 6):
c08182f2
AD
1485 init_data.flags.gpu_vm_support = true;
1486 break;
c08182f2
AD
1487 default:
1488 break;
1489 }
60fb100b
AD
1490 break;
1491 }
6e227308 1492
a7f520bf
AD
1493 if (init_data.flags.gpu_vm_support)
1494 adev->mode_info.gpu_vm_support = true;
1495
04b94af4
AD
1496 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1497 init_data.flags.fbc_support = true;
1498
d99f38ae
AD
1499 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1500 init_data.flags.multi_mon_pp_mclk_switch = true;
1501
eaf56410
LL
1502 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1503 init_data.flags.disable_fractional_pwm = true;
a5148245
ZL
1504
1505 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1506 init_data.flags.edp_no_power_sequencing = true;
eaf56410 1507
12320274
AP
1508#ifdef CONFIG_DRM_AMD_DC_DCN
1509 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1510 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1511 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1512 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1513#endif
1514
7aba117a 1515 init_data.flags.seamless_boot_edp_requested = false;
78ad75f8 1516
1edf5ae1 1517 if (check_seamless_boot_capability(adev)) {
7aba117a 1518 init_data.flags.seamless_boot_edp_requested = true;
1edf5ae1
ZL
1519 init_data.flags.allow_seamless_boot_optimization = true;
1520 DRM_INFO("Seamless boot condition check passed\n");
1521 }

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
		adev->dm.dc->debug.disable_dsc = true;
		adev->dm.dc->debug.disable_dsc_edp = true;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
	if (!adev->dm.hpd_rx_offload_wq) {
		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
		goto error;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		/* Call the DC init_memory func */
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: fail to register dmub aux callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
#endif /* CONFIG_DRM_AMD_DC_DCN */
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;
	}
#endif

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
		destroy_workqueue(adev->dm.delayed_hpd_wq);
		adev->dm.delayed_hpd_wq = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	if (adev->dm.hpd_rx_offload_wq) {
		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
			if (adev->dm.hpd_rx_offload_wq[i].wq) {
				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
			}
		}

		kfree(adev->dm.hpd_rx_offload_wq);
		adev->dm.hpd_rx_offload_wq = NULL;
	}

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

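/*
 * DMCU firmware is only carried for a few ASICs (Raven/Picasso and Navi12
 * in this function); everything else returns early. When present, the ERAM
 * and INTV sections are registered with the PSP loader, and a missing
 * firmware file is tolerated since DMCU is optional.
 */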
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			return 0;
		default:
			break;
		}
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

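/*
 * Software-side DMUB setup: pick the firmware matching this DCN IP version,
 * create the DMUB service, size its memory regions from the firmware header,
 * back them with a VRAM buffer and compute the region/framebuffer info that
 * dm_dmub_hw_init() consumes later.
 */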
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case IP_VERSION(3, 0, 0):
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		} else {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		}
		break;
	case IP_VERSION(3, 0, 1):
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case IP_VERSION(3, 0, 2):
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case IP_VERSION(3, 0, 3):
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;
	case IP_VERSION(3, 1, 5):
		dmub_asic = DMUB_ASIC_DCN315;
		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
		break;
	case IP_VERSION(3, 1, 6):
		dmub_asic = DMUB_ASIC_DCN316;
		fw_name_dmub = FIRMWARE_DCN316_DMUB;
		break;
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

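/* Start MST topology management on every MST-capable DC link. */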
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;
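	/*
	 * The LUT above is a linear (identity) ramp, linear_lut[0] = 0x0000
	 * up to linear_lut[15] = 0xFFFF, so the backlight transfer curve is
	 * left unshaped before ABM applies its own reduction.
	 */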

	params.set = 0;
	params.backlight_ramping_override = false;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;
	/* When ABM is implemented on dmcub, the dmcu object will be NULL.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu) {
		if (!dmcu_load_iram(dmcu, params))
			return -EINVAL;
	} else if (adev->dm.dc->ctx->dmub_srv) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		int edp_num;

		get_edp_links(adev->dm.dc, edp_links, &edp_num);
		for (i = 0; i < edp_num; i++) {
			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
				return -EINVAL;
		}
	}

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

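/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * If a manager fails to resume, MST is turned off for that connector and a
 * hotplug event is sent so userspace can re-probe.
 */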
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	int ret = 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed; the
	 * settings should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct,
	 * then calls the pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 0, 2):
	case IP_VERSION(2, 0, 0):
		break;
	default:
		return 0;
	}

	ret = amdgpu_dpm_write_watermarks_table(adev);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}


static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

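/*
 * Enable or disable the pageflip and vblank interrupts of every stream in
 * @state that still has planes attached. Used around GPU reset, where the
 * cached DC state is committed with interrupts off and restored afterwards.
 */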
static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

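/*
 * Commit a copy of the current DC state with all streams (and their planes)
 * removed, effectively blanking every pipe; used on the suspend side of GPU
 * reset.
 */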
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

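/* Flush the per-link hpd_rx offload workqueues before suspending IRQs. */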
static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
{
	int i;

	if (dm->hpd_rx_offload_wq) {
		for (i = 0; i < dm->dc->caps.max_links; i++)
			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
	}
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);

#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(adev->dm.dc, false);
#endif

		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		hpd_rx_irq_work_suspend(dm);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	hpd_rx_irq_work_suspend(dm);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

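/*
 * For connectors forced on from userspace there may be no physical sink to
 * detect, so fabricate a sink of the matching signal type and read the EDID
 * locally; the rest of the stack can then treat the link as connected.
 */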
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink)
		dc_sink_release(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}

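/*
 * Replay the cached DC state after GPU reset: mark every plane of every
 * stream for a full update and commit them so the hardware matches the
 * pre-reset configuration.
 */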
static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}

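/*
 * Push a stream update with dpms_off = true for the stream currently driving
 * @link, and record the forced-off state on the CRTC so a later commit can
 * undo it.
 */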
static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	acrtc_state->force_dpms_off = true;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		/*
		 * The dc->current_state is backed up into dm->cached_dc_state
		 * before we commit 0 streams.
		 *
		 * DC will clear link encoder assignments on the real state
		 * but the changes won't propagate over to the copy we made
		 * before the 0 streams commit.
		 *
		 * DC expects that link encoder assignments are *not* valid
		 * when committing a state, so as a workaround we can copy
		 * off of the current state.
		 *
		 * We lose the previous assignments, but we had already
		 * committed 0 streams anyway.
		 */
		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);

		if (dc_enable_dmub_notifications(adev->dm.dc))
			amdgpu_dm_outbox_init(adev);

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Re-enable outbox interrupts for DPIA. */
	if (dc_enable_dmub_notifications(adev->dm.dc))
		amdgpu_dm_outbox_init(adev);

	/* Before powering on DC we need to re-initialize DMUB. */
	dm_dmub_hw_resume(adev);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * This is the case when traversing through already created
		 * MST connectors; they should be skipped.
		 */
		if (aconnector->dc_link &&
		    aconnector->dc_link->type == dc_connection_mst_branch)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.early_fini = amdgpu_dm_early_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};


/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	int i;

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	for (i = 0; i < dm->num_of_edps; i++) {
		if (link == dm->backlight_link[i])
			break;
	}
	if (i >= dm->num_of_edps)
		return;
	caps = &dm->backlight_caps[i];
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 /*||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
		caps->aux_support = true;

	if (amdgpu_backlight == 0)
		caps->aux_support = false;
	else if (amdgpu_backlight == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression would normally need floating-point
	 * precision; to avoid that complexity, we exploit the fact that CV
	 * is divided by a constant. By Euclid's division algorithm, CV can
	 * be written as CV = 32*q + r. Substituting CV in the luminance
	 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
	 * pre-compute the values of 2**(r/32). The pre-computation used the
	 * following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified against
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];
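	/*
	 * Worked example (illustrative): max_cll = 65 gives q = 2 and r = 1,
	 * so max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204, matching
	 * 50*2**(65/32) ~= 204.4 from the CTA-861-G formula above.
	 */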

	/* min luminance: maxLum * (CV/255)^2 / 100 */
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * An EDID-managed connector gets its first update only in the
	 * mode_valid hook, after which the connector sink is set to either a
	 * fake or a physical sink depending on the link status.
	 * Skip if this was already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use dc_em_sink to fake a
		 * stream, because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				 aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
			 aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			dc_sink_release(aconnector->dc_sink);
		}

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		drm_connector_update_edid_property(connector, aconnector->edid);
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}

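/*
 * Hotplug interrupt work: re-run sink detection on @aconnector (emulated for
 * forced connectors), refresh HDCP state, restore the DRM connector state
 * and send a hotplug uevent when the connector is not forced by userspace.
 */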
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
	struct dm_crtc_state *dm_crtc_state = NULL;

	if (adev->dm.disable_hpd_irq)
		return;

	if (dm_con_state->base.state && dm_con_state->base.crtc)
		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
					dm_con_state->base.state,
					dm_con_state->base.crtc));
	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_connector_hotplug_event(connector);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		if (new_connection_type == dc_connection_none &&
		    aconnector->dc_link->type == dc_connection_none &&
		    dm_crtc_state)
			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_connector_hotplug_event(connector);
	}
	mutex_unlock(&aconnector->hpd_lock);

}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;

	handle_hpd_irq_helper(aconnector);

}

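/*
 * Drain MST sideband messages after an HPD short pulse: read the ESI bytes
 * (DP_SINK_COUNT_ESI for DPCD rev >= 1.2, DP_SINK_COUNT otherwise), let the
 * topology manager handle the IRQ, ACK it back to the sink, then re-read
 * until no new IRQ is pending or max_process_count iterations elapse.
 */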
static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

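/*
 * Queue one hpd_rx IRQ payload onto the per-link offload workqueue so the
 * heavier handling (link-loss recovery, automated test requests) runs
 * outside interrupt context.
 */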
static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
					 union hpd_irq_data hpd_irq_data)
{
	struct hpd_rx_irq_offload_work *offload_work =
		kzalloc(sizeof(*offload_work), GFP_KERNEL);

	if (!offload_work) {
		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
		return;
	}

	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
	offload_work->data = hpd_irq_data;
	offload_work->offload_wq = offload_wq;

	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
}

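/*
 * HPD RX (short pulse) handler: let DC decode the IRQ data, hand MST
 * sideband messages, automated-test requests and link loss to the offload
 * queue, and fall back to a full re-detection when the downstream port
 * status of a non-MST link changed.
 */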
4562236b
HW
3218static void handle_hpd_rx_irq(void *param)
3219{
c84dec2f 3220 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
3221 struct drm_connector *connector = &aconnector->base;
3222 struct drm_device *dev = connector->dev;
53cbf65c 3223 struct dc_link *dc_link = aconnector->dc_link;
4562236b 3224 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 3225 bool result = false;
fbbdadf2 3226 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 3227 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 3228 union hpd_irq_data hpd_irq_data;
8e794421
WL
3229 bool link_loss = false;
3230 bool has_left_work = false;
3231 int idx = aconnector->base.index;
3232 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
2a0f9270
BL
3233
3234 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 3235
b972b4f9
HW
3236 if (adev->dm.disable_hpd_irq)
3237 return;
3238
1f6010a9
DF
3239 /*
3240 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
4562236b
HW
3241 * conflict, after implement i2c helper, this mutex should be
3242 * retired.
3243 */
3244	mutex_lock(&aconnector->hpd_lock);
3245
3246 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3247 &link_loss, true, &has_left_work);
3248
3249 if (!has_left_work)
3250 goto out;
3251
3252 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3253 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3254 goto out;
3255 }
3256
3257 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3258 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3259 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3260 dm_handle_mst_sideband_msg(aconnector);
3261 goto out;
3262 }
3263
3264 if (link_loss) {
3265 bool skip = false;
3266
3267 spin_lock(&offload_wq->offload_lock);
3268 skip = offload_wq->is_handling_link_loss;
3269
3270 if (!skip)
3271 offload_wq->is_handling_link_loss = true;
3272
3273 spin_unlock(&offload_wq->offload_lock);
3274
3275 if (!skip)
3276 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3277
3278 goto out;
3279 }
3280 }
3281
3282out:
3283	if (result && !is_mst_root_connector) {
3284		/* Downstream Port status changed. */
3285 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3286 DRM_ERROR("KMS: Failed to detect connector\n");
3287
3288 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3289 emulated_link_detect(dc_link);
3290
3291 if (aconnector->fake_enable)
3292 aconnector->fake_enable = false;
3293
3294 amdgpu_dm_update_connector_after_detect(aconnector);
3295
3296
3297 drm_modeset_lock_all(dev);
3298 dm_restore_drm_connector_state(dev, connector);
3299 drm_modeset_unlock_all(dev);
3300
3301			drm_kms_helper_connector_hotplug_event(connector);
3302		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3303
3304 if (aconnector->fake_enable)
3305 aconnector->fake_enable = false;
3306
3307 amdgpu_dm_update_connector_after_detect(aconnector);
3308
3309
3310 drm_modeset_lock_all(dev);
3311 dm_restore_drm_connector_state(dev, connector);
3312 drm_modeset_unlock_all(dev);
3313
3314			drm_kms_helper_connector_hotplug_event(connector);
3315 }
3316 }
3317#ifdef CONFIG_DRM_AMD_DC_HDCP
3318 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3319 if (adev->dm.hdcp_workqueue)
3320 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3321 }
3322#endif
3323
3324	if (dc_link->type != dc_connection_mst_branch)
3325		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3326
3327 mutex_unlock(&aconnector->hpd_lock);
3328}
3329
3330static void register_hpd_handlers(struct amdgpu_device *adev)
3331{
3332	struct drm_device *dev = adev_to_drm(adev);
3333	struct drm_connector *connector;
3334	struct amdgpu_dm_connector *aconnector;
3335 const struct dc_link *dc_link;
3336 struct dc_interrupt_params int_params = {0};
3337
3338 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3339 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3340
3341 list_for_each_entry(connector,
3342 &dev->mode_config.connector_list, head) {
3343
3344		aconnector = to_amdgpu_dm_connector(connector);
3345 dc_link = aconnector->dc_link;
3346
3347 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3348 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3349 int_params.irq_source = dc_link->irq_source_hpd;
3350
3351 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3352 handle_hpd_irq,
3353 (void *) aconnector);
3354 }
3355
3356 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3357
3358 /* Also register for DP short pulse (hpd_rx). */
3359 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3360 int_params.irq_source = dc_link->irq_source_hpd_rx;
3361
3362 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3363 handle_hpd_rx_irq,
3364 (void *) aconnector);
3365
3366 if (adev->dm.hpd_rx_offload_wq)
3367 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3368 aconnector;
3369 }
3370 }
3371}
3372
3373#if defined(CONFIG_DRM_AMD_DC_SI)
3374/* Register IRQ sources and initialize IRQ callbacks */
3375static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3376{
3377 struct dc *dc = adev->dm.dc;
3378 struct common_irq_params *c_irq_params;
3379 struct dc_interrupt_params int_params = {0};
3380 int r;
3381 int i;
3382 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3383
3384 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3385 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3386
3387 /*
3388 * Actions of amdgpu_irq_add_id():
3389 * 1. Register a set() function with base driver.
3390 * Base driver will call set() function to enable/disable an
3391 * interrupt in DC hardware.
3392 * 2. Register amdgpu_dm_irq_handler().
3393 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3394 * coming from DC hardware.
3395 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3396 * for acknowledging and handling. */
3397
3398 /* Use VBLANK interrupt */
3399 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3400		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3401 if (r) {
3402 DRM_ERROR("Failed to add crtc irq id!\n");
3403 return r;
3404 }
3405
3406 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3407 int_params.irq_source =
3408			dc_interrupt_to_irq_source(dc, i + 1, 0);
3409
3410 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3411
3412 c_irq_params->adev = adev;
3413 c_irq_params->irq_src = int_params.irq_source;
3414
3415 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3416 dm_crtc_high_irq, c_irq_params);
3417 }
3418
3419 /* Use GRPH_PFLIP interrupt */
3420 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3421 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3422 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3423 if (r) {
3424 DRM_ERROR("Failed to add page flip irq id!\n");
3425 return r;
3426 }
3427
3428 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3429 int_params.irq_source =
3430 dc_interrupt_to_irq_source(dc, i, 0);
3431
3432 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3433
3434 c_irq_params->adev = adev;
3435 c_irq_params->irq_src = int_params.irq_source;
3436
3437 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3438 dm_pflip_high_irq, c_irq_params);
3439
3440 }
3441
3442 /* HPD */
3443 r = amdgpu_irq_add_id(adev, client_id,
3444 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3445 if (r) {
3446 DRM_ERROR("Failed to add hpd irq id!\n");
3447 return r;
3448 }
3449
3450 register_hpd_handlers(adev);
3451
3452 return 0;
3453}
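/*
 * Mapping sketch for the VBLANK loop above (assuming
 * dc_interrupt_to_irq_source() maps legacy srcid k to
 * DC_IRQ_SOURCE_VBLANKk): with num_crtc = 6, crtc i registers
 * srcid i + 1 and lands in vblank_params[i] for i = 0..5.
 */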
3454#endif
3455
3456/* Register IRQ sources and initialize IRQ callbacks */
3457static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3458{
3459 struct dc *dc = adev->dm.dc;
3460 struct common_irq_params *c_irq_params;
3461 struct dc_interrupt_params int_params = {0};
3462 int r;
3463 int i;
3464	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3465
3466	if (adev->family >= AMDGPU_FAMILY_AI)
3467		client_id = SOC15_IH_CLIENTID_DCE;
3468
3469 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3470 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3471
3472 /*
3473 * Actions of amdgpu_irq_add_id():
3474 * 1. Register a set() function with base driver.
3475 * Base driver will call set() function to enable/disable an
3476 * interrupt in DC hardware.
3477 * 2. Register amdgpu_dm_irq_handler().
3478 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3479 * coming from DC hardware.
3480 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3481 * for acknowledging and handling. */
3482
3483	/* Use VBLANK interrupt */
3484	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3485		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3486 if (r) {
3487 DRM_ERROR("Failed to add crtc irq id!\n");
3488 return r;
3489 }
3490
3491 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3492 int_params.irq_source =
3493			dc_interrupt_to_irq_source(dc, i, 0);
3494
3495		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3496
3497 c_irq_params->adev = adev;
3498 c_irq_params->irq_src = int_params.irq_source;
3499
3500 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3501 dm_crtc_high_irq, c_irq_params);
3502 }
3503
3504 /* Use VUPDATE interrupt */
3505 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3506 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3507 if (r) {
3508 DRM_ERROR("Failed to add vupdate irq id!\n");
3509 return r;
3510 }
3511
3512 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3513 int_params.irq_source =
3514 dc_interrupt_to_irq_source(dc, i, 0);
3515
3516 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3517
3518 c_irq_params->adev = adev;
3519 c_irq_params->irq_src = int_params.irq_source;
3520
3521 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3522 dm_vupdate_high_irq, c_irq_params);
3523 }
3524
3525	/* Use GRPH_PFLIP interrupt */
3526 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3527 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3528		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3529 if (r) {
3530 DRM_ERROR("Failed to add page flip irq id!\n");
3531 return r;
3532 }
3533
3534 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3535 int_params.irq_source =
3536 dc_interrupt_to_irq_source(dc, i, 0);
3537
3538 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3539
3540 c_irq_params->adev = adev;
3541 c_irq_params->irq_src = int_params.irq_source;
3542
3543 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3544 dm_pflip_high_irq, c_irq_params);
3545
3546 }
3547
3548 /* HPD */
3549 r = amdgpu_irq_add_id(adev, client_id,
3550 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3551 if (r) {
3552 DRM_ERROR("Failed to add hpd irq id!\n");
3553 return r;
3554 }
3555
3556 register_hpd_handlers(adev);
3557
3558 return 0;
3559}
3560
3561#if defined(CONFIG_DRM_AMD_DC_DCN)
3562/* Register IRQ sources and initialize IRQ callbacks */
3563static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3564{
3565 struct dc *dc = adev->dm.dc;
3566 struct common_irq_params *c_irq_params;
3567 struct dc_interrupt_params int_params = {0};
3568 int r;
3569 int i;
3570#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3571 static const unsigned int vrtl_int_srcid[] = {
3572 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3573 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3574 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3575 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3576 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3577 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3578 };
3579#endif
3580
3581 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3582 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3583
3584 /*
3585 * Actions of amdgpu_irq_add_id():
3586 * 1. Register a set() function with base driver.
3587 * Base driver will call set() function to enable/disable an
3588 * interrupt in DC hardware.
3589 * 2. Register amdgpu_dm_irq_handler().
3590 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3591 * coming from DC hardware.
3592 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3593 * for acknowledging and handling.
3594	 */
3595
3596 /* Use VSTARTUP interrupt */
3597 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3598 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3599 i++) {
3600		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3601
3602 if (r) {
3603 DRM_ERROR("Failed to add crtc irq id!\n");
3604 return r;
3605 }
3606
3607 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3608 int_params.irq_source =
3609 dc_interrupt_to_irq_source(dc, i, 0);
3610
3611 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3612
3613 c_irq_params->adev = adev;
3614 c_irq_params->irq_src = int_params.irq_source;
3615
3616 amdgpu_dm_irq_register_interrupt(
3617 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3618 }
3619
3620 /* Use otg vertical line interrupt */
3621#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3622 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3623 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3624 vrtl_int_srcid[i], &adev->vline0_irq);
3625
3626 if (r) {
3627 DRM_ERROR("Failed to add vline0 irq id!\n");
3628 return r;
3629 }
3630
3631 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3632 int_params.irq_source =
3633 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3634
3635 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3636 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3637 break;
3638 }
3639
3640 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3641 - DC_IRQ_SOURCE_DC1_VLINE0];
3642
3643 c_irq_params->adev = adev;
3644 c_irq_params->irq_src = int_params.irq_source;
3645
3646 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3647 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3648 }
3649#endif
3650
3651 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3652 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3653 * to trigger at end of each vblank, regardless of state of the lock,
3654 * matching DCE behaviour.
3655 */
3656 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3657 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3658 i++) {
3659 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3660
3661 if (r) {
3662 DRM_ERROR("Failed to add vupdate irq id!\n");
3663 return r;
3664 }
3665
3666 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3667 int_params.irq_source =
3668 dc_interrupt_to_irq_source(dc, i, 0);
3669
3670 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3671
3672 c_irq_params->adev = adev;
3673 c_irq_params->irq_src = int_params.irq_source;
3674
3675		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3676				dm_vupdate_high_irq, c_irq_params);
3677 }
3678
3679 /* Use GRPH_PFLIP interrupt */
3680 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3681			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3682			i++) {
3683		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3684 if (r) {
3685 DRM_ERROR("Failed to add page flip irq id!\n");
3686 return r;
3687 }
3688
3689 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3690 int_params.irq_source =
3691 dc_interrupt_to_irq_source(dc, i, 0);
3692
3693 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3694
3695 c_irq_params->adev = adev;
3696 c_irq_params->irq_src = int_params.irq_source;
3697
3698 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3699 dm_pflip_high_irq, c_irq_params);
3700
3701 }
3702
3703 /* HPD */
3704 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3705 &adev->hpd_irq);
3706 if (r) {
3707 DRM_ERROR("Failed to add hpd irq id!\n");
3708 return r;
3709 }
3710
3711	register_hpd_handlers(adev);
3712
3713 return 0;
3714}
3715/* Register Outbox IRQ sources and initialize IRQ callbacks */
3716static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3717{
3718 struct dc *dc = adev->dm.dc;
3719 struct common_irq_params *c_irq_params;
3720 struct dc_interrupt_params int_params = {0};
3721 int r, i;
3722
3723 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3724 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3725
3726 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3727 &adev->dmub_outbox_irq);
3728 if (r) {
3729 DRM_ERROR("Failed to add outbox irq id!\n");
3730 return r;
3731 }
3732
3733 if (dc->ctx->dmub_srv) {
3734 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3735 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3736		int_params.irq_source =
3737		dc_interrupt_to_irq_source(dc, i, 0);
3738
3739		c_irq_params = &adev->dm.dmub_outbox_params[0];
3740
3741 c_irq_params->adev = adev;
3742 c_irq_params->irq_src = int_params.irq_source;
3743
3744 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3745			dm_dmub_outbox1_low_irq, c_irq_params);
3746 }
3747
3748 return 0;
3749}
3750#endif
3751
3752/*
3753 * Acquires the lock for the atomic state object and returns
3754 * the new atomic state.
3755 *
3756 * This should only be called during atomic check.
3757 */
3758int dm_atomic_get_state(struct drm_atomic_state *state,
3759 struct dm_atomic_state **dm_state)
3760{
3761 struct drm_device *dev = state->dev;
3762	struct amdgpu_device *adev = drm_to_adev(dev);
3763 struct amdgpu_display_manager *dm = &adev->dm;
3764 struct drm_private_state *priv_state;
3765
3766 if (*dm_state)
3767 return 0;
3768
3769 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3770 if (IS_ERR(priv_state))
3771 return PTR_ERR(priv_state);
3772
3773 *dm_state = to_dm_atomic_state(priv_state);
3774
3775 return 0;
3776}
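/*
 * Usage sketch (illustrative only): callers in atomic check start with
 * a NULL cursor and let the helper fill it once:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *
 * A second call with the same cursor returns 0 immediately, so the
 * private object state is only acquired once per check.
 */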
3777
3778static struct dm_atomic_state *
3779dm_atomic_get_new_state(struct drm_atomic_state *state)
3780{
3781 struct drm_device *dev = state->dev;
3782	struct amdgpu_device *adev = drm_to_adev(dev);
3783 struct amdgpu_display_manager *dm = &adev->dm;
3784 struct drm_private_obj *obj;
3785 struct drm_private_state *new_obj_state;
3786 int i;
3787
3788 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3789 if (obj->funcs == dm->atomic_obj.funcs)
3790 return to_dm_atomic_state(new_obj_state);
3791 }
3792
3793 return NULL;
3794}
3795
3796static struct drm_private_state *
3797dm_atomic_duplicate_state(struct drm_private_obj *obj)
3798{
3799 struct dm_atomic_state *old_state, *new_state;
3800
3801 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3802 if (!new_state)
3803 return NULL;
3804
3805 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3806
3807 old_state = to_dm_atomic_state(obj->state);
3808
3809 if (old_state && old_state->context)
3810 new_state->context = dc_copy_state(old_state->context);
3811
3812 if (!new_state->context) {
3813 kfree(new_state);
3814 return NULL;
3815 }
3816
3817 return &new_state->base;
3818}
3819
3820static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3821 struct drm_private_state *state)
3822{
3823 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3824
3825 if (dm_state && dm_state->context)
3826 dc_release_state(dm_state->context);
3827
3828 kfree(dm_state);
3829}
3830
3831static struct drm_private_state_funcs dm_atomic_state_funcs = {
3832 .atomic_duplicate_state = dm_atomic_duplicate_state,
3833 .atomic_destroy_state = dm_atomic_destroy_state,
3834};
3835
3836static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3837{
3838	struct dm_atomic_state *state;
3839 int r;
3840
3841 adev->mode_info.mode_config_initialized = true;
3842
3843 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3844 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3845
3846 adev_to_drm(adev)->mode_config.max_width = 16384;
3847 adev_to_drm(adev)->mode_config.max_height = 16384;
3848
3849 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3850 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3851	/* indicates support for immediate flip */
3852	adev_to_drm(adev)->mode_config.async_page_flip = true;
3853
3854	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3855
3856 state = kzalloc(sizeof(*state), GFP_KERNEL);
3857 if (!state)
3858 return -ENOMEM;
3859
3860	state->context = dc_create_state(adev->dm.dc);
3861 if (!state->context) {
3862 kfree(state);
3863 return -ENOMEM;
3864 }
3865
3866 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3867
3868	drm_atomic_private_obj_init(adev_to_drm(adev),
3869				    &adev->dm.atomic_obj,
3870 &state->base,
3871 &dm_atomic_state_funcs);
3872
3873	r = amdgpu_display_modeset_create_props(adev);
3874 if (r) {
3875 dc_release_state(state->context);
3876 kfree(state);
3877		return r;
3878	}
3879
3880	r = amdgpu_dm_audio_init(adev);
3881 if (r) {
3882 dc_release_state(state->context);
3883 kfree(state);
3884		return r;
3885	}
3886
3887 return 0;
3888}
3889
3890#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3891#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3892#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3893
3894#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3895 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3896
3897static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3898 int bl_idx)
3899{
3900#if defined(CONFIG_ACPI)
3901 struct amdgpu_dm_backlight_caps caps;
3902
3903 memset(&caps, 0, sizeof(caps));
3904
3905	if (dm->backlight_caps[bl_idx].caps_valid)
3906 return;
3907
3908	amdgpu_acpi_get_backlight_caps(&caps);
3909	if (caps.caps_valid) {
3910		dm->backlight_caps[bl_idx].caps_valid = true;
3911 if (caps.aux_support)
3912 return;
3913 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3914 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3915	} else {
3916		dm->backlight_caps[bl_idx].min_input_signal =
3917				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3918		dm->backlight_caps[bl_idx].max_input_signal =
3919 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3920 }
3921#else
3922	if (dm->backlight_caps[bl_idx].aux_support)
3923 return;
3924
3925 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3926 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3927#endif
3928}
3929
3930static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3931 unsigned *min, unsigned *max)
3932{
3933	if (!caps)
3934		return 0;
3935
3936 if (caps->aux_support) {
3937 // Firmware limits are in nits, DC API wants millinits.
3938 *max = 1000 * caps->aux_max_input_signal;
3939 *min = 1000 * caps->aux_min_input_signal;
3940	} else {
3941 // Firmware limits are 8-bit, PWM control is 16-bit.
3942 *max = 0x101 * caps->max_input_signal;
3943 *min = 0x101 * caps->min_input_signal;
3944	}
3945 return 1;
3946}
3947
3948static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3949 uint32_t brightness)
3950{
3951 unsigned min, max;
3952
3953 if (!get_brightness_range(caps, &min, &max))
3954 return brightness;
3955
3956 // Rescale 0..255 to min..max
3957 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3958 AMDGPU_MAX_BL_LEVEL);
3959}
3960
3961static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3962 uint32_t brightness)
3963{
3964 unsigned min, max;
3965
3966 if (!get_brightness_range(caps, &min, &max))
3967 return brightness;
3968
3969 if (brightness < min)
3970 return 0;
3971 // Rescale min..max to 0..255
3972 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3973 max - min);
3974}
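/*
 * Worked example (sketch) for the PWM (non-AUX) path with the ACPI
 * defaults above (min_input_signal = 12, max_input_signal = 255):
 * get_brightness_range() yields min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535, so a user level of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432, and
 * convert_brightness_to_user(caps, 34432) rounds back to 128.
 */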
3975
3976static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3977					  int bl_idx,
3978					  u32 user_brightness)
3979{
3980	struct amdgpu_dm_backlight_caps caps;
3981 struct dc_link *link;
3982 u32 brightness;
3983	bool rc;
3984
3985 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3986 caps = dm->backlight_caps[bl_idx];
3987
3988	dm->brightness[bl_idx] = user_brightness;
3989 /* update scratch register */
3990 if (bl_idx == 0)
3991 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3992 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3993 link = (struct dc_link *)dm->backlight_link[bl_idx];
3994
3995	/* Change brightness based on AUX property */
3996	if (caps.aux_support) {
3997 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3998 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3999 if (!rc)
4000 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4001	} else {
4002 rc = dc_link_set_backlight_level(link, brightness, 0);
4003 if (!rc)
4004 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4005	}
4006
4007 if (rc)
4008 dm->actual_brightness[bl_idx] = user_brightness;
4009}
4010
4011static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4012{
4013	struct amdgpu_display_manager *dm = bl_get_data(bd);
4014	int i;
4015
4016 for (i = 0; i < dm->num_of_edps; i++) {
4017 if (bd == dm->backlight_dev[i])
4018 break;
4019 }
4020 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4021 i = 0;
4022 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4023
4024 return 0;
4025}
4026
4027static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4028 int bl_idx)
4029{
4030	struct amdgpu_dm_backlight_caps caps;
4031	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4032
4033 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4034 caps = dm->backlight_caps[bl_idx];
4035
4036	if (caps.aux_support) {
4037 u32 avg, peak;
4038 bool rc;
4039
4040 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4041 if (!rc)
4042			return dm->brightness[bl_idx];
4043 return convert_brightness_to_user(&caps, avg);
4044 } else {
4045		int ret = dc_link_get_backlight_level(link);
4046
4047 if (ret == DC_ERROR_UNEXPECTED)
4048			return dm->brightness[bl_idx];
4049 return convert_brightness_to_user(&caps, ret);
4050 }
4051}
4052
4053static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4054{
4055 struct amdgpu_display_manager *dm = bl_get_data(bd);
4056	int i;
4057
4058 for (i = 0; i < dm->num_of_edps; i++) {
4059 if (bd == dm->backlight_dev[i])
4060 break;
4061 }
4062 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4063 i = 0;
4064 return amdgpu_dm_backlight_get_level(dm, i);
4065}
4066
4067static const struct backlight_ops amdgpu_dm_backlight_ops = {
4068	.options = BL_CORE_SUSPENDRESUME,
4069 .get_brightness = amdgpu_dm_backlight_get_brightness,
4070 .update_status = amdgpu_dm_backlight_update_status,
4071};
4072
4073static void
4074amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4075{
4076 char bl_name[16];
4077 struct backlight_properties props = { 0 };
4078
4079 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4080 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4081
4082	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4083	props.brightness = AMDGPU_MAX_BL_LEVEL;
4084 props.type = BACKLIGHT_RAW;
4085
4086 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4087		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4088
4089 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4090 adev_to_drm(dm->adev)->dev,
4091 dm,
4092 &amdgpu_dm_backlight_ops,
4093 &props);
4562236b 4094
4095	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4096 DRM_ERROR("DM: Backlight registration failed!\n");
4097 else
4098		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4099}
4100#endif
4101
4102static int initialize_plane(struct amdgpu_display_manager *dm,
4103			    struct amdgpu_mode_info *mode_info, int plane_id,
4104 enum drm_plane_type plane_type,
4105 const struct dc_plane_cap *plane_cap)
4106{
4107	struct drm_plane *plane;
4108 unsigned long possible_crtcs;
4109 int ret = 0;
4110
4111	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4112 if (!plane) {
4113 DRM_ERROR("KMS: Failed to allocate plane\n");
4114 return -ENOMEM;
4115 }
4116	plane->type = plane_type;
4117
4118 /*
4119 * HACK: IGT tests expect that the primary plane for a CRTC
4120 * can only have one possible CRTC. Only expose support for
4121 * any CRTC if they're not going to be used as a primary plane
4122 * for a CRTC - like overlay or underlay planes.
4123 */
4124 possible_crtcs = 1 << plane_id;
4125 if (plane_id >= dm->dc->caps.max_streams)
4126 possible_crtcs = 0xff;
4127
4128	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4129
4130 if (ret) {
4131 DRM_ERROR("KMS: Failed to initialize plane\n");
4132		kfree(plane);
4133 return ret;
4134 }
4135
4136 if (mode_info)
4137 mode_info->planes[plane_id] = plane;
4138
4139 return ret;
4140}
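/*
 * Example (sketch): with dc->caps.max_streams = 4, primary planes 0-3
 * get possible_crtcs masks of 0x1, 0x2, 0x4 and 0x8 (one CRTC each),
 * while a plane registered with plane_id >= 4, such as an overlay,
 * gets 0xff and may be placed on any CRTC.
 */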
4141
4142
4143static void register_backlight_device(struct amdgpu_display_manager *dm,
4144 struct dc_link *link)
4145{
4146#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4147 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4148
4149 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4150 link->type != dc_connection_none) {
4151		/*
4152		 * Even if registration failed, we should continue with
4153		 * DM initialization because not having a backlight control
4154		 * is better than a black screen.
4155		 */
4156		if (!dm->backlight_dev[dm->num_of_edps])
4157			amdgpu_dm_register_backlight_device(dm);
4158
4159		if (dm->backlight_dev[dm->num_of_edps]) {
4160 dm->backlight_link[dm->num_of_edps] = link;
4161 dm->num_of_edps++;
4162 }
4163 }
4164#endif
4165}
4166
4167
4168/*
4169 * In this architecture, the association
4170 * connector -> encoder -> crtc
4171 * is not really required. The crtc and connector will hold the
4172 * display_index as an abstraction to use with DAL component
4173 *
4174 * Returns 0 on success
4175 */
4176static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4177{
4178 struct amdgpu_display_manager *dm = &adev->dm;
4179	int32_t i;
4180	struct amdgpu_dm_connector *aconnector = NULL;
4181	struct amdgpu_encoder *aencoder = NULL;
4182	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4183	uint32_t link_cnt;
4184	int32_t primary_planes;
4185	enum dc_connection_type new_connection_type = dc_connection_none;
4186	const struct dc_plane_cap *plane;
4187	bool psr_feature_enabled = false;
4188
4189 dm->display_indexes_num = dm->dc->caps.max_streams;
4190 /* Update the actual used number of crtc */
4191 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4192
4193	link_cnt = dm->dc->caps.max_links;
4194 if (amdgpu_dm_mode_config_init(dm->adev)) {
4195 DRM_ERROR("DM: Failed to initialize mode config\n");
4196		return -EINVAL;
4197 }
4198
4199 /* There is one primary plane per CRTC */
4200 primary_planes = dm->dc->caps.max_streams;
4201	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4202
4203 /*
4204 * Initialize primary planes, implicit planes for legacy IOCTLS.
4205 * Order is reversed to match iteration order in atomic check.
4206 */
4207 for (i = (primary_planes - 1); i >= 0; i--) {
4208 plane = &dm->dc->caps.planes[i];
4209
4210		if (initialize_plane(dm, mode_info, i,
4211				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4212			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4213			goto fail;
4214		}
4215	}
4216
4217 /*
4218 * Initialize overlay planes, index starting after primary planes.
4219 * These planes have a higher DRM index than the primary planes since
4220 * they should be considered as having a higher z-order.
4221 * Order is reversed to match iteration order in atomic check.
4222 *
4223 * Only support DCN for now, and only expose one so we don't encourage
4224 * userspace to use up all the pipes.
4225	 */
4226 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4227 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4228
4229 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4230 continue;
4231
4232 if (!plane->blends_with_above || !plane->blends_with_below)
4233 continue;
4234
4235		if (!plane->pixel_format_support.argb8888)
4236 continue;
4237
4238		if (initialize_plane(dm, NULL, primary_planes + i,
4239				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4240			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4241			goto fail;
4242		}
4243
4244 /* Only create one overlay plane. */
4245 break;
4246	}
4247
4248	for (i = 0; i < dm->dc->caps.max_streams; i++)
4249		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4250			DRM_ERROR("KMS: Failed to initialize crtc\n");
4251			goto fail;
4252		}
4253
4254#if defined(CONFIG_DRM_AMD_DC_DCN)
4255	/* Use Outbox interrupt */
4256	switch (adev->ip_versions[DCE_HWIP][0]) {
4257 case IP_VERSION(3, 0, 0):
4258 case IP_VERSION(3, 1, 2):
4259 case IP_VERSION(3, 1, 3):
4260	case IP_VERSION(3, 1, 5):
4261	case IP_VERSION(3, 1, 6):
4262	case IP_VERSION(2, 1, 0):
4263 if (register_outbox_irq_handlers(dm->adev)) {
4264 DRM_ERROR("DM: Failed to initialize IRQ\n");
4265 goto fail;
4266 }
4267 break;
4268 default:
4269		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4270			      adev->ip_versions[DCE_HWIP][0]);
4271	}
4272
4273 /* Determine whether to enable PSR support by default. */
4274 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4275 switch (adev->ip_versions[DCE_HWIP][0]) {
4276 case IP_VERSION(3, 1, 2):
4277 case IP_VERSION(3, 1, 3):
4278		case IP_VERSION(3, 1, 5):
4279		case IP_VERSION(3, 1, 6):
4280 psr_feature_enabled = true;
4281 break;
4282 default:
4283 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4284 break;
4285 }
4286 }
4287#endif
4288
4289 /* Disable vblank IRQs aggressively for power-saving. */
4290 adev_to_drm(adev)->vblank_disable_immediate = true;
4291
4292 /* loops over all connectors on the board */
4293 for (i = 0; i < link_cnt; i++) {
4294		struct dc_link *link = NULL;
4295
4296 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4297 DRM_ERROR(
4298 "KMS: Cannot support more than %d display indexes\n",
4299 AMDGPU_DM_MAX_DISPLAY_INDEX);
4300 continue;
4301 }
4302
4303 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4304 if (!aconnector)
4305			goto fail;
4306
4307 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4308		if (!aencoder)
4309			goto fail;
4310
4311 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4312 DRM_ERROR("KMS: Failed to initialize encoder\n");
4313			goto fail;
4314 }
4315
4316 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4317 DRM_ERROR("KMS: Failed to initialize connector\n");
4318			goto fail;
4319 }
4320
4321 link = dc_get_link_at_index(dm->dc, i);
4322
4323 if (!dc_link_detect_sink(link, &new_connection_type))
4324 DRM_ERROR("KMS: Failed to detect connector\n");
4325
4326 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4327 emulated_link_detect(link);
4328 amdgpu_dm_update_connector_after_detect(aconnector);
4329
4330 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4331			amdgpu_dm_update_connector_after_detect(aconnector);
4332			register_backlight_device(dm, link);
4333 if (dm->num_of_edps)
4334 update_connector_ext_caps(aconnector);
4335			if (psr_feature_enabled)
4336				amdgpu_dm_set_psr_caps(link);
4337
4338 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4339 * PSR is also supported.
4340 */
4341 if (link->psr_settings.psr_feature_enabled)
4342 adev_to_drm(adev)->vblank_disable_immediate = false;
4343 }
4344
4345
4346 }
4347
4348 /* Software is initialized. Now we can register interrupt handlers. */
4349 switch (adev->asic_type) {
4350#if defined(CONFIG_DRM_AMD_DC_SI)
4351 case CHIP_TAHITI:
4352 case CHIP_PITCAIRN:
4353 case CHIP_VERDE:
4354 case CHIP_OLAND:
4355 if (dce60_register_irq_handlers(dm->adev)) {
4356 DRM_ERROR("DM: Failed to initialize IRQ\n");
4357 goto fail;
4358 }
4359 break;
4360#endif
4361 case CHIP_BONAIRE:
4362 case CHIP_HAWAII:
4363 case CHIP_KAVERI:
4364 case CHIP_KABINI:
4365 case CHIP_MULLINS:
4366 case CHIP_TONGA:
4367 case CHIP_FIJI:
4368 case CHIP_CARRIZO:
4369 case CHIP_STONEY:
4370 case CHIP_POLARIS11:
4371 case CHIP_POLARIS10:
4372	case CHIP_POLARIS12:
4373	case CHIP_VEGAM:
4374	case CHIP_VEGA10:
4375	case CHIP_VEGA12:
4376	case CHIP_VEGA20:
4377 if (dce110_register_irq_handlers(dm->adev)) {
4378 DRM_ERROR("DM: Failed to initialize IRQ\n");
4379			goto fail;
4380 }
4381 break;
4382 default:
4383#if defined(CONFIG_DRM_AMD_DC_DCN)
4384		switch (adev->ip_versions[DCE_HWIP][0]) {
4385 case IP_VERSION(1, 0, 0):
4386 case IP_VERSION(1, 0, 1):
4387 case IP_VERSION(2, 0, 2):
4388 case IP_VERSION(2, 0, 3):
4389 case IP_VERSION(2, 0, 0):
4390 case IP_VERSION(2, 1, 0):
4391 case IP_VERSION(3, 0, 0):
4392 case IP_VERSION(3, 0, 2):
4393 case IP_VERSION(3, 0, 3):
4394 case IP_VERSION(3, 0, 1):
4395 case IP_VERSION(3, 1, 2):
4396 case IP_VERSION(3, 1, 3):
4397		case IP_VERSION(3, 1, 5):
4398		case IP_VERSION(3, 1, 6):
4399 if (dcn10_register_irq_handlers(dm->adev)) {
4400 DRM_ERROR("DM: Failed to initialize IRQ\n");
4401 goto fail;
4402 }
4403 break;
4404 default:
4405			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4406					adev->ip_versions[DCE_HWIP][0]);
4407			goto fail;
4408 }
4409#endif
4410		break;
4411 }
4412
4413	return 0;
4414fail:
4415	kfree(aencoder);
4416	kfree(aconnector);
4417
4418	return -EINVAL;
4419}
4420
4421static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4422{
4423	drm_atomic_private_obj_fini(&dm->atomic_obj);
4424 return;
4425}
4426
4427/******************************************************************************
4428 * amdgpu_display_funcs functions
4429 *****************************************************************************/
4430
4431/*
4432 * dm_bandwidth_update - program display watermarks
4433 *
4434 * @adev: amdgpu_device pointer
4435 *
4436 * Calculate and program the display watermarks and line buffer allocation.
4437 */
4438static void dm_bandwidth_update(struct amdgpu_device *adev)
4439{
4440	/* TODO: implement later */
4441}
4442
4443static const struct amdgpu_display_funcs dm_display_funcs = {
4444 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4445 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4446 .backlight_set_level = NULL, /* never called for DC */
4447 .backlight_get_level = NULL, /* never called for DC */
4448 .hpd_sense = NULL,/* called unconditionally */
4449 .hpd_set_polarity = NULL, /* called unconditionally */
4450 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4451 .page_flip_get_scanoutpos =
4452 dm_crtc_get_scanoutpos,/* called unconditionally */
4453 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4454 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4455};
4456
4457#if defined(CONFIG_DEBUG_KERNEL_DC)
4458
4459static ssize_t s3_debug_store(struct device *device,
4460 struct device_attribute *attr,
4461 const char *buf,
4462 size_t count)
4463{
4464 int ret;
4465 int s3_state;
4466	struct drm_device *drm_dev = dev_get_drvdata(device);
4467	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4468
4469 ret = kstrtoint(buf, 0, &s3_state);
4470
4471 if (ret == 0) {
4472 if (s3_state) {
4473 dm_resume(adev);
4474			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4475 } else
4476 dm_suspend(adev);
4477 }
4478
4479 return ret == 0 ? count : 0;
4480}
4481
4482DEVICE_ATTR_WO(s3_debug);
4483
4484#endif
4485
4486static int dm_early_init(void *handle)
4487{
4488 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4489
4490	switch (adev->asic_type) {
4491#if defined(CONFIG_DRM_AMD_DC_SI)
4492 case CHIP_TAHITI:
4493 case CHIP_PITCAIRN:
4494 case CHIP_VERDE:
4495 adev->mode_info.num_crtc = 6;
4496 adev->mode_info.num_hpd = 6;
4497 adev->mode_info.num_dig = 6;
4498 break;
4499 case CHIP_OLAND:
4500 adev->mode_info.num_crtc = 2;
4501 adev->mode_info.num_hpd = 2;
4502 adev->mode_info.num_dig = 2;
4503 break;
4504#endif
4505 case CHIP_BONAIRE:
4506 case CHIP_HAWAII:
4507 adev->mode_info.num_crtc = 6;
4508 adev->mode_info.num_hpd = 6;
4509 adev->mode_info.num_dig = 6;
4510		break;
4511 case CHIP_KAVERI:
4512 adev->mode_info.num_crtc = 4;
4513 adev->mode_info.num_hpd = 6;
4514 adev->mode_info.num_dig = 7;
4515 break;
4516 case CHIP_KABINI:
4517 case CHIP_MULLINS:
4518 adev->mode_info.num_crtc = 2;
4519 adev->mode_info.num_hpd = 6;
4520 adev->mode_info.num_dig = 6;
4521		break;
4522 case CHIP_FIJI:
4523 case CHIP_TONGA:
4524 adev->mode_info.num_crtc = 6;
4525 adev->mode_info.num_hpd = 6;
4526 adev->mode_info.num_dig = 7;
4527 break;
4528 case CHIP_CARRIZO:
4529 adev->mode_info.num_crtc = 3;
4530 adev->mode_info.num_hpd = 6;
4531 adev->mode_info.num_dig = 9;
4532 break;
4533 case CHIP_STONEY:
4534 adev->mode_info.num_crtc = 2;
4535 adev->mode_info.num_hpd = 6;
4536 adev->mode_info.num_dig = 9;
4537 break;
4538 case CHIP_POLARIS11:
4539	case CHIP_POLARIS12:
4540 adev->mode_info.num_crtc = 5;
4541 adev->mode_info.num_hpd = 5;
4542 adev->mode_info.num_dig = 5;
4543 break;
4544 case CHIP_POLARIS10:
4545	case CHIP_VEGAM:
4546 adev->mode_info.num_crtc = 6;
4547 adev->mode_info.num_hpd = 6;
4548 adev->mode_info.num_dig = 6;
4549		break;
4550	case CHIP_VEGA10:
4551	case CHIP_VEGA12:
4552	case CHIP_VEGA20:
4553 adev->mode_info.num_crtc = 6;
4554 adev->mode_info.num_hpd = 6;
4555 adev->mode_info.num_dig = 6;
4556 break;
4557	default:
4558#if defined(CONFIG_DRM_AMD_DC_DCN)
4559		switch (adev->ip_versions[DCE_HWIP][0]) {
4560 case IP_VERSION(2, 0, 2):
4561 case IP_VERSION(3, 0, 0):
4562 adev->mode_info.num_crtc = 6;
4563 adev->mode_info.num_hpd = 6;
4564 adev->mode_info.num_dig = 6;
4565 break;
4566 case IP_VERSION(2, 0, 0):
4567 case IP_VERSION(3, 0, 2):
4568 adev->mode_info.num_crtc = 5;
4569 adev->mode_info.num_hpd = 5;
4570 adev->mode_info.num_dig = 5;
4571 break;
4572 case IP_VERSION(2, 0, 3):
4573 case IP_VERSION(3, 0, 3):
4574 adev->mode_info.num_crtc = 2;
4575 adev->mode_info.num_hpd = 2;
4576 adev->mode_info.num_dig = 2;
4577 break;
4578 case IP_VERSION(1, 0, 0):
4579 case IP_VERSION(1, 0, 1):
4580 case IP_VERSION(3, 0, 1):
4581 case IP_VERSION(2, 1, 0):
4582 case IP_VERSION(3, 1, 2):
4583 case IP_VERSION(3, 1, 3):
4584		case IP_VERSION(3, 1, 5):
4585		case IP_VERSION(3, 1, 6):
4586 adev->mode_info.num_crtc = 4;
4587 adev->mode_info.num_hpd = 4;
4588 adev->mode_info.num_dig = 4;
4589 break;
4590 default:
4591			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4592					adev->ip_versions[DCE_HWIP][0]);
4593			return -EINVAL;
4594 }
4595#endif
4596		break;
4597 }
4598
4599 amdgpu_dm_set_irq_funcs(adev);
4600
4601 if (adev->mode_info.funcs == NULL)
4602 adev->mode_info.funcs = &dm_display_funcs;
4603
4604 /*
4605 * Note: Do NOT change adev->audio_endpt_rreg and
4606	 * adev->audio_endpt_wreg because they are initialised in
4607 * amdgpu_device_init()
4608 */
4609#if defined(CONFIG_DEBUG_KERNEL_DC)
4610 device_create_file(
4611		adev_to_drm(adev)->dev,
4612 &dev_attr_s3_debug);
4613#endif
4614
4615 return 0;
4616}
4617
4618static bool modeset_required(struct drm_crtc_state *crtc_state,
4619 struct dc_stream_state *new_stream,
4620 struct dc_stream_state *old_stream)
4621{
4622	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4623}
4624
4625static bool modereset_required(struct drm_crtc_state *crtc_state)
4626{
4627	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4628}
4629
4630static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4631{
4632 drm_encoder_cleanup(encoder);
4633 kfree(encoder);
4634}
4635
4636static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4637 .destroy = amdgpu_dm_encoder_destroy,
4638};
4639
4640
4641static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4642 struct drm_framebuffer *fb,
4643 int *min_downscale, int *max_upscale)
4644{
4645 struct amdgpu_device *adev = drm_to_adev(dev);
4646 struct dc *dc = adev->dm.dc;
4647 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4648 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4649
4650 switch (fb->format->format) {
4651 case DRM_FORMAT_P010:
4652 case DRM_FORMAT_NV12:
4653 case DRM_FORMAT_NV21:
4654 *max_upscale = plane_cap->max_upscale_factor.nv12;
4655 *min_downscale = plane_cap->max_downscale_factor.nv12;
4656 break;
4657
4658 case DRM_FORMAT_XRGB16161616F:
4659 case DRM_FORMAT_ARGB16161616F:
4660 case DRM_FORMAT_XBGR16161616F:
4661 case DRM_FORMAT_ABGR16161616F:
4662 *max_upscale = plane_cap->max_upscale_factor.fp16;
4663 *min_downscale = plane_cap->max_downscale_factor.fp16;
4664 break;
4665
4666 default:
4667 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4668 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4669 break;
4670 }
4671
4672 /*
4673 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4674 * scaling factor of 1.0 == 1000 units.
4675 */
4676 if (*max_upscale == 1)
4677 *max_upscale = 1000;
4678
4679 if (*min_downscale == 1)
4680 *min_downscale = 1000;
4681}
4682
4683
4684static int fill_dc_scaling_info(struct amdgpu_device *adev,
4685 const struct drm_plane_state *state,
4686				struct dc_scaling_info *scaling_info)
4687{
4688	int scale_w, scale_h, min_downscale, max_upscale;
4689
4690	memset(scaling_info, 0, sizeof(*scaling_info));
4691
4692 /* Source is fixed 16.16 but we ignore mantissa for now... */
4693 scaling_info->src_rect.x = state->src_x >> 16;
4694 scaling_info->src_rect.y = state->src_y >> 16;
4695
4696 /*
4697 * For reasons we don't (yet) fully understand a non-zero
4698 * src_y coordinate into an NV12 buffer can cause a
4699 * system hang on DCN1x.
4700 * To avoid hangs (and maybe be overly cautious)
4701 * let's reject both non-zero src_x and src_y.
4702 *
4703 * We currently know of only one use-case to reproduce a
4704 * scenario with non-zero src_x and src_y for NV12, which
4705 * is to gesture the YouTube Android app into full screen
4706 * on ChromeOS.
4707 */
4708 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4709 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4710 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4711 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4712 return -EINVAL;
4713
4714 scaling_info->src_rect.width = state->src_w >> 16;
4715 if (scaling_info->src_rect.width == 0)
4716 return -EINVAL;
4717
4718 scaling_info->src_rect.height = state->src_h >> 16;
4719 if (scaling_info->src_rect.height == 0)
4720 return -EINVAL;
4721
4722 scaling_info->dst_rect.x = state->crtc_x;
4723 scaling_info->dst_rect.y = state->crtc_y;
4724
4725 if (state->crtc_w == 0)
4726		return -EINVAL;
4727
4728	scaling_info->dst_rect.width = state->crtc_w;
4729
4730 if (state->crtc_h == 0)
4731		return -EINVAL;
4732
4733	scaling_info->dst_rect.height = state->crtc_h;
4734
4735 /* DRM doesn't specify clipping on destination output. */
4736 scaling_info->clip_rect = scaling_info->dst_rect;
4737
4738 /* Validate scaling per-format with DC plane caps */
4739 if (state->plane && state->plane->dev && state->fb) {
4740 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4741 &min_downscale, &max_upscale);
4742 } else {
4743 min_downscale = 250;
4744 max_upscale = 16000;
4745 }
4746
4747 scale_w = scaling_info->dst_rect.width * 1000 /
4748 scaling_info->src_rect.width;
4749
4750	if (scale_w < min_downscale || scale_w > max_upscale)
4751 return -EINVAL;
4752
4753 scale_h = scaling_info->dst_rect.height * 1000 /
4754 scaling_info->src_rect.height;
4755
4756	if (scale_h < min_downscale || scale_h > max_upscale)
4757 return -EINVAL;
4758
4759 /*
4760 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4761 * assume reasonable defaults based on the format.
4762 */
4763
4764	return 0;
4765}
4766
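/*
 * Worked example (sketch): upscaling a 960x540 source (src_w/src_h are
 * 16.16 fixed point, so state->src_w would be 960 << 16) to a
 * 1920x1080 CRTC rect gives scale_w = 1920 * 1000 / 960 = 2000 and
 * scale_h = 2000, i.e. a 2.0x upscale; this passes validation only if
 * the format's max_upscale cap is at least 2000.
 */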
4767static void
4768fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4769 uint64_t tiling_flags)
4770{
4771 /* Fill GFX8 params */
4772 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4773 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4774
4775 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4776 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4777 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4778 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4779 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4780
4781 /* XXX fix me for VI */
4782 tiling_info->gfx8.num_banks = num_banks;
4783 tiling_info->gfx8.array_mode =
4784 DC_ARRAY_2D_TILED_THIN1;
4785 tiling_info->gfx8.tile_split = tile_split;
4786 tiling_info->gfx8.bank_width = bankw;
4787 tiling_info->gfx8.bank_height = bankh;
4788 tiling_info->gfx8.tile_aspect = mtaspect;
4789 tiling_info->gfx8.tile_mode =
4790 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4791 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4792 == DC_ARRAY_1D_TILED_THIN1) {
4793 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4794 }
4795
4796 tiling_info->gfx8.pipe_config =
4797 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4798}
4799
4800static void
4801fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4802 union dc_tiling_info *tiling_info)
4803{
4804 tiling_info->gfx9.num_pipes =
4805 adev->gfx.config.gb_addr_config_fields.num_pipes;
4806 tiling_info->gfx9.num_banks =
4807 adev->gfx.config.gb_addr_config_fields.num_banks;
4808 tiling_info->gfx9.pipe_interleave =
4809 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4810 tiling_info->gfx9.num_shader_engines =
4811 adev->gfx.config.gb_addr_config_fields.num_se;
4812 tiling_info->gfx9.max_compressed_frags =
4813 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4814 tiling_info->gfx9.num_rb_per_se =
4815 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4816 tiling_info->gfx9.shaderEnable = 1;
4817	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4818		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4819}
4820
4821static int
4822validate_dcc(struct amdgpu_device *adev,
4823 const enum surface_pixel_format format,
4824 const enum dc_rotation_angle rotation,
4825 const union dc_tiling_info *tiling_info,
4826 const struct dc_plane_dcc_param *dcc,
4827 const struct dc_plane_address *address,
4828 const struct plane_size *plane_size)
4829{
4830 struct dc *dc = adev->dm.dc;
4831 struct dc_dcc_surface_param input;
4832 struct dc_surface_dcc_cap output;
4833
4834 memset(&input, 0, sizeof(input));
4835 memset(&output, 0, sizeof(output));
4836
4837	if (!dcc->enable)
4838 return 0;
4839
4840 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4841 !dc->cap_funcs.get_dcc_compression_cap)
4842		return -EINVAL;
4843
4844	input.format = format;
4845 input.surface_size.width = plane_size->surface_size.width;
4846 input.surface_size.height = plane_size->surface_size.height;
4847	input.swizzle_mode = tiling_info->gfx9.swizzle;
4848
4849	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4850		input.scan = SCAN_DIRECTION_HORIZONTAL;
4851	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4852 input.scan = SCAN_DIRECTION_VERTICAL;
4853
4854 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4855		return -EINVAL;
4856
4857 if (!output.capable)
4858		return -EINVAL;
4859
4860 if (dcc->independent_64b_blks == 0 &&
4861 output.grph.rgb.independent_64b_blks != 0)
4862		return -EINVAL;
4863
4864 return 0;
4865}
4866
4867static bool
4868modifier_has_dcc(uint64_t modifier)
4869{
4870 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4871}
4872
4873static unsigned
4874modifier_gfx9_swizzle_mode(uint64_t modifier)
4875{
4876 if (modifier == DRM_FORMAT_MOD_LINEAR)
4877 return 0;
4878
4879 return AMD_FMT_MOD_GET(TILE, modifier);
4880}
4881
4882static const struct drm_format_info *
4883amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4884{
4885	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4886}
4887
4888static void
4889fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4890 union dc_tiling_info *tiling_info,
4891 uint64_t modifier)
4892{
4893 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4894 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4895 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4896 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4897
4898 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4899
4900 if (!IS_AMD_FMT_MOD(modifier))
4901 return;
4902
4903 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4904 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4905
4906 if (adev->family >= AMDGPU_FAMILY_NV) {
4907 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4908 } else {
4909 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4910
4911 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4912 }
4913}
4914
faa37f54
BN
4915enum dm_micro_swizzle {
4916 MICRO_SWIZZLE_Z = 0,
4917 MICRO_SWIZZLE_S = 1,
4918 MICRO_SWIZZLE_D = 2,
4919 MICRO_SWIZZLE_R = 3
4920};
4921
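/*
 * Plane .format_mod_supported hook. Beyond checking the plane's
 * advertised modifier list, this filters out format/modifier pairs the
 * list alone cannot express: the D-swizzle bpp restrictions and the
 * DCC single-plane/32bpp limits below.
 */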
static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);
	int i;

	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow these modifiers:
	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR ||
	    modifier == DRM_FORMAT_MOD_INVALID) {
		return true;
	}

	/* Check that the modifier is on the list of the plane's supported modifiers. */
	for (i = 0; i < plane->modifier_count; i++) {
		if (modifier == plane->modifiers[i])
			break;
	}
	if (i == plane->modifier_count)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
		/*
		 * We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes.
		 */
		if (info->num_planes > 1)
			return false;
	}

	return true;
}

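/*
 * Append one modifier to a kmalloc'ed array, doubling its capacity when
 * full. On allocation failure the whole array is freed and *mods is set
 * to NULL, which subsequent calls (and the caller) treat as "give up".
 */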
static void
add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}

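/*
 * GFX9 (Vega/Raven) modifier list. The XOR-bit counts are derived from
 * the device's pipe, SE and bank configuration. DCC layouts are listed
 * before the plain swizzle modes and X (XOR) variants before non-X ones,
 * so the more preferred layouts come first; LINEAR is appended by the
 * caller.
 */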
static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);


	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			     AMD_FMT_MOD_SET(DCC, 1) |
			     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				     AMD_FMT_MOD_SET(DCC, 1) |
				     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				     AMD_FMT_MOD_SET(RB, rb) |
				     AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			     AMD_FMT_MOD_SET(DCC, 1) |
			     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			     AMD_FMT_MOD_SET(RB, rb) |
			     AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}

static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));


	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

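/*
 * GFX10.3 (RB+ parts) additionally encodes the packer count in the
 * modifier and supports independent 128B DCC blocks on top of the 64B
 * variants used by earlier GFX10 ASICs.
 */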
static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs) |
		     AMD_FMT_MOD_SET(DCC, 1) |
		     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		     AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

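/*
 * Build the modifier list advertised for a plane. The returned array is
 * terminated by DRM_FORMAT_MOD_INVALID and owned by the caller, which is
 * expected to kfree() it; cursor planes only get LINEAR.
 */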
static int
get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;
	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);

	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
	case AMDGPU_FAMILY_YC:
	case AMDGPU_FAMILY_GC_10_3_6:
	case AMDGPU_FAMILY_GC_10_3_7:
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	}

	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	if (!*mods)
		return -ENOMEM;

	return 0;
}

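/*
 * Translate the framebuffer's modifier into DC tiling/DCC state. When the
 * modifier carries DCC, plane 1 of the framebuffer holds the DCC metadata
 * consumed by the display hardware (address and pitch), and the
 * independent-block bits select the matching HUBP indirect-block mode.
 */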
static int
fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
					  const struct amdgpu_framebuffer *afb,
					  const enum surface_pixel_format format,
					  const enum dc_rotation_angle rotation,
					  const struct plane_size *plane_size,
					  union dc_tiling_info *tiling_info,
					  struct dc_plane_dcc_param *dcc,
					  struct dc_plane_address *address,
					  const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret = 0;

	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];
		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = independent_64b_blks;
		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
			if (independent_64b_blks && independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
			else if (independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_128b;
			else if (independent_64b_blks && !independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		} else {
			if (independent_64b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		}

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);

	return ret;
}

static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}

static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

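/*
 * Map the DRM color encoding/range plane properties onto a DC color
 * space. RGB formats are always treated as sRGB; for YCbCr, BT.601 and
 * BT.709 come in full- and limited-range flavours, while BT.2020 is only
 * accepted full range here.
 */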
static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	case DRM_FORMAT_XRGB16161616:
	case DRM_FORMAT_ARGB16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
		break;
	case DRM_FORMAT_XBGR16161616:
	case DRM_FORMAT_ABGR16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %p4cc\n",
			&fb->format->format);
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  afb->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  afb->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* Always 0 */
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
			    src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
					    dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
					     dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
		      dst.x, dst.y, dst.width, dst.height);

}

static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030khz is the separation point between HDTV and SDTV
		 * according to HDMI spec, we use YCbCr709 and YCbCr601
		 * respectively
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

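/*
 * Walk down from the requested colour depth until the resulting TMDS
 * character rate fits under the sink's max_tmds_clock (kHz). The pixel
 * clock is scaled by bpp/24 (and halved for YCbCr 4:2:0) per the HDMI
 * spec.
 */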
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;
	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}

static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	if (is_freesync_video_mode(mode_in, aconnector)) {
		timing_out->h_addressable = mode_in->hdisplay;
		timing_out->h_total = mode_in->htotal;
		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
		timing_out->v_total = mode_in->vtotal;
		timing_out->v_addressable = mode_in->vdisplay;
		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
		timing_out->pix_clk_100hz = mode_in->clock * 10;
	} else {
		timing_out->h_addressable = mode_in->crtc_hdisplay;
		timing_out->h_total = mode_in->crtc_htotal;
		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
		timing_out->v_total = mode_in->crtc_vtotal;
		timing_out->v_addressable = mode_in->crtc_vdisplay;
		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	}

	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}

static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	struct dc_stream_state *master = NULL;

	if (stream->triggered_crtc_reset.enabled) {
		master = stream->triggered_crtc_reset.event_source;
		stream->triggered_crtc_reset.event =
			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}

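/*
 * Pick the multisync master: the enabled stream with the highest refresh
 * rate, computed from pixel clock and total timing. Every stream in the
 * set then uses the master as its CRTC-reset event source.
 */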
static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count ; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
			    struct dc_sink *sink, struct dc_stream_state *stream,
			    struct dsc_dec_dpcd_caps *dsc_caps)
{
	stream->timing.flags.DSC = 0;
	dsc_caps->is_dsc_supported = false;

	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    sink->sink_signal == SIGNAL_TYPE_EDP)) {
		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
		    sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
					      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
					      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
					      dsc_caps);
	}
}

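/*
 * eDP DSC policy: target a fixed 8 bpp (clamped to the panel's advertised
 * maximum). If the stream compressed at that rate already fits the link,
 * keep it; otherwise fall back to a config bounded by the link bandwidth.
 */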
static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
				     struct dc_sink *sink, struct dc_stream_state *stream,
				     struct dsc_dec_dpcd_caps *dsc_caps,
				     uint32_t max_dsc_target_bpp_limit_override)
{
	const struct dc_link_settings *verified_link_cap = NULL;
	uint32_t link_bw_in_kbps;
	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
	struct dc *dc = sink->ctx->dc;
	struct dc_dsc_bw_range bw_range = {0};
	struct dc_dsc_config dsc_cfg = {0};

	verified_link_cap = dc_link_get_link_cap(stream->link);
	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
	edp_min_bpp_x16 = 8 * 16;
	edp_max_bpp_x16 = 8 * 16;

	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;

	if (edp_max_bpp_x16 < edp_min_bpp_x16)
		edp_min_bpp_x16 = edp_max_bpp_x16;

	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
				dc->debug.dsc_min_slice_height_override,
				edp_min_bpp_x16, edp_max_bpp_x16,
				dsc_caps,
				&stream->timing,
				&bw_range)) {

		if (bw_range.max_kbps < link_bw_in_kbps) {
			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
					dsc_caps,
					dc->debug.dsc_min_slice_height_override,
					max_dsc_target_bpp_limit_override,
					0,
					&stream->timing,
					&dsc_cfg)) {
				stream->timing.dsc_cfg = dsc_cfg;
				stream->timing.flags.DSC = 1;
				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
			}
			return;
		}
	}

	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
			dsc_caps,
			dc->debug.dsc_min_slice_height_override,
			max_dsc_target_bpp_limit_override,
			link_bw_in_kbps,
			&stream->timing,
			&dsc_cfg)) {
		stream->timing.dsc_cfg = dsc_cfg;
		stream->timing.flags.DSC = 1;
	}
}

static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
					struct dc_sink *sink, struct dc_stream_state *stream,
					struct dsc_dec_dpcd_caps *dsc_caps)
{
	struct drm_connector *drm_connector = &aconnector->base;
	uint32_t link_bandwidth_kbps;
	uint32_t max_dsc_target_bpp_limit_override = 0;
	struct dc *dc = sink->ctx->dc;
	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
	uint32_t dsc_max_supported_bw_in_kbps;

	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
						     dc_link_get_link_cap(aconnector->dc_link));

	if (stream->link && stream->link->local_sink)
		max_dsc_target_bpp_limit_override =
			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;

	/* Set DSC policy according to dsc_clock_en */
	dc_dsc_policy_set_enable_dsc_when_not_needed(
		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {

		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);

	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						dsc_caps,
						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						max_dsc_target_bpp_limit_override,
						link_bandwidth_kbps,
						&stream->timing,
						&stream->timing.dsc_cfg)) {
				stream->timing.flags.DSC = 1;
				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
						 __func__, drm_connector->name);
			}
		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
			max_supported_bw_in_kbps = link_bandwidth_kbps;
			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;

			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
			    max_supported_bw_in_kbps > 0 &&
			    dsc_max_supported_bw_in_kbps > 0)
				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
							dsc_caps,
							aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
							max_dsc_target_bpp_limit_override,
							dsc_max_supported_bw_in_kbps,
							&stream->timing,
							&stream->timing.dsc_cfg)) {
					stream->timing.flags.DSC = 1;
					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
							 __func__, drm_connector->name);
				}
		}
	}

	/* Overwrite the stream flag if DSC is enabled through debugfs */
	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
		stream->timing.flags.DSC = 1;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

/**
 * DOC: FreeSync Video
 *
 * When a userspace application wants to play a video, the content follows a
 * standard format definition that usually specifies the FPS for that format.
 * The below list illustrates some video formats and their expected FPS,
 * respectively:
 *
 * - TV/NTSC (23.976 FPS)
 * - Cinema (24 FPS)
 * - TV/PAL (25 FPS)
 * - TV/NTSC (29.97 FPS)
 * - TV/NTSC (30 FPS)
 * - Cinema HFR (48 FPS)
 * - TV/PAL (50 FPS)
 * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96, 120 FPS)
 *
 * The list of standard video formats is not huge and can be added to the
 * connector's modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch will happen seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If the userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid blink during the
 * transition. For example, the video player can change the modesetting from
 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
 * causing any display blink. This same concept can be applied to a mode
 * setting change.
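 *
 * As an illustrative example (the mode numbers are assumed, not taken from
 * any specific panel): a 3840x2160@60 base mode can gain 48/30/24 Hz
 * variants that differ only in the vertical front porch, so a player
 * presenting 24 FPS content gets exactly one frame per refresh instead of
 * a repeated-frame cadence on the 60 Hz timing.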
 */
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			      bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
		&aconnector->base.probed_modes :
		&aconnector->base.modes;

	if (aconnector->freesync_vid_base.clock != 0)
		return &aconnector->freesync_vid_base;

	/* Find the preferred mode */
	list_for_each_entry(m, list_head, head) {
		if (m->type & DRM_MODE_TYPE_PREFERRED) {
			m_pref = m;
			break;
		}
	}

	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fallback to first entry */
		m_pref = list_first_entry_or_null(
			&aconnector->base.modes, struct drm_display_mode, head);
		if (!m_pref) {
			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
			return NULL;
		}
	}

	highest_refresh = drm_mode_vrefresh(m_pref);

	/*
	 * Find the mode with highest refresh rate with same resolution.
	 * For some monitors, preferred mode is not the mode with highest
	 * supported refresh rate.
	 */
	list_for_each_entry(m, list_head, head) {
		current_refresh = drm_mode_vrefresh(m);

		if (m->hdisplay == m_pref->hdisplay &&
		    m->vdisplay == m_pref->vdisplay &&
		    highest_refresh < current_refresh) {
			highest_refresh = current_refresh;
			m_pref = m;
		}
	}

	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
	return m_pref;
}

static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector)
{
	struct drm_display_mode *high_mode;
	int timing_diff;

	high_mode = get_highest_refresh_rate_mode(aconnector, false);
	if (!high_mode || !mode)
		return false;

	timing_diff = high_mode->vtotal - mode->vtotal;

	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
	    high_mode->hdisplay != mode->hdisplay ||
	    high_mode->vdisplay != mode->vdisplay ||
	    high_mode->hsync_start != mode->hsync_start ||
	    high_mode->hsync_end != mode->hsync_end ||
	    high_mode->htotal != mode->htotal ||
	    high_mode->hskew != mode->hskew ||
	    high_mode->vscan != mode->vscan ||
	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
	    high_mode->vsync_end - mode->vsync_end != timing_diff)
		return false;
	else
		return true;
}

static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	struct drm_display_mode saved_mode;
	struct drm_display_mode *freesync_mode = NULL;
	bool native_mode_found = false;
	bool recalculate_timing = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	struct dc_sink *sink = NULL;

	memset(&saved_mode, 0, sizeof(saved_mode));

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in yet.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
		if (recalculate_timing) {
			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
			drm_mode_copy(&saved_mode, &mode);
			drm_mode_copy(&mode, freesync_mode);
		} else {
			decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode, scale);

			preferred_refresh = drm_mode_vrefresh(preferred_mode);
		}
	}

	if (recalculate_timing)
		drm_mode_set_crtcinfo(&saved_mode, 0);
	else if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and refresh rate didn't change
	 * we copy the vic and polarities of the old timings
	 */
	if (!scale || mode_refresh != preferred_refresh)
e7b07cee
HW
6486 }
6487
a85ba005
NC
6488 if (recalculate_timing)
6489 drm_mode_set_crtcinfo(&saved_mode, 0);
fe8858bb 6490 else if (!dm_state)
f783577c
JFZ
6491 drm_mode_set_crtcinfo(&mode, 0);
6492
a85ba005 6493 /*
b333730d
BL
6494 * If scaling is enabled and refresh rate didn't change
6495 * we copy the vic and polarities of the old timings
6496 */
b0781603 6497 if (!scale || mode_refresh != preferred_refresh)
a85ba005
NC
6498 fill_stream_properties_from_drm_display_mode(
6499 stream, &mode, &aconnector->base, con_state, NULL,
6500 requested_bpc);
b333730d 6501 else
a85ba005
NC
6502 fill_stream_properties_from_drm_display_mode(
6503 stream, &mode, &aconnector->base, con_state, old_stream,
6504 requested_bpc);
b333730d 6505
defeb878 6506#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6507 /* SST DSC determination policy */
6508 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6509 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6510 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
39a4eb85
WL
6511#endif
6512
e7b07cee
HW
6513 update_stream_scaling_settings(&mode, dm_state, stream);
6514
6515 fill_audio_info(
6516 &stream->audio_info,
6517 drm_connector,
aed15309 6518 sink);
e7b07cee 6519
ceb3dbb4 6520 update_stream_signal(stream, sink);
9182b4cb 6521
d832fc3b 6522 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
6523 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6524
8a488f5d
RL
6525 if (stream->link->psr_settings.psr_feature_enabled) {
6526 //
6527 // should decide stream support vsc sdp colorimetry capability
6528 // before building vsc info packet
6529 //
6530 stream->use_vsc_sdp_for_colorimetry = false;
6531 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6532 stream->use_vsc_sdp_for_colorimetry =
6533 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6534 } else {
6535 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6536 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 6537 }
0c5a0bbb 6538 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
1a365683
RL
6539 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6540
8c322309 6541 }
aed15309 6542finish:
dcd5fb82 6543 dc_sink_release(sink);
9e3efe3e 6544
e7b07cee
HW
6545 return stream;
6546}
6547
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
        drm_crtc_cleanup(crtc);
        kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
                                  struct drm_crtc_state *state)
{
        struct dm_crtc_state *cur = to_dm_crtc_state(state);

        /* TODO: destroy the dc_stream here once the stream object is flattened */
        if (cur->stream)
                dc_stream_release(cur->stream);

        __drm_atomic_helper_crtc_destroy_state(state);

        kfree(state);
}

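/* Reset the CRTC to a freshly allocated, zeroed software state. */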
static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
        struct dm_crtc_state *state;

        if (crtc->state)
                dm_crtc_destroy_state(crtc, crtc->state);

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (WARN_ON(!state))
                return;

        __drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct dm_crtc_state *state, *cur;

        cur = to_dm_crtc_state(crtc->state);

        if (WARN_ON(!crtc->state))
                return NULL;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;

        __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

        if (cur->stream) {
                state->stream = cur->stream;
                dc_stream_retain(state->stream);
        }

        state->active_planes = cur->active_planes;
        state->vrr_infopacket = cur->vrr_infopacket;
        state->abm_level = cur->abm_level;
        state->vrr_supported = cur->vrr_supported;
        state->freesync_config = cur->freesync_config;
        state->cm_has_degamma = cur->cm_has_degamma;
        state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
        state->force_dpms_off = cur->force_dpms_off;
        /* TODO: duplicate the dc_stream here once the stream object is flattened */

        return &state->base;
}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
        crtc_debugfs_init(crtc);

        return 0;
}
#endif

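/*
 * Toggle the VUPDATE interrupt source tied to this CRTC's output timing
 * generator (OTG) instance.
 */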
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
        enum dc_irq_source irq_source;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
        int rc;

        irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

        DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
                      acrtc->crtc_id, enable ? "en" : "dis", rc);
        return rc;
}

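/*
 * Toggle the VBLANK interrupt for a CRTC. The VUPDATE interrupt is kept in
 * sync with it while VRR is active, and on DCN any follow-up work is queued
 * on the vblank control workqueue.
 */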
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
        enum dc_irq_source irq_source;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct amdgpu_display_manager *dm = &adev->dm;
        struct vblank_control_work *work;
#endif
        int rc = 0;

        if (enable) {
                /* vblank irq on -> Only need vupdate irq in vrr mode */
                if (amdgpu_dm_vrr_active(acrtc_state))
                        rc = dm_set_vupdate_irq(crtc, true);
        } else {
                /* vblank irq off -> vupdate irq off */
                rc = dm_set_vupdate_irq(crtc, false);
        }

        if (rc)
                return rc;

        irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;

        if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
                return -EBUSY;

        if (amdgpu_in_reset(adev))
                return 0;

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (dm->vblank_control_workqueue) {
                work = kzalloc(sizeof(*work), GFP_ATOMIC);
                if (!work)
                        return -ENOMEM;

                INIT_WORK(&work->work, vblank_control_worker);
                work->dm = dm;
                work->acrtc = acrtc;
                work->enable = enable;

                if (acrtc_state->stream) {
                        dc_stream_retain(acrtc_state->stream);
                        work->stream = acrtc_state->stream;
                }

                queue_work(dm->vblank_control_workqueue, &work->work);
        }
#endif

        return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
        return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
        dm_set_vblank(crtc, false);
}

/* Only the options currently available for the driver are implemented */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
        .reset = dm_crtc_reset_state,
        .destroy = amdgpu_dm_crtc_destroy,
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .atomic_duplicate_state = dm_crtc_duplicate_state,
        .atomic_destroy_state = dm_crtc_destroy_state,
        .set_crc_source = amdgpu_dm_crtc_set_crc_source,
        .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
        .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
        .get_vblank_counter = amdgpu_get_vblank_counter_kms,
        .enable_vblank = dm_enable_vblank,
        .disable_vblank = dm_disable_vblank,
        .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        .late_register = amdgpu_dm_crtc_late_register,
#endif
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
        bool connected;
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

        /*
         * Notes:
         * 1. This interface is NOT called in context of HPD irq.
         * 2. This interface *is called* in context of user-mode ioctl. Which
         * makes it a bad place for *any* MST-related activity.
         */

        if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
            !aconnector->fake_enable)
                connected = (aconnector->dc_sink != NULL);
        else
                connected = (aconnector->base.force == DRM_FORCE_ON);

        update_subconnector_property(aconnector);

        return (connected ? connector_status_connected :
                        connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
                                            struct drm_connector_state *connector_state,
                                            struct drm_property *property,
                                            uint64_t val)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_connector_state *dm_old_state =
                to_dm_connector_state(connector->state);
        struct dm_connector_state *dm_new_state =
                to_dm_connector_state(connector_state);

        int ret = -EINVAL;

        if (property == dev->mode_config.scaling_mode_property) {
                enum amdgpu_rmx_type rmx_type;

                switch (val) {
                case DRM_MODE_SCALE_CENTER:
                        rmx_type = RMX_CENTER;
                        break;
                case DRM_MODE_SCALE_ASPECT:
                        rmx_type = RMX_ASPECT;
                        break;
                case DRM_MODE_SCALE_FULLSCREEN:
                        rmx_type = RMX_FULL;
                        break;
                case DRM_MODE_SCALE_NONE:
                default:
                        rmx_type = RMX_OFF;
                        break;
                }

                if (dm_old_state->scaling == rmx_type)
                        return 0;

                dm_new_state->scaling = rmx_type;
                ret = 0;
        } else if (property == adev->mode_info.underscan_hborder_property) {
                dm_new_state->underscan_hborder = val;
                ret = 0;
        } else if (property == adev->mode_info.underscan_vborder_property) {
                dm_new_state->underscan_vborder = val;
                ret = 0;
        } else if (property == adev->mode_info.underscan_property) {
                dm_new_state->underscan_enable = val;
                ret = 0;
        } else if (property == adev->mode_info.abm_level_property) {
                dm_new_state->abm_level = val;
                ret = 0;
        }

        return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
                                            const struct drm_connector_state *state,
                                            struct drm_property *property,
                                            uint64_t *val)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_connector_state *dm_state =
                to_dm_connector_state(state);
        int ret = -EINVAL;

        if (property == dev->mode_config.scaling_mode_property) {
                switch (dm_state->scaling) {
                case RMX_CENTER:
                        *val = DRM_MODE_SCALE_CENTER;
                        break;
                case RMX_ASPECT:
                        *val = DRM_MODE_SCALE_ASPECT;
                        break;
                case RMX_FULL:
                        *val = DRM_MODE_SCALE_FULLSCREEN;
                        break;
                case RMX_OFF:
                default:
                        *val = DRM_MODE_SCALE_NONE;
                        break;
                }
                ret = 0;
        } else if (property == adev->mode_info.underscan_hborder_property) {
                *val = dm_state->underscan_hborder;
                ret = 0;
        } else if (property == adev->mode_info.underscan_vborder_property) {
                *val = dm_state->underscan_vborder;
                ret = 0;
        } else if (property == adev->mode_info.underscan_property) {
                *val = dm_state->underscan_enable;
                ret = 0;
        } else if (property == adev->mode_info.abm_level_property) {
                *val = dm_state->abm_level;
                ret = 0;
        }

        return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

        drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        const struct dc_link *link = aconnector->dc_link;
        struct amdgpu_device *adev = drm_to_adev(connector->dev);
        struct amdgpu_display_manager *dm = &adev->dm;
        int i;

        /*
         * Call only if mst_mgr was initialized before, since it's not done
         * for all connector types.
         */
        if (aconnector->mst_mgr.dev)
                drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
        for (i = 0; i < dm->num_of_edps; i++) {
                if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
                        backlight_device_unregister(dm->backlight_dev[i]);
                        dm->backlight_dev[i] = NULL;
                }
        }
#endif

        if (aconnector->dc_em_sink)
                dc_sink_release(aconnector->dc_em_sink);
        aconnector->dc_em_sink = NULL;
        if (aconnector->dc_sink)
                dc_sink_release(aconnector->dc_sink);
        aconnector->dc_sink = NULL;

        drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
        if (aconnector->i2c) {
                i2c_del_adapter(&aconnector->i2c->base);
                kfree(aconnector->i2c);
        }
        kfree(aconnector->dm_dp_aux.aux.name);

        kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
        struct dm_connector_state *state =
                to_dm_connector_state(connector->state);

        if (connector->state)
                __drm_atomic_helper_connector_destroy_state(connector->state);

        kfree(state);

        state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (state) {
                state->scaling = RMX_OFF;
                state->underscan_enable = false;
                state->underscan_hborder = 0;
                state->underscan_vborder = 0;
                state->base.max_requested_bpc = 8;
                state->vcpi_slots = 0;
                state->pbn = 0;
                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
                        state->abm_level = amdgpu_dm_abm_level;

                __drm_atomic_helper_connector_reset(connector, &state->base);
        }
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
        struct dm_connector_state *state =
                to_dm_connector_state(connector->state);

        struct dm_connector_state *new_state =
                kmemdup(state, sizeof(*state), GFP_KERNEL);

        if (!new_state)
                return NULL;

        __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

        new_state->freesync_capable = state->freesync_capable;
        new_state->abm_level = state->abm_level;
        new_state->scaling = state->scaling;
        new_state->underscan_enable = state->underscan_enable;
        new_state->underscan_hborder = state->underscan_hborder;
        new_state->underscan_vborder = state->underscan_vborder;
        new_state->vcpi_slots = state->vcpi_slots;
        new_state->pbn = state->pbn;
        return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
        int r;

        if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
            (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
                amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
                r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
                if (r)
                        return r;
        }

#if defined(CONFIG_DEBUG_FS)
        connector_debugfs_init(amdgpu_dm_connector);
#endif

        return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
        .reset = amdgpu_dm_connector_funcs_reset,
        .detect = amdgpu_dm_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = amdgpu_dm_connector_destroy,
        .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
        .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
        .late_register = amdgpu_dm_connector_late_register,
        .early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
        return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
        struct dc_sink_init_data init_params = {
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_VIRTUAL
        };
        struct edid *edid;

        if (!aconnector->base.edid_blob_ptr) {
                DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
                          aconnector->base.name);

                aconnector->base.force = DRM_FORCE_OFF;
                aconnector->base.override_edid = false;
                return;
        }

        edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

        aconnector->edid = edid;

        aconnector->dc_em_sink = dc_link_add_remote_sink(
                aconnector->dc_link,
                (uint8_t *)edid,
                (edid->extensions + 1) * EDID_LENGTH,
                &init_params);

        if (aconnector->base.force == DRM_FORCE_ON) {
                aconnector->dc_sink = aconnector->dc_link->local_sink ?
                        aconnector->dc_link->local_sink :
                        aconnector->dc_em_sink;
                dc_sink_retain(aconnector->dc_sink);
        }
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = (struct dc_link *)aconnector->dc_link;

        /*
         * In case of a headless boot with force on for a DP managed connector,
         * these settings have to be != 0 to get an initial modeset.
         */
        if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
                link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
                link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
        }

        aconnector->base.override_edid = true;
        create_eml_sink(aconnector);
}

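/*
 * Create a stream for the sink and validate it against DC. On validation
 * failure, retry with progressively lower bpc (down to 6), and as a last
 * resort force YCbCr420 encoding.
 */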
struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                                const struct drm_display_mode *drm_mode,
                                const struct dm_connector_state *dm_state,
                                const struct dc_stream_state *old_stream)
{
        struct drm_connector *connector = &aconnector->base;
        struct amdgpu_device *adev = drm_to_adev(connector->dev);
        struct dc_stream_state *stream;
        const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
        int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
        enum dc_status dc_result = DC_OK;

        do {
                stream = create_stream_for_sink(aconnector, drm_mode,
                                                dm_state, old_stream,
                                                requested_bpc);
                if (stream == NULL) {
                        DRM_ERROR("Failed to create stream for sink!\n");
                        break;
                }

                dc_result = dc_validate_stream(adev->dm.dc, stream);

                if (dc_result != DC_OK) {
                        DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
                                      drm_mode->hdisplay,
                                      drm_mode->vdisplay,
                                      drm_mode->clock,
                                      dc_result,
                                      dc_status_to_str(dc_result));

                        dc_stream_release(stream);
                        stream = NULL;
                        requested_bpc -= 2; /* lower bpc to retry validation */
                }

        } while (stream == NULL && requested_bpc >= 6);

        if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
                DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");

                aconnector->force_yuv420_output = true;
                stream = create_validate_stream_for_sink(aconnector, drm_mode,
                                                dm_state, old_stream);
                aconnector->force_yuv420_output = false;
        }

        return stream;
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
                                                    struct drm_display_mode *mode)
{
        int result = MODE_ERROR;
        struct dc_sink *dc_sink;
        /* TODO: Unhardcode stream count */
        struct dc_stream_state *stream;
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

        if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
            (mode->flags & DRM_MODE_FLAG_DBLSCAN))
                return result;

        /*
         * Only run this the first time mode_valid is called to initialize
         * EDID mgmt
         */
        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
            !aconnector->dc_em_sink)
                handle_edid_mgmt(aconnector);

        dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

        if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
            aconnector->base.force != DRM_FORCE_ON) {
                DRM_ERROR("dc_sink is NULL!\n");
                goto fail;
        }

        stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
        if (stream) {
                dc_stream_release(stream);
                result = MODE_OK;
        }

fail:
        /* TODO: error handling */
        return result;
}

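/*
 * Pack the connector's HDR static metadata into an infopacket: an HDMI DRM
 * (Dynamic Range and Mastering) infoframe for HDMI, or the equivalent SDP
 * for DP/eDP.
 */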
static int fill_hdr_info_packet(const struct drm_connector_state *state,
                                struct dc_info_packet *out)
{
        struct hdmi_drm_infoframe frame;
        unsigned char buf[30]; /* 26 + 4 */
        ssize_t len;
        int ret, i;

        memset(out, 0, sizeof(*out));

        if (!state->hdr_output_metadata)
                return 0;

        ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
        if (ret)
                return ret;

        len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
        if (len < 0)
                return (int)len;

        /* Static metadata is a fixed 26 bytes + 4 byte header. */
        if (len != 30)
                return -EINVAL;

        /* Prepare the infopacket for DC. */
        switch (state->connector->connector_type) {
        case DRM_MODE_CONNECTOR_HDMIA:
                out->hb0 = 0x87; /* type */
                out->hb1 = 0x01; /* version */
                out->hb2 = 0x1A; /* length */
                out->sb[0] = buf[3]; /* checksum */
                i = 1;
                break;

        case DRM_MODE_CONNECTOR_DisplayPort:
        case DRM_MODE_CONNECTOR_eDP:
                out->hb0 = 0x00; /* sdp id, zero */
                out->hb1 = 0x87; /* type */
                out->hb2 = 0x1D; /* payload len - 1 */
                out->hb3 = (0x13 << 2); /* sdp version */
                out->sb[0] = 0x01; /* version */
                out->sb[1] = 0x1A; /* length */
                i = 2;
                break;

        default:
                return -EINVAL;
        }

        memcpy(&out->sb[i], &buf[4], 26);
        out->valid = true;

        print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
                       sizeof(out->sb), false);

        return 0;
}

static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
                                 struct drm_atomic_state *state)
{
        struct drm_connector_state *new_con_state =
                drm_atomic_get_new_connector_state(state, conn);
        struct drm_connector_state *old_con_state =
                drm_atomic_get_old_connector_state(state, conn);
        struct drm_crtc *crtc = new_con_state->crtc;
        struct drm_crtc_state *new_crtc_state;
        int ret;

        trace_amdgpu_dm_connector_atomic_check(new_con_state);

        if (!crtc)
                return 0;

        if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
                struct dc_info_packet hdr_infopacket;

                ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
                if (ret)
                        return ret;

                new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
                if (IS_ERR(new_crtc_state))
                        return PTR_ERR(new_crtc_state);

                /*
                 * DC considers the stream backends changed if the
                 * static metadata changes. Forcing the modeset also
                 * gives a simple way for userspace to switch from
                 * 8bpc to 10bpc when setting the metadata to enter
                 * or exit HDR.
                 *
                 * Changing the static metadata after it's been
                 * set is permissible, however. So only force a
                 * modeset if we're entering or exiting HDR.
                 */
                new_crtc_state->mode_changed =
                        !old_con_state->hdr_output_metadata ||
                        !new_con_state->hdr_output_metadata;
        }

        return 0;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
        /*
         * If hotplugging a second, bigger display in FB console mode, the
         * bigger resolution modes will be filtered by drm_mode_validate_size()
         * and are missing after the user starts lightdm. So we need to renew
         * the modes list in the get_modes callback, not just return the mode
         * count.
         */
        .get_modes = get_modes,
        .mode_valid = amdgpu_dm_connector_mode_valid,
        .atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
        struct drm_atomic_state *state = new_crtc_state->state;
        struct drm_plane *plane;
        int num_active = 0;

        drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
                struct drm_plane_state *new_plane_state;

                /* Cursor planes are "fake". */
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        continue;

                new_plane_state = drm_atomic_get_new_plane_state(state, plane);

                if (!new_plane_state) {
                        /*
                         * The plane is enabled on the CRTC and hasn't changed
                         * state. This means that it previously passed
                         * validation and is therefore enabled.
                         */
                        num_active += 1;
                        continue;
                }

                /* We need a framebuffer to be considered enabled. */
                num_active += (new_plane_state->fb != NULL);
        }

        return num_active;
}

static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
                                         struct drm_crtc_state *new_crtc_state)
{
        struct dm_crtc_state *dm_new_crtc_state =
                to_dm_crtc_state(new_crtc_state);

        dm_new_crtc_state->active_planes = 0;

        if (!dm_new_crtc_state->stream)
                return;

        dm_new_crtc_state->active_planes =
                count_crtc_active_planes(new_crtc_state);
}

static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
                                       struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
                                                                          crtc);
        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
        struct dc *dc = adev->dm.dc;
        struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
        int ret = -EINVAL;

        trace_amdgpu_dm_crtc_atomic_check(crtc_state);

        dm_update_crtc_active_planes(crtc, crtc_state);

        if (WARN_ON(unlikely(!dm_crtc_state->stream &&
                     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
                return ret;
        }

        /*
         * We require the primary plane to be enabled whenever the CRTC is, otherwise
         * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
         * planes are disabled, which is not supported by the hardware. And there is legacy
         * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
         */
        if (crtc_state->enable &&
            !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
                DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
                return -EINVAL;
        }

        /* In some use cases, like reset, no stream is attached */
        if (!dm_crtc_state->stream)
                return 0;

        if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
                return 0;

        DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
        return ret;
}

7351
3ee6b26b
AD
7352static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7353 const struct drm_display_mode *mode,
7354 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
7355{
7356 return true;
7357}
7358
7359static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7360 .disable = dm_crtc_helper_disable,
7361 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
7362 .mode_fixup = dm_crtc_helper_mode_fixup,
7363 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
7364};
7365
7366static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7367{
7368
7369}
7370
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
        switch (display_color_depth) {
        case COLOR_DEPTH_666:
                return 6;
        case COLOR_DEPTH_888:
                return 8;
        case COLOR_DEPTH_101010:
                return 10;
        case COLOR_DEPTH_121212:
                return 12;
        case COLOR_DEPTH_141414:
                return 14;
        case COLOR_DEPTH_161616:
                return 16;
        default:
                break;
        }
        return 0;
}

static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
                                          struct drm_crtc_state *crtc_state,
                                          struct drm_connector_state *conn_state)
{
        struct drm_atomic_state *state = crtc_state->state;
        struct drm_connector *connector = conn_state->connector;
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
        const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
        struct drm_dp_mst_topology_mgr *mst_mgr;
        struct drm_dp_mst_port *mst_port;
        enum dc_color_depth color_depth;
        int clock, bpp = 0;
        bool is_y420 = false;

        if (!aconnector->port || !aconnector->dc_sink)
                return 0;

        mst_port = aconnector->port;
        mst_mgr = &aconnector->mst_port->mst_mgr;

        if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
                return 0;

        if (!state->duplicated) {
                int max_bpc = conn_state->max_requested_bpc;
                is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
                          aconnector->force_yuv420_output;
                color_depth = convert_color_depth_from_display_info(connector,
                                                                    is_y420,
                                                                    max_bpc);
                bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
                clock = adjusted_mode->clock;
                dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
        }
        dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
                                                                           mst_mgr,
                                                                           mst_port,
                                                                           dm_new_connector_state->pbn,
                                                                           dm_mst_get_pbn_divider(aconnector->dc_link));
        if (dm_new_connector_state->vcpi_slots < 0) {
                DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
                return dm_new_connector_state->vcpi_slots;
        }
        return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
        .disable = dm_encoder_helper_disable,
        .atomic_check = dm_encoder_helper_atomic_check
};

#if defined(CONFIG_DRM_AMD_DC_DCN)
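/*
 * Update the MST VCPI slot allocations from the PBN values that the DSC
 * fairness computation produced, enabling DSC on ports whose stream timing
 * has DSC set and disabling it otherwise.
 */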
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
                                            struct dc_state *dc_state,
                                            struct dsc_mst_fairness_vars *vars)
{
        struct dc_stream_state *stream = NULL;
        struct drm_connector *connector;
        struct drm_connector_state *new_con_state;
        struct amdgpu_dm_connector *aconnector;
        struct dm_connector_state *dm_conn_state;
        int i, j;
        int vcpi, pbn_div, pbn, slot_num = 0;

        for_each_new_connector_in_state(state, connector, new_con_state, i) {

                aconnector = to_amdgpu_dm_connector(connector);

                if (!aconnector->port)
                        continue;

                if (!new_con_state || !new_con_state->crtc)
                        continue;

                dm_conn_state = to_dm_connector_state(new_con_state);

                for (j = 0; j < dc_state->stream_count; j++) {
                        stream = dc_state->streams[j];
                        if (!stream)
                                continue;

                        if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
                                break;

                        stream = NULL;
                }

                if (!stream)
                        continue;

                pbn_div = dm_mst_get_pbn_divider(stream->link);
                /* pbn is calculated by compute_mst_dsc_configs_for_state */
                for (j = 0; j < dc_state->stream_count; j++) {
                        if (vars[j].aconnector == aconnector) {
                                pbn = vars[j].pbn;
                                break;
                        }
                }

                if (j == dc_state->stream_count)
                        continue;

                slot_num = DIV_ROUND_UP(pbn, pbn_div);

                if (stream->timing.flags.DSC != 1) {
                        dm_conn_state->pbn = pbn;
                        dm_conn_state->vcpi_slots = slot_num;

                        drm_dp_mst_atomic_enable_dsc(state,
                                                     aconnector->port,
                                                     dm_conn_state->pbn,
                                                     0,
                                                     false);
                        continue;
                }

                vcpi = drm_dp_mst_atomic_enable_dsc(state,
                                                    aconnector->port,
                                                    pbn, pbn_div,
                                                    true);
                if (vcpi < 0)
                        return vcpi;

                dm_conn_state->pbn = pbn;
                dm_conn_state->vcpi_slots = vcpi;
        }
        return 0;
}
#endif

static void dm_drm_plane_reset(struct drm_plane *plane)
{
        struct dm_plane_state *amdgpu_state = NULL;

        if (plane->state)
                plane->funcs->atomic_destroy_state(plane, plane->state);

        amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
        WARN_ON(amdgpu_state == NULL);

        if (amdgpu_state)
                __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
        struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

        old_dm_plane_state = to_dm_plane_state(plane->state);
        dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
        if (!dm_plane_state)
                return NULL;

        __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

        if (old_dm_plane_state->dc_state) {
                dm_plane_state->dc_state = old_dm_plane_state->dc_state;
                dc_plane_state_retain(dm_plane_state->dc_state);
        }

        return &dm_plane_state->base;
}

static void dm_drm_plane_destroy_state(struct drm_plane *plane,
                                       struct drm_plane_state *state)
{
        struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

        if (dm_plane_state->dc_state)
                dc_plane_state_release(dm_plane_state->dc_state);

        drm_atomic_helper_plane_destroy_state(plane, state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = drm_primary_helper_destroy,
        .reset = dm_drm_plane_reset,
        .atomic_duplicate_state = dm_drm_plane_duplicate_state,
        .atomic_destroy_state = dm_drm_plane_destroy_state,
        .format_mod_supported = dm_plane_format_mod_supported,
};

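/*
 * Pin the framebuffer BO into the preferred domain, map it into GART, and
 * fill the DC plane buffer attributes for newly created planes.
 */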
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
                                      struct drm_plane_state *new_state)
{
        struct amdgpu_framebuffer *afb;
        struct drm_gem_object *obj;
        struct amdgpu_device *adev;
        struct amdgpu_bo *rbo;
        struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
        struct list_head list;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        uint32_t domain;
        int r;

        if (!new_state->fb) {
                DRM_DEBUG_KMS("No FB bound\n");
                return 0;
        }

        afb = to_amdgpu_framebuffer(new_state->fb);
        obj = new_state->fb->obj[0];
        rbo = gem_to_amdgpu_bo(obj);
        adev = amdgpu_ttm_adev(rbo->tbo.bdev);
        INIT_LIST_HEAD(&list);

        tv.bo = &rbo->tbo;
        tv.num_shared = 1;
        list_add(&tv.head, &list);

        r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
        if (r) {
                dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
                return r;
        }

        if (plane->type != DRM_PLANE_TYPE_CURSOR)
                domain = amdgpu_display_supported_domains(adev, rbo->flags);
        else
                domain = AMDGPU_GEM_DOMAIN_VRAM;

        r = amdgpu_bo_pin(rbo, domain);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        r = amdgpu_ttm_alloc_gart(&rbo->tbo);
        if (unlikely(r != 0)) {
                amdgpu_bo_unpin(rbo);
                ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("%p bind failed\n", rbo);
                return r;
        }

        ttm_eu_backoff_reservation(&ticket, &list);

        afb->address = amdgpu_bo_gpu_offset(rbo);

        amdgpu_bo_ref(rbo);

        /*
         * We don't do surface updates on planes that have been newly created,
         * but we also don't have the afb->address during atomic check.
         *
         * Fill in buffer attributes depending on the address here, but only on
         * newly created planes since they're not being used by DC yet and this
         * won't modify global state.
         */
        dm_plane_state_old = to_dm_plane_state(plane->state);
        dm_plane_state_new = to_dm_plane_state(new_state);

        if (dm_plane_state_new->dc_state &&
            dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
                struct dc_plane_state *plane_state =
                        dm_plane_state_new->dc_state;
                bool force_disable_dcc = !plane_state->dcc.enable;

                fill_plane_buffer_attributes(
                        adev, afb, plane_state->format, plane_state->rotation,
                        afb->tiling_flags,
                        &plane_state->tiling_info, &plane_state->plane_size,
                        &plane_state->dcc, &plane_state->address,
                        afb->tmz_surface, force_disable_dcc);
        }

        return 0;
}

static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
                                       struct drm_plane_state *old_state)
{
        struct amdgpu_bo *rbo;
        int r;

        if (!old_state->fb)
                return;

        rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
        r = amdgpu_bo_reserve(rbo, false);
        if (unlikely(r)) {
                DRM_ERROR("failed to reserve rbo before unpin\n");
                return;
        }

        amdgpu_bo_unpin(rbo);
        amdgpu_bo_unreserve(rbo);
        amdgpu_bo_unref(&rbo);
}

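/*
 * Validate the plane's viewport against the CRTC and derive the min/max
 * scaling factors from the DC plane caps before handing off to
 * drm_atomic_helper_check_plane_state().
 */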
static int dm_plane_helper_check_state(struct drm_plane_state *state,
                                       struct drm_crtc_state *new_crtc_state)
{
        struct drm_framebuffer *fb = state->fb;
        int min_downscale, max_upscale;
        int min_scale = 0;
        int max_scale = INT_MAX;

        /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
        if (fb && state->crtc) {
                /* Validate viewport to cover the case when only the position changes */
                if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
                        int viewport_width = state->crtc_w;
                        int viewport_height = state->crtc_h;

                        if (state->crtc_x < 0)
                                viewport_width += state->crtc_x;
                        else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
                                viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;

                        if (state->crtc_y < 0)
                                viewport_height += state->crtc_y;
                        else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
                                viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;

                        if (viewport_width < 0 || viewport_height < 0) {
                                DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
                                return -EINVAL;
                        } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
                                DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
                                return -EINVAL;
                        } else if (viewport_height < MIN_VIEWPORT_SIZE) {
                                DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
                                return -EINVAL;
                        }
                }

                /* Get min/max allowed scaling factors from plane caps. */
                get_min_max_dc_plane_scaling(state->crtc->dev, fb,
                                             &min_downscale, &max_upscale);
                /*
                 * Convert to drm convention: 16.16 fixed point, instead of dc's
                 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
                 * dst/src, so min_scale = 1.0 / max_upscale, etc.
                 */
                min_scale = (1000 << 16) / max_upscale;
                max_scale = (1000 << 16) / min_downscale;
        }

        return drm_atomic_helper_check_plane_state(
                state, new_crtc_state, min_scale, max_scale, true, true);
}

static int dm_plane_atomic_check(struct drm_plane *plane,
                                 struct drm_atomic_state *state)
{
        struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
                                                                                 plane);
        struct amdgpu_device *adev = drm_to_adev(plane->dev);
        struct dc *dc = adev->dm.dc;
        struct dm_plane_state *dm_plane_state;
        struct dc_scaling_info scaling_info;
        struct drm_crtc_state *new_crtc_state;
        int ret;

        trace_amdgpu_dm_plane_atomic_check(new_plane_state);

        dm_plane_state = to_dm_plane_state(new_plane_state);

        if (!dm_plane_state->dc_state)
                return 0;

        new_crtc_state =
                drm_atomic_get_new_crtc_state(state,
                                              new_plane_state->crtc);
        if (!new_crtc_state)
                return -EINVAL;

        ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
        if (ret)
                return ret;

        ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
        if (ret)
                return ret;

        if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
                return 0;

        return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
                                       struct drm_atomic_state *state)
{
        /* Only support async updates on cursor planes. */
        if (plane->type != DRM_PLANE_TYPE_CURSOR)
                return -EINVAL;

        return 0;
}

static void dm_plane_atomic_async_update(struct drm_plane *plane,
                                         struct drm_atomic_state *state)
{
        struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                                                                           plane);
        struct drm_plane_state *old_state =
                drm_atomic_get_old_plane_state(state, plane);

        trace_amdgpu_dm_atomic_update_cursor(new_state);

        swap(plane->state->fb, new_state->fb);

        plane->state->src_x = new_state->src_x;
        plane->state->src_y = new_state->src_y;
        plane->state->src_w = new_state->src_w;
        plane->state->src_h = new_state->src_h;
        plane->state->crtc_x = new_state->crtc_x;
        plane->state->crtc_y = new_state->crtc_y;
        plane->state->crtc_w = new_state->crtc_w;
        plane->state->crtc_h = new_state->crtc_h;

        handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
        .prepare_fb = dm_plane_helper_prepare_fb,
        .cleanup_fb = dm_plane_helper_cleanup_fb,
        .atomic_check = dm_plane_atomic_check,
        .atomic_async_check = dm_plane_atomic_async_check,
        .atomic_async_update = dm_plane_atomic_async_update
};

7823
7824/*
7825 * TODO: these are currently initialized to rgb formats only.
7826 * For future use cases we should either initialize them dynamically based on
7827 * plane capabilities, or initialize this array to all formats, so internal drm
1f6010a9 7828 * check will succeed, and let DC implement proper check
e7b07cee 7829 */
d90371b0 7830static const uint32_t rgb_formats[] = {
e7b07cee
HW
7831 DRM_FORMAT_XRGB8888,
7832 DRM_FORMAT_ARGB8888,
7833 DRM_FORMAT_RGBA8888,
7834 DRM_FORMAT_XRGB2101010,
7835 DRM_FORMAT_XBGR2101010,
7836 DRM_FORMAT_ARGB2101010,
7837 DRM_FORMAT_ABGR2101010,
58020403
MK
7838 DRM_FORMAT_XRGB16161616,
7839 DRM_FORMAT_XBGR16161616,
7840 DRM_FORMAT_ARGB16161616,
7841 DRM_FORMAT_ABGR16161616,
bcd47f60
MR
7842 DRM_FORMAT_XBGR8888,
7843 DRM_FORMAT_ABGR8888,
46dd9ff7 7844 DRM_FORMAT_RGB565,
e7b07cee
HW
7845};
7846
0d579c7e
NK
7847static const uint32_t overlay_formats[] = {
7848 DRM_FORMAT_XRGB8888,
7849 DRM_FORMAT_ARGB8888,
7850 DRM_FORMAT_RGBA8888,
7851 DRM_FORMAT_XBGR8888,
7852 DRM_FORMAT_ABGR8888,
7267a1a9 7853 DRM_FORMAT_RGB565
e7b07cee
HW
7854};
7855
7856static const u32 cursor_formats[] = {
7857 DRM_FORMAT_ARGB8888
7858};
7859
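/*
 * Fill 'formats' with the pixel formats supported by this plane type,
 * capped at max_formats; returns the number of entries written.
 */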
static int get_plane_formats(const struct drm_plane *plane,
                             const struct dc_plane_cap *plane_cap,
                             uint32_t *formats, int max_formats)
{
        int i, num_formats = 0;

        /*
         * TODO: Query support for each group of formats directly from
         * DC plane caps. This will require adding more formats to the
         * caps list.
         */

        switch (plane->type) {
        case DRM_PLANE_TYPE_PRIMARY:
                for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
                        if (num_formats >= max_formats)
                                break;

                        formats[num_formats++] = rgb_formats[i];
                }

                if (plane_cap && plane_cap->pixel_format_support.nv12)
                        formats[num_formats++] = DRM_FORMAT_NV12;
                if (plane_cap && plane_cap->pixel_format_support.p010)
                        formats[num_formats++] = DRM_FORMAT_P010;
                if (plane_cap && plane_cap->pixel_format_support.fp16) {
                        formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
                        formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
                        formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
                        formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
                }
                break;

        case DRM_PLANE_TYPE_OVERLAY:
                for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
                        if (num_formats >= max_formats)
                                break;

                        formats[num_formats++] = overlay_formats[i];
                }
                break;

        case DRM_PLANE_TYPE_CURSOR:
                for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
                        if (num_formats >= max_formats)
                                break;

                        formats[num_formats++] = cursor_formats[i];
                }
                break;
        }

        return num_formats;
}

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap)
{
        uint32_t formats[32];
        int num_formats;
        int res = -EPERM;
        unsigned int supported_rotations;
        uint64_t *modifiers = NULL;

        num_formats = get_plane_formats(plane, plane_cap, formats,
                                        ARRAY_SIZE(formats));

        res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
        if (res)
                return res;

        if (modifiers == NULL)
                adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;

        res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
                                       &dm_plane_funcs, formats, num_formats,
                                       modifiers, plane->type, NULL);
        kfree(modifiers);
        if (res)
                return res;

        if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
            plane_cap && plane_cap->per_pixel_alpha) {
                unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
                                          BIT(DRM_MODE_BLEND_PREMULTI);

                drm_plane_create_alpha_property(plane);
                drm_plane_create_blend_mode_property(plane, blend_caps);
        }

        if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
            plane_cap &&
            (plane_cap->pixel_format_support.nv12 ||
             plane_cap->pixel_format_support.p010)) {
                /* This only affects YUV formats. */
                drm_plane_create_color_properties(
                        plane,
                        BIT(DRM_COLOR_YCBCR_BT601) |
                        BIT(DRM_COLOR_YCBCR_BT709) |
                        BIT(DRM_COLOR_YCBCR_BT2020),
                        BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
                        BIT(DRM_COLOR_YCBCR_FULL_RANGE),
                        DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
        }

        supported_rotations =
                DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
                DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

        if (dm->adev->asic_type >= CHIP_BONAIRE &&
            plane->type != DRM_PLANE_TYPE_CURSOR)
                drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
                                                   supported_rotations);

        drm_plane_helper_add(plane, &dm_plane_helper_funcs);

        /* Create (reset) the plane state */
        if (plane->funcs->reset)
                plane->funcs->reset(plane);

        return 0;
}

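/*
 * Allocate a CRTC together with its dedicated cursor plane and register both
 * with DRM, wiring up the DM CRTC funcs and color management.
 */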
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t crtc_index)
{
        struct amdgpu_crtc *acrtc = NULL;
        struct drm_plane *cursor_plane;

        int res = -ENOMEM;

        cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
        if (!cursor_plane)
                goto fail;

        cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
        res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

        acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
        if (!acrtc)
                goto fail;

        res = drm_crtc_init_with_planes(
                        dm->ddev,
                        &acrtc->base,
                        plane,
                        cursor_plane,
                        &amdgpu_dm_crtc_funcs, NULL);

        if (res)
                goto fail;

        drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

        /* Create (reset) the plane state */
        if (acrtc->base.funcs->reset)
                acrtc->base.funcs->reset(&acrtc->base);

        acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
        acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

        acrtc->crtc_id = crtc_index;
        acrtc->base.enabled = false;
        acrtc->otg_inst = -1;

        dm->adev->mode_info.crtcs[crtc_index] = acrtc;
        drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
                                   true, MAX_COLOR_LUT_ENTRIES);
        drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

        return 0;

fail:
        kfree(acrtc);
        kfree(cursor_plane);
        return res;
}


static int to_drm_connector_type(enum signal_type st)
{
        switch (st) {
        case SIGNAL_TYPE_HDMI_TYPE_A:
                return DRM_MODE_CONNECTOR_HDMIA;
        case SIGNAL_TYPE_EDP:
                return DRM_MODE_CONNECTOR_eDP;
        case SIGNAL_TYPE_LVDS:
                return DRM_MODE_CONNECTOR_LVDS;
        case SIGNAL_TYPE_RGB:
                return DRM_MODE_CONNECTOR_VGA;
        case SIGNAL_TYPE_DISPLAY_PORT:
        case SIGNAL_TYPE_DISPLAY_PORT_MST:
                return DRM_MODE_CONNECTOR_DisplayPort;
        case SIGNAL_TYPE_DVI_DUAL_LINK:
        case SIGNAL_TYPE_DVI_SINGLE_LINK:
                return DRM_MODE_CONNECTOR_DVID;
        case SIGNAL_TYPE_VIRTUAL:
                return DRM_MODE_CONNECTOR_VIRTUAL;

        default:
                return DRM_MODE_CONNECTOR_Unknown;
        }
}

2b4c1c05
DV
8067static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8068{
62afb4ad
JRS
8069 struct drm_encoder *encoder;
8070
8071 /* There is only one encoder per connector */
8072 drm_connector_for_each_possible_encoder(connector, encoder)
8073 return encoder;
8074
8075 return NULL;
2b4c1c05
DV
8076}
8077
e7b07cee
HW
8078static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8079{
e7b07cee
HW
8080 struct drm_encoder *encoder;
8081 struct amdgpu_encoder *amdgpu_encoder;
8082
2b4c1c05 8083 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
8084
8085 if (encoder == NULL)
8086 return;
8087
8088 amdgpu_encoder = to_amdgpu_encoder(encoder);
8089
8090 amdgpu_encoder->native_mode.clock = 0;
8091
8092 if (!list_empty(&connector->probed_modes)) {
8093 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 8094
e7b07cee 8095 list_for_each_entry(preferred_mode,
b830ebc9
HW
8096 &connector->probed_modes,
8097 head) {
8098 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8099 amdgpu_encoder->native_mode = *preferred_mode;
8100
e7b07cee
HW
8101 break;
8102 }
8103
8104 }
8105}
8106
3ee6b26b
AD
8107static struct drm_display_mode *
8108amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8109 char *name,
8110 int hdisplay, int vdisplay)
e7b07cee
HW
8111{
8112 struct drm_device *dev = encoder->dev;
8113 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8114 struct drm_display_mode *mode = NULL;
8115 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8116
8117 mode = drm_mode_duplicate(dev, native_mode);
8118
b830ebc9 8119 if (mode == NULL)
e7b07cee
HW
8120 return NULL;
8121
8122 mode->hdisplay = hdisplay;
8123 mode->vdisplay = vdisplay;
8124 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 8125 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
8126
8127 return mode;
8128
8129}
8130
8131static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 8132 struct drm_connector *connector)
e7b07cee
HW
8133{
8134 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8135 struct drm_display_mode *mode = NULL;
8136 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
8137 struct amdgpu_dm_connector *amdgpu_dm_connector =
8138 to_amdgpu_dm_connector(connector);
e7b07cee
HW
8139 int i;
8140 int n;
8141 struct mode_size {
8142 char name[DRM_DISPLAY_MODE_LEN];
8143 int w;
8144 int h;
b830ebc9 8145 } common_modes[] = {
e7b07cee
HW
8146 { "640x480", 640, 480},
8147 { "800x600", 800, 600},
8148 { "1024x768", 1024, 768},
8149 { "1280x720", 1280, 720},
8150 { "1280x800", 1280, 800},
8151 {"1280x1024", 1280, 1024},
8152 { "1440x900", 1440, 900},
8153 {"1680x1050", 1680, 1050},
8154 {"1600x1200", 1600, 1200},
8155 {"1920x1080", 1920, 1080},
8156 {"1920x1200", 1920, 1200}
8157 };
8158
b830ebc9 8159 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
8160
8161 for (i = 0; i < n; i++) {
8162 struct drm_display_mode *curmode = NULL;
8163 bool mode_existed = false;
8164
8165 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
8166 common_modes[i].h > native_mode->vdisplay ||
8167 (common_modes[i].w == native_mode->hdisplay &&
8168 common_modes[i].h == native_mode->vdisplay))
8169 continue;
e7b07cee
HW
8170
8171 list_for_each_entry(curmode, &connector->probed_modes, head) {
8172 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 8173 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
8174 mode_existed = true;
8175 break;
8176 }
8177 }
8178
8179 if (mode_existed)
8180 continue;
8181
8182 mode = amdgpu_dm_create_common_mode(encoder,
8183 common_modes[i].name, common_modes[i].w,
8184 common_modes[i].h);
588a7017
ZQ
8185 if (!mode)
8186 continue;
8187
e7b07cee 8188 drm_mode_probed_add(connector, mode);
c84dec2f 8189 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
8190 }
8191}
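/*
 * Example (illustrative): for a 1366x768 native panel the filter above
 * admits 640x480, 800x600, 1024x768 and 1280x720 (everything larger,
 * and the native size itself, is skipped), minus any of those already
 * present in the probed list.
 */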
8192
d77de788
SS
8193static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8194{
8195 struct drm_encoder *encoder;
8196 struct amdgpu_encoder *amdgpu_encoder;
8197 const struct drm_display_mode *native_mode;
8198
8199 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8200 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8201 return;
8202
8203 encoder = amdgpu_dm_connector_to_encoder(connector);
8204 if (!encoder)
8205 return;
8206
8207 amdgpu_encoder = to_amdgpu_encoder(encoder);
8208
8209 native_mode = &amdgpu_encoder->native_mode;
8210 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8211 return;
8212
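	/*
	 * The call below consults DRM's DMI-based panel-orientation
	 * quirk table, keyed additionally by the native mode size we
	 * pass in, and attaches the "panel orientation" connector
	 * property when a quirk resolves to a known orientation.
	 */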
8213 drm_connector_set_panel_orientation_with_quirk(connector,
8214 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8215 native_mode->hdisplay,
8216 native_mode->vdisplay);
8217}
8218
3ee6b26b
AD
8219static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8220 struct edid *edid)
e7b07cee 8221{
c84dec2f
HW
8222 struct amdgpu_dm_connector *amdgpu_dm_connector =
8223 to_amdgpu_dm_connector(connector);
e7b07cee
HW
8224
8225 if (edid) {
8226 /* empty probed_modes */
8227 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 8228 amdgpu_dm_connector->num_modes =
e7b07cee
HW
8229 drm_add_edid_modes(connector, edid);
8230
f1e5e913
YMM
8231 /* Sort the probed modes before calling
8232 * amdgpu_dm_get_native_mode(), since an EDID can have
8233 * more than one preferred mode. Modes later in the
8234 * probed mode list can have a higher, preferred
8235 * resolution. For example, 3840x2160 in the base EDID
8236 * preferred timing and a 4096x2160 preferred
8237 * resolution in a later DID extension block.
8238 */
8239 drm_mode_sort(&connector->probed_modes);
e7b07cee 8240 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
8241
8242 /* Freesync capabilities are reset by calling
8243 * drm_add_edid_modes() and need to be
8244 * restored here.
8245 */
8246 amdgpu_dm_update_freesync_caps(connector, edid);
d77de788
SS
8247
8248 amdgpu_set_panel_orientation(connector);
a8d8d3dc 8249 } else {
c84dec2f 8250 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 8251 }
e7b07cee
HW
8252}
8253
a85ba005
NC
8254static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8255 struct drm_display_mode *mode)
8256{
8257 struct drm_display_mode *m;
8258
8259 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8260 if (drm_mode_equal(m, mode))
8261 return true;
8262 }
8263
8264 return false;
8265}
8266
8267static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8268{
8269 const struct drm_display_mode *m;
8270 struct drm_display_mode *new_mode;
8271 uint i;
8272 uint32_t new_modes_count = 0;
8273
8274 /* Standard FPS values
8275 *
12cdff6b
SC
8276 * 23.976 - TV/NTSC
8277 * 24 - Cinema
8278 * 25 - TV/PAL
8279 * 29.97 - TV/NTSC
8280 * 30 - TV/NTSC
8281 * 48 - Cinema HFR
8282 * 50 - TV/PAL
8283 * 60 - Commonly used
8284 * 48,72,96,120 - Multiples of 24
a85ba005 8285 */
9ce5ed6e
CIK
8286 static const uint32_t common_rates[] = {
8287 23976, 24000, 25000, 29970, 30000,
12cdff6b 8288 48000, 50000, 60000, 72000, 96000, 120000
9ce5ed6e 8289 };
a85ba005
NC
8290
8291 /*
8292 * Find mode with highest refresh rate with the same resolution
8293 * as the preferred mode. Some monitors report a preferred mode
8294 * with lower resolution than the highest refresh rate supported.
8295 */
8296
8297 m = get_highest_refresh_rate_mode(aconnector, true);
8298 if (!m)
8299 return 0;
8300
8301 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8302 uint64_t target_vtotal, target_vtotal_diff;
8303 uint64_t num, den;
8304
8305 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8306 continue;
8307
8308 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8309 common_rates[i] > aconnector->max_vfreq * 1000)
8310 continue;
8311
8312 num = (unsigned long long)m->clock * 1000 * 1000;
8313 den = common_rates[i] * (unsigned long long)m->htotal;
8314 target_vtotal = div_u64(num, den);
8315 target_vtotal_diff = target_vtotal - m->vtotal;
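		/*
		 * Worked example with illustrative numbers (not tied to
		 * any specific panel): a 1920x1080@60 base mode with
		 * clock = 148500 kHz, htotal = 2200 and vtotal = 1125,
		 * retargeted to 48 Hz, gives
		 *   target_vtotal = 148500 * 10^6 / (48000 * 2200)
		 *                 ~= 1406
		 * so target_vtotal_diff = 281 extra lines of vertical
		 * blank, and 148500000 / (2200 * 1406) ~= 48.0 Hz.
		 */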
8316
8317 /* Check for illegal modes */
8318 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8319 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8320 m->vtotal + target_vtotal_diff < m->vsync_end)
8321 continue;
8322
8323 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8324 if (!new_mode)
8325 goto out;
8326
8327 new_mode->vtotal += (u16)target_vtotal_diff;
8328 new_mode->vsync_start += (u16)target_vtotal_diff;
8329 new_mode->vsync_end += (u16)target_vtotal_diff;
8330 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8331 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8332
8333 if (!is_duplicate_mode(aconnector, new_mode)) {
8334 drm_mode_probed_add(&aconnector->base, new_mode);
8335 new_modes_count += 1;
8336 } else
8337 drm_mode_destroy(aconnector->base.dev, new_mode);
8338 }
8339 out:
8340 return new_modes_count;
8341}
8342
8343static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8344 struct edid *edid)
8345{
8346 struct amdgpu_dm_connector *amdgpu_dm_connector =
8347 to_amdgpu_dm_connector(connector);
8348
de05abe6 8349 if (!edid)
a85ba005 8350 return;
fe8858bb 8351
a85ba005
NC
8352 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8353 amdgpu_dm_connector->num_modes +=
8354 add_fs_modes(amdgpu_dm_connector);
8355}
8356
7578ecda 8357static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 8358{
c84dec2f
HW
8359 struct amdgpu_dm_connector *amdgpu_dm_connector =
8360 to_amdgpu_dm_connector(connector);
e7b07cee 8361 struct drm_encoder *encoder;
c84dec2f 8362 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 8363
2b4c1c05 8364 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 8365
5c0e6840 8366 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
8367 amdgpu_dm_connector->num_modes =
8368 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
8369 } else {
8370 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8371 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 8372 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 8373 }
3e332d3a 8374 amdgpu_dm_fbc_init(connector);
5099114b 8375
c84dec2f 8376 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
8377}
8378
3ee6b26b
AD
8379void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8380 struct amdgpu_dm_connector *aconnector,
8381 int connector_type,
8382 struct dc_link *link,
8383 int link_index)
e7b07cee 8384{
1348969a 8385 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 8386
f04bee34
NK
8387 /*
8388 * Some of the properties below require access to state, like bpc.
8389 * Allocate some default initial connector state with our reset helper.
8390 */
8391 if (aconnector->base.funcs->reset)
8392 aconnector->base.funcs->reset(&aconnector->base);
8393
e7b07cee
HW
8394 aconnector->connector_id = link_index;
8395 aconnector->dc_link = link;
8396 aconnector->base.interlace_allowed = false;
8397 aconnector->base.doublescan_allowed = false;
8398 aconnector->base.stereo_allowed = false;
8399 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8400 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 8401 aconnector->audio_inst = -1;
e7b07cee
HW
8402 mutex_init(&aconnector->hpd_lock);
8403
1f6010a9
DF
8404 /*
8405 * Configure HPD hot-plug support. connector->polled defaults to 0,
b830ebc9
HW
8406 * which means HPD hot plug is not supported.
8407 */
e7b07cee
HW
8408 switch (connector_type) {
8409 case DRM_MODE_CONNECTOR_HDMIA:
8410 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 8411 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8412 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
8413 break;
8414 case DRM_MODE_CONNECTOR_DisplayPort:
8415 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
d715c9a2 8416 link->link_enc = link_enc_cfg_get_link_enc(link);
7b201d53 8417 ASSERT(link->link_enc);
f6e03f80
JS
8418 if (link->link_enc)
8419 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8420 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
8421 break;
8422 case DRM_MODE_CONNECTOR_DVID:
8423 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8424 break;
8425 default:
8426 break;
8427 }
8428
8429 drm_object_attach_property(&aconnector->base.base,
8430 dm->ddev->mode_config.scaling_mode_property,
8431 DRM_MODE_SCALE_NONE);
8432
8433 drm_object_attach_property(&aconnector->base.base,
8434 adev->mode_info.underscan_property,
8435 UNDERSCAN_OFF);
8436 drm_object_attach_property(&aconnector->base.base,
8437 adev->mode_info.underscan_hborder_property,
8438 0);
8439 drm_object_attach_property(&aconnector->base.base,
8440 adev->mode_info.underscan_vborder_property,
8441 0);
1825fd34 8442
8c61b31e
JFZ
8443 if (!aconnector->mst_port)
8444 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 8445
4a8ca46b
RL
8446 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8447 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8448 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 8449
c1ee92f9 8450 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 8451 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
8452 drm_object_attach_property(&aconnector->base.base,
8453 adev->mode_info.abm_level_property, 0);
8454 }
bb47de73
NK
8455
8456 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
8457 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8458 connector_type == DRM_MODE_CONNECTOR_eDP) {
e057b52c 8459 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
88694af9 8460
8c61b31e
JFZ
8461 if (!aconnector->mst_port)
8462 drm_connector_attach_vrr_capable_property(&aconnector->base);
8463
0c8620d6 8464#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 8465 if (adev->dm.hdcp_workqueue)
53e108aa 8466 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 8467#endif
bb47de73 8468 }
e7b07cee
HW
8469}
8470
7578ecda
AD
8471static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8472 struct i2c_msg *msgs, int num)
e7b07cee
HW
8473{
8474 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8475 struct ddc_service *ddc_service = i2c->ddc_service;
8476 struct i2c_command cmd;
8477 int i;
8478 int result = -EIO;
8479
b830ebc9 8480 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
8481
8482 if (!cmd.payloads)
8483 return result;
8484
8485 cmd.number_of_payloads = num;
8486 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8487 cmd.speed = 100;
8488
8489 for (i = 0; i < num; i++) {
8490 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8491 cmd.payloads[i].address = msgs[i].addr;
8492 cmd.payloads[i].length = msgs[i].len;
8493 cmd.payloads[i].data = msgs[i].buf;
8494 }
8495
c85e6e54
DF
8496 if (dc_submit_i2c(
8497 ddc_service->ctx->dc,
8498 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
8499 &cmd))
8500 result = num;
8501
8502 kfree(cmd.payloads);
8503 return result;
8504}
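/*
 * For illustration (an assumed, typical transfer, not taken from the
 * code above): an EDID base-block read arrives as two i2c_msgs,
 *
 *   msgs[0] = { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset };
 *   msgs[1] = { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid_buf };
 *
 * which the loop above turns into a write payload for the EDID offset
 * followed by a 128-byte read payload, both submitted to DC as a
 * single i2c_command.
 */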
8505
7578ecda 8506static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
8507{
8508 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8509}
8510
8511static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8512 .master_xfer = amdgpu_dm_i2c_xfer,
8513 .functionality = amdgpu_dm_i2c_func,
8514};
8515
3ee6b26b
AD
8516static struct amdgpu_i2c_adapter *
8517create_i2c(struct ddc_service *ddc_service,
8518 int link_index,
8519 int *res)
e7b07cee
HW
8520{
8521 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8522 struct amdgpu_i2c_adapter *i2c;
8523
b830ebc9 8524 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
8525 if (!i2c)
8526 return NULL;
e7b07cee
HW
8527 i2c->base.owner = THIS_MODULE;
8528 i2c->base.class = I2C_CLASS_DDC;
8529 i2c->base.dev.parent = &adev->pdev->dev;
8530 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 8531 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
8532 i2c_set_adapdata(&i2c->base, i2c);
8533 i2c->ddc_service = ddc_service;
f6e03f80
JS
8534 if (i2c->ddc_service->ddc_pin)
8535 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
8536
8537 return i2c;
8538}
8539
89fc8d4e 8540
1f6010a9
DF
8541/*
8542 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
8543 * dc_link which will be represented by this aconnector.
8544 */
7578ecda
AD
8545static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8546 struct amdgpu_dm_connector *aconnector,
8547 uint32_t link_index,
8548 struct amdgpu_encoder *aencoder)
e7b07cee
HW
8549{
8550 int res = 0;
8551 int connector_type;
8552 struct dc *dc = dm->dc;
8553 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8554 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
8555
8556 link->priv = aconnector;
e7b07cee 8557
f1ad2f5e 8558 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
8559
8560 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
8561 if (!i2c) {
8562 DRM_ERROR("Failed to create i2c adapter data\n");
8563 return -ENOMEM;
8564 }
8565
e7b07cee
HW
8566 aconnector->i2c = i2c;
8567 res = i2c_add_adapter(&i2c->base);
8568
8569 if (res) {
8570 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8571 goto out_free;
8572 }
8573
8574 connector_type = to_drm_connector_type(link->connector_signal);
8575
17165de2 8576 res = drm_connector_init_with_ddc(
e7b07cee
HW
8577 dm->ddev,
8578 &aconnector->base,
8579 &amdgpu_dm_connector_funcs,
17165de2
AP
8580 connector_type,
8581 &i2c->base);
e7b07cee
HW
8582
8583 if (res) {
8584 DRM_ERROR("connector_init failed\n");
8585 aconnector->connector_id = -1;
8586 goto out_free;
8587 }
8588
8589 drm_connector_helper_add(
8590 &aconnector->base,
8591 &amdgpu_dm_connector_helper_funcs);
8592
8593 amdgpu_dm_connector_init_helper(
8594 dm,
8595 aconnector,
8596 connector_type,
8597 link,
8598 link_index);
8599
cde4c44d 8600 drm_connector_attach_encoder(
e7b07cee
HW
8601 &aconnector->base, &aencoder->base);
8602
e7b07cee
HW
8603 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8604 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 8605 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 8606
e7b07cee
HW
8607out_free:
8608 if (res) {
8609 kfree(i2c);
8610 aconnector->i2c = NULL;
8611 }
8612 return res;
8613}
8614
8615int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8616{
8617 switch (adev->mode_info.num_crtc) {
8618 case 1:
8619 return 0x1;
8620 case 2:
8621 return 0x3;
8622 case 3:
8623 return 0x7;
8624 case 4:
8625 return 0xf;
8626 case 5:
8627 return 0x1f;
8628 case 6:
8629 default:
8630 return 0x3f;
8631 }
8632}
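/*
 * Equivalently, for the values the switch above handles (a sketch,
 * assuming 1 <= num_crtc <= 6): the mask is just the low num_crtc
 * bits set,
 *
 *   return (1u << adev->mode_info.num_crtc) - 1;
 *
 * e.g. 4 CRTCs -> 0xf. The switch form additionally pins any
 * unexpected count (0 or > 6) to 0x3f.
 */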
8633
7578ecda
AD
8634static int amdgpu_dm_encoder_init(struct drm_device *dev,
8635 struct amdgpu_encoder *aencoder,
8636 uint32_t link_index)
e7b07cee 8637{
1348969a 8638 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8639
8640 int res = drm_encoder_init(dev,
8641 &aencoder->base,
8642 &amdgpu_dm_encoder_funcs,
8643 DRM_MODE_ENCODER_TMDS,
8644 NULL);
8645
8646 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8647
8648 if (!res)
8649 aencoder->encoder_id = link_index;
8650 else
8651 aencoder->encoder_id = -1;
8652
8653 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8654
8655 return res;
8656}
8657
3ee6b26b
AD
8658static void manage_dm_interrupts(struct amdgpu_device *adev,
8659 struct amdgpu_crtc *acrtc,
8660 bool enable)
e7b07cee
HW
8661{
8662 /*
8fe684e9
NK
8663 * We have no guarantee that the frontend index maps to the same
8664 * backend index - some even map to more than one.
8665 *
8666 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
8667 */
8668 int irq_type =
734dd01d 8669 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
8670 adev,
8671 acrtc->crtc_id);
8672
8673 if (enable) {
8674 drm_crtc_vblank_on(&acrtc->base);
8675 amdgpu_irq_get(
8676 adev,
8677 &adev->pageflip_irq,
8678 irq_type);
86bc2219
WL
8679#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8680 amdgpu_irq_get(
8681 adev,
8682 &adev->vline0_irq,
8683 irq_type);
8684#endif
e7b07cee 8685 } else {
86bc2219
WL
8686#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8687 amdgpu_irq_put(
8688 adev,
8689 &adev->vline0_irq,
8690 irq_type);
8691#endif
e7b07cee
HW
8692 amdgpu_irq_put(
8693 adev,
8694 &adev->pageflip_irq,
8695 irq_type);
8696 drm_crtc_vblank_off(&acrtc->base);
8697 }
8698}
8699
8fe684e9
NK
8700static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8701 struct amdgpu_crtc *acrtc)
8702{
8703 int irq_type =
8704 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8705
8706 /*
8707 * This reads the current state for the IRQ and forcibly reapplies
8708 * the setting to hardware.
8709 */
8710 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8711}
8712
3ee6b26b
AD
8713static bool
8714is_scaling_state_different(const struct dm_connector_state *dm_state,
8715 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
8716{
8717 if (dm_state->scaling != old_dm_state->scaling)
8718 return true;
8719 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8720 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8721 return true;
8722 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8723 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8724 return true;
b830ebc9
HW
8725 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8726 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8727 return true;
e7b07cee
HW
8728 return false;
8729}
8730
0c8620d6
BL
8731#ifdef CONFIG_DRM_AMD_DC_HDCP
8732static bool is_content_protection_different(struct drm_connector_state *state,
8733 const struct drm_connector_state *old_state,
8734 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8735{
8736 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 8737 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 8738
31c0ed90 8739 /* Handle: Type0/1 change */
53e108aa
BL
8740 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8741 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8742 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8743 return true;
8744 }
8745
31c0ed90
BL
8746 /* CP is being re-enabled, ignore this
8747 *
8748 * Handles: ENABLED -> DESIRED
8749 */
0c8620d6
BL
8750 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8751 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8752 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8753 return false;
8754 }
8755
31c0ed90
BL
8756 /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8757 *
8758 * Handles: UNDESIRED -> ENABLED
8759 */
0c8620d6
BL
8760 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8761 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8762 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8763
0d9a947b
QZ
8764 /* Stream removed and re-enabled
8765 *
8766 * Can sometimes overlap with the HPD case,
8767 * thus set update_hdcp to false to avoid
8768 * setting HDCP multiple times.
8769 *
8770 * Handles: DESIRED -> DESIRED (Special case)
8771 */
8772 if (!(old_state->crtc && old_state->crtc->enabled) &&
8773 state->crtc && state->crtc->enabled &&
8774 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8775 dm_con_state->update_hdcp = false;
8776 return true;
8777 }
8778
8779 /* Hot-plug, headless s3, dpms
8780 *
8781 * Only start HDCP if the display is connected/enabled.
8782 * update_hdcp flag will be set to false until the next
8783 * HPD comes in.
31c0ed90
BL
8784 *
8785 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 8786 */
97f6c917
BL
8787 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8788 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8789 dm_con_state->update_hdcp = false;
0c8620d6 8790 return true;
97f6c917 8791 }
0c8620d6 8792
31c0ed90
BL
8793 /*
8794 * Handles: UNDESIRED -> UNDESIRED
8795 * DESIRED -> DESIRED
8796 * ENABLED -> ENABLED
8797 */
0c8620d6
BL
8798 if (old_state->content_protection == state->content_protection)
8799 return false;
8800
31c0ed90
BL
8801 /*
8802 * Handles: UNDESIRED -> DESIRED
8803 * DESIRED -> UNDESIRED
8804 * ENABLED -> UNDESIRED
8805 */
97f6c917 8806 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
8807 return true;
8808
31c0ed90
BL
8809 /*
8810 * Handles: DESIRED -> ENABLED
8811 */
0c8620d6
BL
8812 return false;
8813}
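/*
 * Decision table for the checks above (old state -> new state, and
 * the verdict returned):
 *
 *   Type0/1 change while not UNDESIRED      -> true  (forced to DESIRED)
 *   ENABLED   -> DESIRED                    -> false (kept ENABLED)
 *   UNDESIRED -> ENABLED (S3 resume)        -> demoted to DESIRED, then
 *                                              evaluated as usual
 *   DESIRED   -> DESIRED, stream re-enabled
 *               or hotplug/dpms-on          -> true  (special cases)
 *   unchanged state                         -> false
 *   any other change away from ENABLED      -> true
 *   DESIRED   -> ENABLED                    -> false
 */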
8814
0c8620d6 8815#endif
3ee6b26b
AD
8816static void remove_stream(struct amdgpu_device *adev,
8817 struct amdgpu_crtc *acrtc,
8818 struct dc_stream_state *stream)
e7b07cee
HW
8819{
8820 /* this is the update mode case */
e7b07cee
HW
8821
8822 acrtc->otg_inst = -1;
8823 acrtc->enabled = false;
8824}
8825
7578ecda
AD
8826static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8827 struct dc_cursor_position *position)
2a8f6ccb 8828{
f4c2cc43 8829 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
8830 int x, y;
8831 int xorigin = 0, yorigin = 0;
8832
e371e19c 8833 if (!crtc || !plane->state->fb)
2a8f6ccb 8834 return 0;
2a8f6ccb
HW
8835
8836 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8837 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8838 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8839 __func__,
8840 plane->state->crtc_w,
8841 plane->state->crtc_h);
8842 return -EINVAL;
8843 }
8844
8845 x = plane->state->crtc_x;
8846 y = plane->state->crtc_y;
c14a005c 8847
e371e19c
NK
8848 if (x <= -amdgpu_crtc->max_cursor_width ||
8849 y <= -amdgpu_crtc->max_cursor_height)
8850 return 0;
8851
2a8f6ccb
HW
8852 if (x < 0) {
8853 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8854 x = 0;
8855 }
8856 if (y < 0) {
8857 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8858 y = 0;
8859 }
8860 position->enable = true;
d243b6ff 8861 position->translate_by_source = true;
2a8f6ccb
HW
8862 position->x = x;
8863 position->y = y;
8864 position->x_hotspot = xorigin;
8865 position->y_hotspot = yorigin;
8866
8867 return 0;
8868}
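/*
 * Worked example (illustrative): with a 64x64 max cursor size and
 * plane->state->crtc_x = -10, crtc_y = 20, the cursor hangs off the
 * left edge, so the clamping above yields x = 0, x_hotspot = 10 and
 * y = 20, y_hotspot = 0; the hotspot shifts the cursor image so only
 * its on-screen portion is displayed.
 */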
8869
3ee6b26b
AD
8870static void handle_cursor_update(struct drm_plane *plane,
8871 struct drm_plane_state *old_plane_state)
e7b07cee 8872{
1348969a 8873 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
8874 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8875 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8876 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8877 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8878 uint64_t address = afb ? afb->address : 0;
6a30a929 8879 struct dc_cursor_position position = {0};
2a8f6ccb
HW
8880 struct dc_cursor_attributes attributes;
8881 int ret;
8882
e7b07cee
HW
8883 if (!plane->state->fb && !old_plane_state->fb)
8884 return;
8885
cb2318b7 8886 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
4711c033
LT
8887 __func__,
8888 amdgpu_crtc->crtc_id,
8889 plane->state->crtc_w,
8890 plane->state->crtc_h);
2a8f6ccb
HW
8891
8892 ret = get_cursor_position(plane, crtc, &position);
8893 if (ret)
8894 return;
8895
8896 if (!position.enable) {
8897 /* turn off cursor */
674e78ac
NK
8898 if (crtc_state && crtc_state->stream) {
8899 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
8900 dc_stream_set_cursor_position(crtc_state->stream,
8901 &position);
674e78ac
NK
8902 mutex_unlock(&adev->dm.dc_lock);
8903 }
2a8f6ccb 8904 return;
e7b07cee 8905 }
e7b07cee 8906
2a8f6ccb
HW
8907 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8908 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8909
c1cefe11 8910 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
8911 attributes.address.high_part = upper_32_bits(address);
8912 attributes.address.low_part = lower_32_bits(address);
8913 attributes.width = plane->state->crtc_w;
8914 attributes.height = plane->state->crtc_h;
8915 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8916 attributes.rotation_angle = 0;
8917 attributes.attribute_flags.value = 0;
8918
03a66367 8919 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 8920
886daac9 8921 if (crtc_state->stream) {
674e78ac 8922 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
8923 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8924 &attributes))
8925 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 8926
2a8f6ccb
HW
8927 if (!dc_stream_set_cursor_position(crtc_state->stream,
8928 &position))
8929 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 8930 mutex_unlock(&adev->dm.dc_lock);
886daac9 8931 }
2a8f6ccb 8932}
e7b07cee
HW
8933
8934static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8935{
8936
8937 assert_spin_locked(&acrtc->base.dev->event_lock);
8938 WARN_ON(acrtc->event);
8939
8940 acrtc->event = acrtc->base.state->event;
8941
8942 /* Set the flip status */
8943 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8944
8945 /* Mark this event as consumed */
8946 acrtc->base.state->event = NULL;
8947
cb2318b7
VL
8948 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8949 acrtc->crtc_id);
e7b07cee
HW
8950}
8951
bb47de73
NK
8952static void update_freesync_state_on_stream(
8953 struct amdgpu_display_manager *dm,
8954 struct dm_crtc_state *new_crtc_state,
180db303
NK
8955 struct dc_stream_state *new_stream,
8956 struct dc_plane_state *surface,
8957 u32 flip_timestamp_in_us)
bb47de73 8958{
09aef2c4 8959 struct mod_vrr_params vrr_params;
bb47de73 8960 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 8961 struct amdgpu_device *adev = dm->adev;
585d450c 8962 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8963 unsigned long flags;
4cda3243 8964 bool pack_sdp_v1_3 = false;
bb47de73
NK
8965
8966 if (!new_stream)
8967 return;
8968
8969 /*
8970 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8971 * For now it's sufficient to just guard against these conditions.
8972 */
8973
8974 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8975 return;
8976
4a580877 8977 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8978 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8979
180db303
NK
8980 if (surface) {
8981 mod_freesync_handle_preflip(
8982 dm->freesync_module,
8983 surface,
8984 new_stream,
8985 flip_timestamp_in_us,
8986 &vrr_params);
09aef2c4
MK
8987
8988 if (adev->family < AMDGPU_FAMILY_AI &&
8989 amdgpu_dm_vrr_active(new_crtc_state)) {
8990 mod_freesync_handle_v_update(dm->freesync_module,
8991 new_stream, &vrr_params);
e63e2491
EB
8992
8993 /* Need to call this before the frame ends. */
8994 dc_stream_adjust_vmin_vmax(dm->dc,
8995 new_crtc_state->stream,
8996 &vrr_params.adjust);
09aef2c4 8997 }
180db303 8998 }
bb47de73
NK
8999
9000 mod_freesync_build_vrr_infopacket(
9001 dm->freesync_module,
9002 new_stream,
180db303 9003 &vrr_params,
ecd0136b
HT
9004 PACKET_TYPE_VRR,
9005 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
9006 &vrr_infopacket,
9007 pack_sdp_v1_3);
bb47de73 9008
8a48b44c 9009 new_crtc_state->freesync_timing_changed |=
585d450c 9010 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
9011 &vrr_params.adjust,
9012 sizeof(vrr_params.adjust)) != 0);
bb47de73 9013
8a48b44c 9014 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
9015 (memcmp(&new_crtc_state->vrr_infopacket,
9016 &vrr_infopacket,
9017 sizeof(vrr_infopacket)) != 0);
9018
585d450c 9019 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
9020 new_crtc_state->vrr_infopacket = vrr_infopacket;
9021
585d450c 9022 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
9023 new_stream->vrr_infopacket = vrr_infopacket;
9024
9025 if (new_crtc_state->freesync_vrr_info_changed)
9026 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9027 new_crtc_state->base.crtc->base.id,
9028 (int)new_crtc_state->base.vrr_enabled,
180db303 9029 (int)vrr_params.state);
09aef2c4 9030
4a580877 9031 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
9032}
9033
585d450c 9034static void update_stream_irq_parameters(
e854194c
MK
9035 struct amdgpu_display_manager *dm,
9036 struct dm_crtc_state *new_crtc_state)
9037{
9038 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 9039 struct mod_vrr_params vrr_params;
e854194c 9040 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 9041 struct amdgpu_device *adev = dm->adev;
585d450c 9042 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 9043 unsigned long flags;
e854194c
MK
9044
9045 if (!new_stream)
9046 return;
9047
9048 /*
9049 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9050 * For now it's sufficient to just guard against these conditions.
9051 */
9052 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9053 return;
9054
4a580877 9055 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 9056 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 9057
e854194c
MK
9058 if (new_crtc_state->vrr_supported &&
9059 config.min_refresh_in_uhz &&
9060 config.max_refresh_in_uhz) {
a85ba005
NC
9061 /*
9062 * if freesync compatible mode was set, config.state will be set
9063 * in atomic check
9064 */
9065 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9066 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9067 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9068 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9069 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9070 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9071 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9072 } else {
9073 config.state = new_crtc_state->base.vrr_enabled ?
9074 VRR_STATE_ACTIVE_VARIABLE :
9075 VRR_STATE_INACTIVE;
9076 }
e854194c
MK
9077 } else {
9078 config.state = VRR_STATE_UNSUPPORTED;
9079 }
9080
9081 mod_freesync_build_vrr_params(dm->freesync_module,
9082 new_stream,
9083 &config, &vrr_params);
9084
9085 new_crtc_state->freesync_timing_changed |=
585d450c
AP
9086 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9087 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 9088
585d450c
AP
9089 new_crtc_state->freesync_config = config;
9090 /* Copy state for access from DM IRQ handler */
9091 acrtc->dm_irq_params.freesync_config = config;
9092 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9093 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 9094 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
9095}
9096
66b0c973
MK
9097static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9098 struct dm_crtc_state *new_state)
9099{
9100 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9101 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9102
9103 if (!old_vrr_active && new_vrr_active) {
9104 /* Transition VRR inactive -> active:
9105 * While VRR is active, we must not disable vblank irq, as a
9106 * reenable after disable would compute bogus vblank/pflip
9107 * timestamps if the reenable happened inside the display front-porch.
d2574c33
MK
9108 *
9109 * We also need vupdate irq for the actual core vblank handling
9110 * at end of vblank.
66b0c973 9111 */
d2574c33 9112 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
9113 drm_crtc_vblank_get(new_state->base.crtc);
9114 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9115 __func__, new_state->base.crtc->base.id);
9116 } else if (old_vrr_active && !new_vrr_active) {
9117 /* Transition VRR active -> inactive:
9118 * Allow vblank irq disable again for fixed refresh rate.
9119 */
d2574c33 9120 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
9121 drm_crtc_vblank_put(new_state->base.crtc);
9122 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9123 __func__, new_state->base.crtc->base.id);
9124 }
9125}
9126
8ad27806
NK
9127static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9128{
9129 struct drm_plane *plane;
5760dcb9 9130 struct drm_plane_state *old_plane_state;
8ad27806
NK
9131 int i;
9132
9133 /*
9134 * TODO: Make this per-stream so we don't issue redundant updates for
9135 * commits with multiple streams.
9136 */
5760dcb9 9137 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8ad27806
NK
9138 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9139 handle_cursor_update(plane, old_plane_state);
9140}
9141
3be5262e 9142static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 9143 struct dc_state *dc_state,
3ee6b26b
AD
9144 struct drm_device *dev,
9145 struct amdgpu_display_manager *dm,
9146 struct drm_crtc *pcrtc,
420cd472 9147 bool wait_for_vblank)
e7b07cee 9148{
efc8278e 9149 uint32_t i;
8a48b44c 9150 uint64_t timestamp_ns;
e7b07cee 9151 struct drm_plane *plane;
0bc9706d 9152 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 9153 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
9154 struct drm_crtc_state *new_pcrtc_state =
9155 drm_atomic_get_new_crtc_state(state, pcrtc);
9156 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
9157 struct dm_crtc_state *dm_old_crtc_state =
9158 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 9159 int planes_count = 0, vpos, hpos;
570c91d5 9160 long r;
e7b07cee 9161 unsigned long flags;
8a48b44c 9162 struct amdgpu_bo *abo;
fdd1fe57
MK
9163 uint32_t target_vblank, last_flip_vblank;
9164 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 9165 bool pflip_present = false;
bc7f670e
DF
9166 struct {
9167 struct dc_surface_update surface_updates[MAX_SURFACES];
9168 struct dc_plane_info plane_infos[MAX_SURFACES];
9169 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 9170 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 9171 struct dc_stream_update stream_update;
74aa7bd4 9172 } *bundle;
bc7f670e 9173
74aa7bd4 9174 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 9175
74aa7bd4
DF
9176 if (!bundle) {
9177 dm_error("Failed to allocate update bundle\n");
4b510503
NK
9178 goto cleanup;
9179 }
e7b07cee 9180
8ad27806
NK
9181 /*
9182 * Disable the cursor first if we're disabling all the planes.
9183 * It'll remain on the screen after the planes are re-enabled
9184 * if we don't.
9185 */
9186 if (acrtc_state->active_planes == 0)
9187 amdgpu_dm_commit_cursors(state);
9188
e7b07cee 9189 /* update planes when needed */
efc8278e 9190 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 9191 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 9192 struct drm_crtc_state *new_crtc_state;
0bc9706d 9193 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 9194 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 9195 bool plane_needs_flip;
c7af5f77 9196 struct dc_plane_state *dc_plane;
54d76575 9197 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 9198
80c218d5
NK
9199 /* Cursor plane is handled after stream updates */
9200 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 9201 continue;
e7b07cee 9202
f5ba60fe
DD
9203 if (!fb || !crtc || pcrtc != crtc)
9204 continue;
9205
9206 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9207 if (!new_crtc_state->active)
e7b07cee
HW
9208 continue;
9209
bc7f670e 9210 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 9211
74aa7bd4 9212 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 9213 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
9214 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9215 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 9216 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 9217 }
8a48b44c 9218
4375d625 9219 fill_dc_scaling_info(dm->adev, new_plane_state,
695af5f9 9220 &bundle->scaling_infos[planes_count]);
8a48b44c 9221
695af5f9
NK
9222 bundle->surface_updates[planes_count].scaling_info =
9223 &bundle->scaling_infos[planes_count];
8a48b44c 9224
f5031000 9225 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 9226
f5031000 9227 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 9228
f5031000
DF
9229 if (!plane_needs_flip) {
9230 planes_count += 1;
9231 continue;
9232 }
8a48b44c 9233
2fac0f53
CK
9234 abo = gem_to_amdgpu_bo(fb->obj[0]);
9235
f8308898
AG
9236 /*
9237 * Wait for all fences on this FB. Do limited wait to avoid
9238 * deadlock during GPU reset when this fence will not signal
9239 * but we hold reservation lock for the BO.
9240 */
7bc80a54
CK
9241 r = dma_resv_wait_timeout(abo->tbo.base.resv,
9242 DMA_RESV_USAGE_WRITE, false,
d3fae3b3 9243 msecs_to_jiffies(5000));
f8308898 9244 if (unlikely(r <= 0))
ed8a5fb2 9245 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 9246
695af5f9 9247 fill_dc_plane_info_and_addr(
8ce5d842 9248 dm->adev, new_plane_state,
6eed95b0 9249 afb->tiling_flags,
695af5f9 9250 &bundle->plane_infos[planes_count],
87b7ebc2 9251 &bundle->flip_addrs[planes_count].address,
6eed95b0 9252 afb->tmz_surface, false);
87b7ebc2 9253
4711c033 9254 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
9255 new_plane_state->plane->index,
9256 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
9257
9258 bundle->surface_updates[planes_count].plane_info =
9259 &bundle->plane_infos[planes_count];
8a48b44c 9260
caff0e66
NK
9261 /*
9262 * Only allow immediate flips for fast updates that don't
9263 * change FB pitch, DCC state, rotation or mirroring.
9264 */
f5031000 9265 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 9266 crtc->state->async_flip &&
caff0e66 9267 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 9268
f5031000
DF
9269 timestamp_ns = ktime_get_ns();
9270 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9271 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9272 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 9273
f5031000
DF
9274 if (!bundle->surface_updates[planes_count].surface) {
9275 DRM_ERROR("No surface for CRTC: id=%d\n",
9276 acrtc_attach->crtc_id);
9277 continue;
bc7f670e
DF
9278 }
9279
f5031000
DF
9280 if (plane == pcrtc->primary)
9281 update_freesync_state_on_stream(
9282 dm,
9283 acrtc_state,
9284 acrtc_state->stream,
9285 dc_plane,
9286 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 9287
4711c033 9288 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
9289 __func__,
9290 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9291 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
9292
9293 planes_count += 1;
9294
8a48b44c
DF
9295 }
9296
74aa7bd4 9297 if (pflip_present) {
634092b1
MK
9298 if (!vrr_active) {
9299 /* Use old throttling in non-vrr fixed refresh rate mode
9300 * to keep flip scheduling based on target vblank counts
9301 * working in a backwards compatible way, e.g., for
9302 * clients using the GLX_OML_sync_control extension or
9303 * DRI3/Present extension with defined target_msc.
9304 */
e3eff4b5 9305 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
9306 } else {
9308 /* For variable refresh rate mode only:
9309 * Get vblank of last completed flip to avoid > 1 vrr
9310 * flips per video frame by use of throttling, but allow
9311 * flip programming anywhere in the possibly large
9312 * variable vrr vblank interval for fine-grained flip
9313 * timing control and more opportunity to avoid stutter
9314 * on late submission of flips.
9315 */
9316 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 9317 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
9318 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9319 }
9320
fdd1fe57 9321 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
9322
9323 /*
9324 * Wait until we're out of the vertical blank period before the one
9325 * targeted by the flip
9326 */
9327 while ((acrtc_attach->enabled &&
9328 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9329 0, &vpos, &hpos, NULL,
9330 NULL, &pcrtc->hwmode)
9331 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9332 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9333 (int)(target_vblank -
e3eff4b5 9334 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
9335 usleep_range(1000, 1100);
9336 }
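		/*
		 * Illustrative walk-through of the throttle above: with
		 * wait_for_vblank == true and last_flip_vblank == 100,
		 * target_vblank is 101; the loop sleeps in ~1 ms steps
		 * only while the pipe is still inside a vblank preceding
		 * vblank 101, so the flip programmed below takes effect
		 * no earlier than roughly one full frame after the
		 * previous flip.
		 */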
9337
8fe684e9
NK
9338 /**
9339 * Prepare the flip event for the pageflip interrupt to handle.
9340 *
9341 * This only works in the case where we've already turned on the
9342 * appropriate hardware blocks (e.g. HUBP), so in the transition case
9343 * from 0 -> n planes we have to skip a hardware generated event
9344 * and rely on sending it from software.
9345 */
9346 if (acrtc_attach->base.state->event &&
035f5496
AP
9347 acrtc_state->active_planes > 0 &&
9348 !acrtc_state->force_dpms_off) {
8a48b44c
DF
9349 drm_crtc_vblank_get(pcrtc);
9350
9351 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9352
9353 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9354 prepare_flip_isr(acrtc_attach);
9355
9356 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9357 }
9358
9359 if (acrtc_state->stream) {
8a48b44c 9360 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 9361 bundle->stream_update.vrr_infopacket =
8a48b44c 9362 &acrtc_state->stream->vrr_infopacket;
e7b07cee 9363 }
e7b07cee
HW
9364 }
9365
bc92c065 9366 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
9367 if ((planes_count || acrtc_state->active_planes == 0) &&
9368 acrtc_state->stream) {
96160687 9369#if defined(CONFIG_DRM_AMD_DC_DCN)
58aa1c50
NK
9370 /*
9371 * If PSR or idle optimizations are enabled then flush out
9372 * any pending work before hardware programming.
9373 */
06dd1888
NK
9374 if (dm->vblank_control_workqueue)
9375 flush_workqueue(dm->vblank_control_workqueue);
96160687 9376#endif
58aa1c50 9377
b6e881c9 9378 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 9379 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
9380 bundle->stream_update.src = acrtc_state->stream->src;
9381 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
9382 }
9383
cf020d49
NK
9384 if (new_pcrtc_state->color_mgmt_changed) {
9385 /*
9386 * TODO: This isn't fully correct since we've actually
9387 * already modified the stream in place.
9388 */
9389 bundle->stream_update.gamut_remap =
9390 &acrtc_state->stream->gamut_remap_matrix;
9391 bundle->stream_update.output_csc_transform =
9392 &acrtc_state->stream->csc_color_matrix;
9393 bundle->stream_update.out_transfer_func =
9394 acrtc_state->stream->out_transfer_func;
9395 }
bc7f670e 9396
8a48b44c 9397 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 9398 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 9399 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 9400
e63e2491
EB
9401 /*
9402 * If FreeSync state on the stream has changed then we need to
9403 * re-adjust the min/max bounds now that DC doesn't handle this
9404 * as part of commit.
9405 */
a85ba005 9406 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
9407 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9408 dc_stream_adjust_vmin_vmax(
9409 dm->dc, acrtc_state->stream,
585d450c 9410 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
9411 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9412 }
bc7f670e 9413 mutex_lock(&dm->dc_lock);
8c322309 9414 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 9415 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
9416 amdgpu_dm_psr_disable(acrtc_state->stream);
9417
bc7f670e 9418 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 9419 bundle->surface_updates,
bc7f670e
DF
9420 planes_count,
9421 acrtc_state->stream,
efc8278e
AJ
9422 &bundle->stream_update,
9423 dc_state);
8c322309 9424
8fe684e9
NK
9425 /**
9426 * Enable or disable the interrupts on the backend.
9427 *
9428 * Most pipes are put into power gating when unused.
9429 *
9430 * When power gating is enabled on a pipe we lose the
9431 * interrupt enablement state when power gating is disabled.
9432 *
9433 * So we need to update the IRQ control state in hardware
9434 * whenever the pipe turns on (since it could be previously
9435 * power gated) or off (since some pipes can't be power gated
9436 * on some ASICs).
9437 */
9438 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
9439 dm_update_pflip_irq_state(drm_to_adev(dev),
9440 acrtc_attach);
8fe684e9 9441
8c322309 9442 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 9443 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 9444 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309 9445 amdgpu_dm_link_setup_psr(acrtc_state->stream);
58aa1c50
NK
9446
9447 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9448 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9449 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9450 struct amdgpu_dm_connector *aconn =
9451 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
1a365683
RL
9452
9453 if (aconn->psr_skip_count > 0)
9454 aconn->psr_skip_count--;
58aa1c50
NK
9455
9456 /* Allow PSR when skip count is 0. */
9457 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9458 } else {
9459 acrtc_attach->dm_irq_params.allow_psr_entry = false;
8c322309
RL
9460 }
9461
bc7f670e 9462 mutex_unlock(&dm->dc_lock);
e7b07cee 9463 }
4b510503 9464
8ad27806
NK
9465 /*
9466 * Update cursor state *after* programming all the planes.
9467 * This avoids redundant programming in the case where we're going
9468 * to be disabling a single plane - those pipes are being disabled.
9469 */
9470 if (acrtc_state->active_planes)
9471 amdgpu_dm_commit_cursors(state);
80c218d5 9472
4b510503 9473cleanup:
74aa7bd4 9474 kfree(bundle);
e7b07cee
HW
9475}
9476
6ce8f316
NK
9477static void amdgpu_dm_commit_audio(struct drm_device *dev,
9478 struct drm_atomic_state *state)
9479{
1348969a 9480 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
9481 struct amdgpu_dm_connector *aconnector;
9482 struct drm_connector *connector;
9483 struct drm_connector_state *old_con_state, *new_con_state;
9484 struct drm_crtc_state *new_crtc_state;
9485 struct dm_crtc_state *new_dm_crtc_state;
9486 const struct dc_stream_status *status;
9487 int i, inst;
9488
9489 /* Notify device removals. */
9490 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9491 if (old_con_state->crtc != new_con_state->crtc) {
9492 /* CRTC changes require notification. */
9493 goto notify;
9494 }
9495
9496 if (!new_con_state->crtc)
9497 continue;
9498
9499 new_crtc_state = drm_atomic_get_new_crtc_state(
9500 state, new_con_state->crtc);
9501
9502 if (!new_crtc_state)
9503 continue;
9504
9505 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9506 continue;
9507
9508 notify:
9509 aconnector = to_amdgpu_dm_connector(connector);
9510
9511 mutex_lock(&adev->dm.audio_lock);
9512 inst = aconnector->audio_inst;
9513 aconnector->audio_inst = -1;
9514 mutex_unlock(&adev->dm.audio_lock);
9515
9516 amdgpu_dm_audio_eld_notify(adev, inst);
9517 }
9518
9519 /* Notify audio device additions. */
9520 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9521 if (!new_con_state->crtc)
9522 continue;
9523
9524 new_crtc_state = drm_atomic_get_new_crtc_state(
9525 state, new_con_state->crtc);
9526
9527 if (!new_crtc_state)
9528 continue;
9529
9530 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9531 continue;
9532
9533 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9534 if (!new_dm_crtc_state->stream)
9535 continue;
9536
9537 status = dc_stream_get_status(new_dm_crtc_state->stream);
9538 if (!status)
9539 continue;
9540
9541 aconnector = to_amdgpu_dm_connector(connector);
9542
9543 mutex_lock(&adev->dm.audio_lock);
9544 inst = status->audio_inst;
9545 aconnector->audio_inst = inst;
9546 mutex_unlock(&adev->dm.audio_lock);
9547
9548 amdgpu_dm_audio_eld_notify(adev, inst);
9549 }
9550}
9551
1f6010a9 9552/*
27b3f4fc
LSL
9553 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9554 * @crtc_state: the DRM CRTC state
9555 * @stream_state: the DC stream state.
9556 *
9557 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9558 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9559 */
9560static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9561 struct dc_stream_state *stream_state)
9562{
b9952f93 9563 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 9564}
e7b07cee 9565
b8592b48
LL
9566/**
9567 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9568 * @state: The atomic state to commit
9569 *
9570 * This will tell DC to commit the constructed DC state from atomic_check,
9571 * programming the hardware. Any failure here implies a hardware failure, since
9572 * atomic check should have filtered anything non-kosher.
9573 */
7578ecda 9574static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
9575{
9576 struct drm_device *dev = state->dev;
1348969a 9577 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
9578 struct amdgpu_display_manager *dm = &adev->dm;
9579 struct dm_atomic_state *dm_state;
eb3dc897 9580 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 9581 uint32_t i, j;
5cc6dcbd 9582 struct drm_crtc *crtc;
0bc9706d 9583 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
9584 unsigned long flags;
9585 bool wait_for_vblank = true;
9586 struct drm_connector *connector;
c2cea706 9587 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 9588 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 9589 int crtc_disable_count = 0;
6ee90e88 9590 bool mode_set_reset_required = false;
e7b07cee 9591
e8a98235
RS
9592 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9593
e7b07cee
HW
9594 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9595
eb3dc897
NK
9596 dm_state = dm_atomic_get_new_state(state);
9597 if (dm_state && dm_state->context) {
9598 dc_state = dm_state->context;
9599 } else {
9600 /* No state changes, retain current state. */
813d20dc 9601 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
9602 ASSERT(dc_state_temp);
9603 dc_state = dc_state_temp;
9604 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9605 }
e7b07cee 9606
6d90a208
AP
9607 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9608 new_crtc_state, i) {
9609 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9610
9611 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9612
9613 if (old_crtc_state->active &&
9614 (!new_crtc_state->active ||
9615 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9616 manage_dm_interrupts(adev, acrtc, false);
9617 dc_stream_release(dm_old_crtc_state->stream);
9618 }
9619 }
9620
8976f73b
RS
9621 drm_atomic_helper_calc_timestamping_constants(state);
9622
e7b07cee 9623 /* update changed items */
0bc9706d 9624 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 9625 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9626
54d76575
LSL
9627 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9628 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 9629
4711c033 9630 DRM_DEBUG_ATOMIC(
e7b07cee
HW
9631 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9632 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9633 "connectors_changed:%d\n",
9634 acrtc->crtc_id,
0bc9706d
LSL
9635 new_crtc_state->enable,
9636 new_crtc_state->active,
9637 new_crtc_state->planes_changed,
9638 new_crtc_state->mode_changed,
9639 new_crtc_state->active_changed,
9640 new_crtc_state->connectors_changed);
e7b07cee 9641
5c68c652
VL
9642 /* Disable cursor if disabling crtc */
9643 if (old_crtc_state->active && !new_crtc_state->active) {
9644 struct dc_cursor_position position;
9645
9646 memset(&position, 0, sizeof(position));
9647 mutex_lock(&dm->dc_lock);
9648 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9649 mutex_unlock(&dm->dc_lock);
9650 }
9651
27b3f4fc
LSL
9652 /* Copy all transient state flags into dc state */
9653 if (dm_new_crtc_state->stream) {
9654 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9655 dm_new_crtc_state->stream);
9656 }
9657
e7b07cee
HW
9658 /* handles headless hotplug case, updating new_state and
9659 * aconnector as needed
9660 */
9661
54d76575 9662 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 9663
4711c033 9664 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9665
54d76575 9666 if (!dm_new_crtc_state->stream) {
e7b07cee 9667 /*
b830ebc9
HW
9668 * this could happen because of issues with
9669 * userspace notifications delivery.
9670 * In this case userspace tries to set mode on
1f6010a9
DF
9671 * display which is disconnected in fact.
9672 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
9673 * We expect reset mode will come soon.
9674 *
9675 * This can also happen when unplug is done
9676 * during resume sequence ended
9677 *
9678 * In this case, we want to pretend we still
9679 * have a sink to keep the pipe running so that
9680 * hw state is consistent with the sw state
9681 */
f1ad2f5e 9682 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
9683 __func__, acrtc->base.base.id);
9684 continue;
9685 }
9686
54d76575
LSL
9687 if (dm_old_crtc_state->stream)
9688 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 9689
97028037
LP
9690 pm_runtime_get_noresume(dev->dev);
9691
e7b07cee 9692 acrtc->enabled = true;
0bc9706d
LSL
9693 acrtc->hw_mode = new_crtc_state->mode;
9694 crtc->hwmode = new_crtc_state->mode;
6ee90e88 9695 mode_set_reset_required = true;
0bc9706d 9696 } else if (modereset_required(new_crtc_state)) {
4711c033 9697 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9698 /* i.e. reset mode */
6ee90e88 9699 if (dm_old_crtc_state->stream)
54d76575 9700 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 9701
6ee90e88 9702 mode_set_reset_required = true;
e7b07cee
HW
9703 }
9704 } /* for_each_crtc_in_state() */

	if (dc_state) {
		/* if there was a mode set or reset, disable eDP PSR */
		if (mode_set_reset_required) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (dm->vblank_control_workqueue)
				flush_workqueue(dm->vblank_control_workqueue);
#endif
			amdgpu_dm_psr_disable_all(dm);
		}

		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
#if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
#endif
		mutex_unlock(&dm->dc_lock);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
						       dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);

		if (WARN_ON(!status))
			continue;

		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* For freesync config update on crtc state and params for irq */
		update_stream_irq_parameters(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/*
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. This was intentionally deferred until after the front end
	 * state was modified, to wait until the OTG is on so that the IRQ
	 * handlers do not access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
		bool configure_crc = false;
		enum amdgpu_dm_pipe_crc_source cur_crc_src;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
#endif
		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
		cur_crc_src = acrtc->dm_irq_params.crc_src;
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			dc_stream_retain(dm_new_crtc_state->stream);
			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
			manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
			/*
			 * Frontend may have changed so reapply the CRC capture
			 * settings for the stream.
			 */
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
				configure_crc = true;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
				if (amdgpu_dm_crc_window_is_activated(crtc)) {
					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
					acrtc->dm_irq_params.crc_window.update_win = true;
					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
					crc_rd_wrk->crtc = crtc;
					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
				}
#endif
			}

			if (configure_crc)
				if (amdgpu_dm_crtc_configure_crc_source(
						crtc, dm_new_crtc_state, cur_crc_src))
					DRM_DEBUG_DRIVER("Failed to configure crc source\n");
#endif
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
	/* restore the backlight level */
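	/*
	 * dm->brightness[] caches the level last requested from userspace,
	 * while dm->actual_brightness[] tracks what was last applied; only
	 * re-program the panels whose applied level diverged during the
	 * commit (interpretation of the two arrays inferred from this use).
	 */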
	for (i = 0; i < dm->num_of_edps; i++) {
		if (dm->backlight_dev[i] &&
		    (dm->actual_brightness[i] != dm->brightness[i]))
			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
	}
#endif
	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/* return the stolen vga memory back to VRAM */
	if (!adev->mman.keep_stolen_vga_memory)
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}

static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore the previous display setting */

	/* Attach connectors to drm_atomic_state */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto out;

	/* Attach crtc to drm_atomic_state */
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto out;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto out;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);

out:
	drm_atomic_state_put(state);
	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);

	return ret;
}

/*
 * This function handles all cases where a set-mode request does not arrive
 * after a hotplug. This includes when a display is unplugged and then plugged
 * back into the same port, and when running without usermode desktop manager
 * support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce we are in a state where we cannot rely on a
	 * usermode call to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases the context, the extra locks we take here are
	 * released too.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming has completed and all
		 * page flips are done; wait up to 10 seconds (10*HZ jiffies)
		 * for each.
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}

static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);
	bool fs_vid_mode = false;

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;

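		/*
		 * min/max_vfreq are in Hz while mod_freesync expects uHz;
		 * e.g. a 48-144 Hz range becomes 48,000,000-144,000,000 uHz
		 * (illustrative numbers).
		 */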
		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;

		if (fs_vid_mode) {
			config.state = VRR_STATE_ACTIVE_FIXED;
			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
			goto out;
		} else if (new_crtc_state->base.vrr_enabled) {
			config.state = VRR_STATE_ACTIVE_VARIABLE;
		} else {
			config.state = VRR_STATE_INACTIVE;
		}
	}
out:
	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}

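/*
 * The mixed ==/!= checks below deliberately match a pair of modes whose
 * pixel clock and horizontal timing are identical while vtotal and the
 * vsync position differ with an unchanged vsync pulse width, i.e. modes
 * that differ only in the vertical front porch. Such a change can be
 * applied without a full modeset, which is how freesync video modes
 * switch refresh rates.
 */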
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state)
{
	const struct drm_display_mode *old_mode, *new_mode;

	if (!old_crtc_state || !new_crtc_state)
		return false;

	old_mode = &old_crtc_state->mode;
	new_mode = &new_crtc_state->mode;

	if (old_mode->clock == new_mode->clock &&
	    old_mode->hdisplay == new_mode->hdisplay &&
	    old_mode->vdisplay == new_mode->vdisplay &&
	    old_mode->htotal == new_mode->htotal &&
	    old_mode->vtotal != new_mode->vtotal &&
	    old_mode->hsync_start == new_mode->hsync_start &&
	    old_mode->vsync_start != new_mode->vsync_start &&
	    old_mode->hsync_end == new_mode->hsync_end &&
	    old_mode->vsync_end != new_mode->vsync_end &&
	    old_mode->hskew == new_mode->hskew &&
	    old_mode->vscan == new_mode->vscan &&
	    (old_mode->vsync_end - old_mode->vsync_start) ==
	    (new_mode->vsync_end - new_mode->vsync_start))
		return true;

	return false;
}

static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
	uint64_t num, den, res;
	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;

	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;

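	/*
	 * fixed_refresh_in_uhz = pixel clock / (htotal * vtotal), scaled to
	 * micro-Hz. Worked example with illustrative numbers: clock =
	 * 148500 kHz, htotal = 2200 and vtotal = 1125 give
	 * 148500000 * 1000000 / 2475000 = 60,000,000 uHz, i.e. 60 Hz.
	 */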
	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
	den = (unsigned long long)new_crtc_state->mode.htotal *
	      (unsigned long long)new_crtc_state->mode.vtotal;

	res = div_u64(num, den);
	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
}

static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * We can end up with no stream on ACTION_SET if a display was
		 * disconnected during S3. In this case it is not an error: the
		 * OS will be updated after detection and will do the right
		 * thing on the next atomic commit.
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
			goto skip_modeset;

		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_ATOMIC(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		if (dm_new_crtc_state->stream &&
		    is_timing_unchanged_for_freesync(new_crtc_state,
						     old_crtc_state)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER(
				"Mode change not required for front porch change, "
				"setting mode_changed to %d",
				new_crtc_state->mode_changed);

			set_freesync_fixed_config(dm_new_crtc_state);

			goto skip_modeset;
		} else if (aconnector &&
			   is_freesync_video_mode(&new_crtc_state->mode,
						  aconnector)) {
			struct drm_display_mode *high_mode;

			high_mode = get_highest_refresh_rate_mode(aconnector, false);
			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
				set_freesync_fixed_config(dm_new_crtc_state);
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when added
		 * MST connectors are not found in the existing crtc_state in
		 * the chained mode.
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given the conditions above, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (the stream has just been
	 *    added to the dc context, or is already on it)
	 * 2. The CRTC has a valid connector attached, and
	 * 3. The CRTC is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}

static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient to
	 * determine when we need to reset all the planes on the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* Src/dst size and scaling updates. */
		if (old_other_state->src_w != new_other_state->src_w ||
		    old_other_state->src_h != new_other_state->src_h ||
		    old_other_state->crtc_w != new_other_state->crtc_w ||
		    old_other_state->crtc_h != new_other_state->crtc_h)
			return true;

		/* Rotation / mirroring updates. */
		if (old_other_state->rotation != new_other_state->rotation)
			return true;

		/* Blending updates. */
		if (old_other_state->pixel_blend_mode !=
		    new_other_state->pixel_blend_mode)
			return true;

		/* Alpha updates. */
		if (old_other_state->alpha != new_other_state->alpha)
			return true;

		/* Colorspace changes. */
		if (old_other_state->color_range != new_other_state->color_range ||
		    old_other_state->color_encoding != new_other_state->color_encoding)
			return true;

		/* Framebuffer checks fall at the end. */
		if (!old_other_state->fb || !new_other_state->fb)
			continue;

		/* Pixel format changes can require bandwidth updates. */
		if (old_other_state->fb->format != new_other_state->fb->format)
			return true;

		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;

		/* Tiling and DCC changes also require bandwidth updates. */
		if (old_afb->tiling_flags != new_afb->tiling_flags ||
		    old_afb->base.modifier != new_afb->base.modifier)
			return true;
	}

	return false;
}

static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
			      struct drm_plane_state *new_plane_state,
			      struct drm_framebuffer *fb)
{
	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
	unsigned int pitch;
	bool linear;

	if (fb->width > new_acrtc->max_cursor_width ||
	    fb->height > new_acrtc->max_cursor_height) {
		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 new_plane_state->fb->width,
				 new_plane_state->fb->height);
		return -EINVAL;
	}
	if (new_plane_state->src_w != fb->width << 16 ||
	    new_plane_state->src_h != fb->height << 16) {
		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
		return -EINVAL;
	}

	/* Pitch in pixels */
	pitch = fb->pitches[0] / fb->format->cpp[0];
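	/*
	 * Illustrative example: an ARGB8888 cursor FB (cpp[0] = 4 bytes)
	 * with pitches[0] = 256 bytes has a pitch of 64 pixels.
	 */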

	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
				 fb->width, pitch);
		return -EINVAL;
	}

	switch (pitch) {
	case 64:
	case 128:
	case 256:
		/* FB pitch is supported by the cursor plane */
		break;
	default:
		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
		return -EINVAL;
	}

	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
		} else {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
		}
		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context))
			return -EINVAL;

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}

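/*
 * drm_plane_state src_w/src_h are 16.16 fixed point, hence the >> 16 below.
 * For 90/270 degree rotations the scanout source is effectively transposed,
 * so width and height swap: a 1920x1080 source rotated 90 degrees is treated
 * as 1080x1920 (illustrative numbers).
 */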
static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
				       int *src_w, int *src_h)
{
	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_90:
	case DRM_MODE_ROTATE_270:
		*src_w = plane_state->src_h >> 16;
		*src_h = plane_state->src_w >> 16;
		break;
	case DRM_MODE_ROTATE_0:
	case DRM_MODE_ROTATE_180:
	default:
		*src_w = plane_state->src_w >> 16;
		*src_h = plane_state->src_h >> 16;
		break;
	}
}

static int dm_check_crtc_cursor(struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *new_crtc_state)
{
	struct drm_plane *cursor = crtc->cursor, *underlying;
	struct drm_plane_state *new_cursor_state, *new_underlying_state;
	int i;
	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
	int cursor_src_w, cursor_src_h;
	int underlying_src_w, underlying_src_h;

	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * blending properties match the underlying planes'.
	 */

	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;

	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
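	/*
	 * Scales are expressed in thousandths of the source size; e.g.
	 * crtc_w = 128 with cursor_src_w = 64 yields cursor_scale_w = 2000,
	 * i.e. a 2x upscale (illustrative numbers).
	 */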

	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
			continue;

		/* Ignore disabled planes */
		if (!new_underlying_state->fb)
			continue;

		dm_get_oriented_plane_size(new_underlying_state,
					   &underlying_src_w, &underlying_src_h);
		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;

		if (cursor_scale_w != underlying_scale_w ||
		    cursor_scale_h != underlying_scale_h) {
			drm_dbg_atomic(crtc->dev,
				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
			return -EINVAL;
		}

		/* If this plane covers the whole CRTC, no need to check planes underneath */
		if (new_underlying_state->crtc_x <= 0 &&
		    new_underlying_state->crtc_y <= 0 &&
		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
			break;
	}

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state, *old_conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
		if (!conn_state->crtc)
			conn_state = old_conn_state;

		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one CRTC
 * while flipping on another, acquiring the global lock guarantees that any
 * such commit waits for completion of any outstanding flip, using DRM's
 * synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, or a negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_mst_fairness_vars vars[MAX_PIPES];
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_topology_mgr *mgr;
#endif

	trace_amdgpu_dm_atomic_check_begin(state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret) {
		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
		goto fail;
	}

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (dc_resource_is_dsc_encoding_supported(dc)) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret) {
					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
					goto fail;
				}
			}
		}
		pre_validate_dsc(state, &dm_state, vars);
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    dm_old_crtc_state->dsc_force_changed == false)
			continue;

		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
		if (ret) {
			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
			goto fail;
		}

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
			goto fail;
		}

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
			goto fail;
		}

		if (dm_old_crtc_state->dsc_force_changed)
			new_crtc_state->mode_changed = true;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
			goto fail;
		}
	}

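	/*
	 * Note the ordering of the dm_update_crtc_state() passes below:
	 * streams for all changed CRTCs are removed (enable == false) before
	 * any are added (enable == true), so that resources released by a
	 * disabled CRTC are available to one being enabled within the same
	 * check (rationale inferred from the pass ordering).
	 */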
11102 /* Disable all crtcs which require disable */
4b9674e5
LL
11103 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11104 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11105 old_crtc_state,
11106 new_crtc_state,
11107 false,
11108 &lock_and_validation_needed);
68ca1c3e
S
11109 if (ret) {
11110 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
4b9674e5 11111 goto fail;
68ca1c3e 11112 }
62f55537
AG
11113 }
11114
11115 /* Enable all crtcs which require enable */
4b9674e5
LL
11116 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11117 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11118 old_crtc_state,
11119 new_crtc_state,
11120 true,
11121 &lock_and_validation_needed);
68ca1c3e
S
11122 if (ret) {
11123 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
4b9674e5 11124 goto fail;
68ca1c3e 11125 }
62f55537
AG
11126 }

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
			goto fail;
		}
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret) {
		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
		goto fail;
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->mpo_requested)
			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
	}

	/* Check cursor planes scaling */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
			goto fail;
		}
	}

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper; check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting state
		 * or bandwidth calculations, and this avoids the performance
		 * penalty of locking the private state object and allocating
		 * a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-change validation was removed because a new stream
	 * cannot currently be committed into the context without causing a
	 * full reset. Need to decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* Set the slot info for each mst_state based on the link encoding format */
	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		struct amdgpu_dm_connector *aconnector;
		struct drm_connector *connector;
		struct drm_connector_list_iter iter;
		u8 link_coding_cap;

		if (!mgr->mst_state)
			continue;

		drm_connector_list_iter_begin(dev, &iter);
		drm_for_each_connector_iter(connector, &iter) {
			int id = connector->index;

			if (id == mst_state->mgr->conn_base_id) {
				aconnector = to_amdgpu_dm_connector(connector);
				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
				drm_dp_mst_update_slots(mst_state, link_coding_cap);

				break;
			}
		}
		drm_connector_list_iter_end(&iter);
	}
#endif
	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We currently have to stall out here in atomic_check for outstanding
	 * commits to finish, because our IRQ handlers reference DRM state
	 * directly; without the stall we could end up disabling interrupts
	 * too early.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
			goto fail;
		}

		ret = do_aquire_global_lock(dev, state);
		if (ret) {
			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
			goto fail;
		}

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			/* Set an error code here; otherwise a failed DSC
			 * computation could fall through to the fail label
			 * with ret still 0 and report success.
			 */
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
			goto fail;
		}
#endif

		/*
		 * Perform validation of the MST topology in the state:
		 * we need to perform the MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance of
		 * getting stuck in an infinite loop and hanging eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
			goto fail;
		}
		status = dc_validate_global_state(dc, dm_state->context, true);
		if (status != DC_OK) {
			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
					 dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
			UPDATE_TYPE_FULL :
			UPDATE_TYPE_FAST;
	}

	/* Must be successful on this path */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

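/*
 * Query the sink's DP_DOWN_STREAM_PORT_COUNT DPCD register and report
 * whether it may ignore the MSA timing parameters. This is used below to
 * decide whether a DP/eDP sink's EDID needs to be checked for a FreeSync
 * monitor range descriptor.
 */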
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

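/*
 * Send one chunk of an EDID CEA extension block to the DMUB firmware for
 * parsing, and translate the firmware's reply (ACK or AMD VSDB contents)
 * into the caller's amdgpu_hdmi_vsdb_info.
 */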
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
		unsigned int offset,
		unsigned int total_length,
		uint8_t *data,
		unsigned int length,
		struct amdgpu_hdmi_vsdb_info *vsdb)
{
	bool res;
	union dmub_rb_cmd cmd;
	struct dmub_cmd_send_edid_cea *input;
	struct dmub_cmd_edid_cea_output *output;

	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	input = &cmd.edid_cea.data.input;

	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
	cmd.edid_cea.header.sub_type = 0;
	cmd.edid_cea.header.payload_bytes =
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
	input->cea_total_length = total_length;
	memcpy(input->payload, data, length);

	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
	if (!res) {
		DRM_ERROR("EDID CEA parser failed\n");
		return false;
	}

	output = &cmd.edid_cea.data.output;

	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
		if (!output->ack.success) {
			DRM_ERROR("EDID CEA ack failed at offset %d\n",
				  output->ack.offset);
		}
	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
		if (!output->amd_vsdb.vsdb_found)
			return false;

		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
	} else {
		DRM_WARN("Unknown EDID CEA parser results\n");
		return false;
	}

	return true;
}

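/*
 * Parse an EDID CEA extension block through the legacy DMCU firmware,
 * streaming the block 8 bytes at a time and retrieving the AMD VSDB
 * result once the final chunk has been sent.
 */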
static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* Send the extension block to the DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* Send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* EDID block send completed, expect result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
			if (res) {
				/* AMD VSDB found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* Not an AMD VSDB */
			return false;
		}

		/* Check for ack */
		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
		if (!res)
			return false;
	}

	return false;
}

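/*
 * Parse an EDID CEA extension block through the DMUB firmware, streaming
 * the block 8 bytes at a time; the VSDB result is filled in by
 * dm_edid_parser_send_cea() as the chunks are processed.
 */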
static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* Send the extension block to the DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* Send 8 bytes at a time */
		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
			return false;
	}

	return vsdb_info->freesync_supported;
}

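/*
 * Dispatch EDID CEA extension parsing to the DMUB firmware when it is
 * available; otherwise fall back to the legacy DMCU path.
 */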
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);

	if (adev->dm.dmub_srv)
		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
	else
		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
}

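/*
 * Find the CEA extension in the EDID and have the firmware scan it for an
 * AMD vendor-specific data block. Returns the index of the CEA extension
 * when a valid VSDB was found, or -ENODEV otherwise.
 */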
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}

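/*
 * Re-evaluate a connector's FreeSync capability from its EDID: DP/eDP
 * sinks are checked via the monitor range descriptor (when the sink can
 * ignore MSA timing parameters), HDMI sinks via the AMD vendor-specific
 * data block. The result is cached in the connector state and exposed
 * through the vrr_capable property.
 */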
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		if (edid_check_required && (edid->version > 1 ||
				(edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {

				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1
				 * then no additional timing information is
				 * provided. Default GTF, GTF Secondary curve and
				 * CVT are not supported.
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10) {

				freesync_capable = true;
			}
		}
	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

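/*
 * Propagate the force_timing_sync setting to every stream in the current
 * DC state and retrigger CRTC synchronization.
 */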
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

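/*
 * Register write helper used by DC: writes through CGS and records the
 * access via the amdgpu_dc_wreg tracepoint.
 */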
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

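/*
 * Register read helper used by DC: bails out while a DMUB register-offload
 * gather is in progress (register reads cannot be offloaded), then reads
 * through CGS and records the access via the amdgpu_dc_rreg tracepoint.
 */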
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

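/*
 * Translate the completion status of an async DMUB transaction (AUX or
 * SET_CONFIG) into a return value and operation result for the sync
 * wrapper below: AUX success yields the reply length, SET_CONFIG success
 * yields 0, and any failure yields -1 plus a matching error code.
 */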
static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
	struct dc_context *ctx,
	uint8_t status_type,
	uint32_t *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int return_status = -1;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;

	if (is_cmd_aux) {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = p_notify->aux_reply.length;
			*operation_result = p_notify->result;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
			*operation_result = AUX_RET_ERROR_TIMEOUT;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		} else {
			*operation_result = AUX_RET_ERROR_UNKNOWN;
		}
	} else {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = 0;
			*operation_result = p_notify->sc_status;
		} else {
			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
		}
	}

	return return_status;
}

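/*
 * Issue an AUX or SET_CONFIG request to the DMUB firmware and wait up to
 * 10 seconds for the completion signalled by the DMUB notification
 * handler, then copy any AUX reply data back into the caller's payload.
 */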
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
	unsigned int link_index, void *cmd_payload, void *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	if (is_cmd_aux) {
		dc_process_dmub_aux_transfer_async(ctx->dc,
			link_index, (struct aux_payload *)cmd_payload);
	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
			(struct set_config_cmd_payload *)cmd_payload,
			adev->dm.dmub_notify)) {
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
					(uint32_t *)operation_result);
	}

	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timeout!");
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
					(uint32_t *)operation_result);
	}

	if (is_cmd_aux) {
		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
			struct aux_payload *payload = (struct aux_payload *)cmd_payload;

			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
				       adev->dm.dmub_notify->aux_reply.length);
			}
		}
	}

	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
				(uint32_t *)operation_result);
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on CHIP_VANGOGH.
 * If everything goes well, we may consider expanding
 * seamless boot to other ASICs.
 */
bool check_seamless_boot_capability(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VANGOGH:
		if (!adev->mman.keep_stolen_vga_memory)
			return true;
		break;
	default:
		break;
	}

	return false;
}