/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

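/*
 * Reflect the current dongle type of a DisplayPort connector into the
 * DRM "subconnector" property, so userspace can tell e.g. a DP->HDMI
 * dongle from a native DP sink.
 */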
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

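/*
 * dm_crtc_get_scanoutpos() - Return the current scanout position
 *
 * Queries DC for the vblank interval and current h/v scanout position
 * of the given CRTC and packs them into the two-halfword register
 * format that the base driver still expects.
 */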
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

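/*
 * Map an OTG (output timing generator) instance back to the amdgpu_crtc
 * that drives it. Interrupt sources identify the pipe by OTG instance
 * rather than by DRM CRTC index, hence the lookup.
 */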
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

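/*
 * VRR (variable refresh rate) counts as active when the freesync state
 * is either "active variable" or "active fixed". The _irq variant reads
 * the copy cached in dm_irq_params, which is safe to access from
 * interrupt context.
 */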
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

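/*
 * A DC timing adjustment is needed when entering fixed-rate VRR, or
 * whenever VRR toggles between active and inactive across a state
 * transition.
 */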
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

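/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Fires at end of front-porch. In VRR mode, core vblank handling is
 * deferred to this handler since vblank timestamps are only valid once
 * the front-porch is over. It also tracks per-frame duration for the
 * refresh-rate tracepoint and runs BTR processing on pre-DCE12 ASICs.
 */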
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining pending DMUB notifications
 * and trace buffer entries.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
			do {
				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			} while (notify.pending_notification);

			if (adev->dm.dmub_notify)
				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
				complete(&adev->dm.dmub_aux_transfer_done);
			// TODO : HPD Implementation

		} else {
			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
		}
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					  int pipe, bool *enabled,
					  unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
				       struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

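/*
 * dm_dmub_hw_init() - Bring up the DMUB microcontroller
 *
 * Copies the DMUB firmware instruction/data sections and the VBIOS into
 * the framebuffer regions reserved for them, clears the mailbox,
 * trace-buffer and firmware-state windows, programs the hardware, and
 * waits for the firmware auto-load to finish before hooking the DMUB
 * service into the DC context.
 */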
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

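/*
 * Build the DC physical address space configuration from the GMC
 * settings: system aperture bounds, AGP window, framebuffer apertures
 * and the GART page-table addresses, so dc_setup_system_context() can
 * mirror the VM layout on DCN hardware.
 */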
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;


	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif
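/*
 * amdgpu_dm_init() - Create and initialize the display manager
 *
 * Creates the DC instance from the ASIC description, brings up DMUB and
 * the DC hardware, registers the freesync and color-management modules,
 * and builds the DRM objects (CRTCs, connectors, encoders) that expose
 * DC links to userspace.
 */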
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
			goto error;
		}
		amdgpu_dm_outbox_init(adev);
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_workqueue) {
		adev->dm.vblank_workqueue->dm = NULL;
		kfree(adev->dm.vblank_workqueue);
		adev->dm.vblank_workqueue = NULL;
	}
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

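/*
 * load_dmcu_fw() - Request DMCU firmware for ASICs that need it
 *
 * Most ASICs either have no DMCU or carry it inside the VBIOS; for
 * those this returns 0 without loading anything. Raven (Picasso and
 * Raven2) and Navi12 need an external DMCU image, which is registered
 * with the PSP loader here.
 */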
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

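/*
 * dm_dmub_sw_init() - Software-side DMUB setup
 *
 * Picks the DMUB firmware image and ASIC revision for the chip, creates
 * the DMUB service with the register accessors above, sizes the
 * firmware regions and backs them with a VRAM buffer whose layout is
 * recorded in adev->dm.dmub_fb_info for dm_dmub_hw_init() to consume.
 */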
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case CHIP_BEIGE_GOBY:
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
	case CHIP_YELLOW_CARP:
		dmub_asic = DMUB_ASIC_DCN31;
		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

a94d5569
DF
1647static int dm_sw_init(void *handle)
1648{
1649 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
743b9786
NK
1650 int r;
1651
1652 r = dm_dmub_sw_init(adev);
1653 if (r)
1654 return r;
a94d5569
DF
1655
1656 return load_dmcu_fw(adev);
1657}
1658
4562236b
HW
1659static int dm_sw_fini(void *handle)
1660{
a94d5569
DF
1661 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1662
8c7aea40
NK
1663 kfree(adev->dm.dmub_fb_info);
1664 adev->dm.dmub_fb_info = NULL;
1665
743b9786
NK
1666 if (adev->dm.dmub_srv) {
1667 dmub_srv_destroy(adev->dm.dmub_srv);
1668 adev->dm.dmub_srv = NULL;
1669 }
1670
75e1658e
ND
1671 release_firmware(adev->dm.dmub_fw);
1672 adev->dm.dmub_fw = NULL;
743b9786 1673
75e1658e
ND
1674 release_firmware(adev->dm.fw_dmcu);
1675 adev->dm.fw_dmcu = NULL;
a94d5569 1676
4562236b
HW
1677 return 0;
1678}
1679
7abcf6b5 1680static int detect_mst_link_for_all_connectors(struct drm_device *dev)
4562236b 1681{
c84dec2f 1682 struct amdgpu_dm_connector *aconnector;
4562236b 1683 struct drm_connector *connector;
f8d2d39e 1684 struct drm_connector_list_iter iter;
7abcf6b5 1685 int ret = 0;
4562236b 1686
f8d2d39e
LP
1687 drm_connector_list_iter_begin(dev, &iter);
1688 drm_for_each_connector_iter(connector, &iter) {
b349f76e 1689 aconnector = to_amdgpu_dm_connector(connector);
30ec2b97
JFZ
1690 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1691 aconnector->mst_mgr.aux) {
f1ad2f5e 1692 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
f8d2d39e
LP
1693 aconnector,
1694 aconnector->base.base.id);
7abcf6b5
AG
1695
1696 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1697 if (ret < 0) {
1698 DRM_ERROR("DM_MST: Failed to start MST\n");
f8d2d39e
LP
1699 aconnector->dc_link->type =
1700 dc_connection_single;
1701 break;
7abcf6b5 1702 }
f8d2d39e 1703 }
4562236b 1704 }
f8d2d39e 1705 drm_connector_list_iter_end(&iter);
4562236b 1706
7abcf6b5
AG
1707 return ret;
1708}
1709
1710static int dm_late_init(void *handle)
1711{
42e67c3b 1712 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7abcf6b5 1713
bbf854dc
DF
1714 struct dmcu_iram_parameters params;
1715 unsigned int linear_lut[16];
1716 int i;
17bdb4a8 1717 struct dmcu *dmcu = NULL;
bbf854dc 1718
17bdb4a8
JFZ
1719 dmcu = adev->dm.dc->res_pool->dmcu;
1720
bbf854dc
DF
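	/* Build a linear 16-entry backlight LUT spanning 0..0xFFFF
	 * (entry i = 0xFFFF * i / 15; e.g. entry 1 = 4369, entry 15 = 65535).
	 */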
1721 for (i = 0; i < 16; i++)
1722 linear_lut[i] = 0xFFFF * i / 15;
1723
1724 params.set = 0;
1725 params.backlight_ramping_start = 0xCCCC;
1726 params.backlight_ramping_reduction = 0xCCCCCCCC;
1727 params.backlight_lut_array_size = 16;
1728 params.backlight_lut_array = linear_lut;
1729
2ad0cdf9
AK
1730	/* Min backlight level after ABM reduction; don't allow below 1%:
1731 * 0xFFFF x 0.01 = 0x28F
1732 */
1733 params.min_abm_backlight = 0x28F;
5cb32419 1734 /* In the case where abm is implemented on dmcub,
6e568e43
JW
1735	 * the dmcu object will be NULL.
1736 * ABM 2.4 and up are implemented on dmcub.
1737 */
1738 if (dmcu) {
1739 if (!dmcu_load_iram(dmcu, params))
1740 return -EINVAL;
1741 } else if (adev->dm.dc->ctx->dmub_srv) {
1742 struct dc_link *edp_links[MAX_NUM_EDP];
1743 int edp_num;
bbf854dc 1744
6e568e43
JW
1745 get_edp_links(adev->dm.dc, edp_links, &edp_num);
1746 for (i = 0; i < edp_num; i++) {
1747 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1748 return -EINVAL;
1749 }
1750 }
bbf854dc 1751
4a580877 1752 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
4562236b
HW
1753}
1754
1755static void s3_handle_mst(struct drm_device *dev, bool suspend)
1756{
c84dec2f 1757 struct amdgpu_dm_connector *aconnector;
4562236b 1758 struct drm_connector *connector;
f8d2d39e 1759 struct drm_connector_list_iter iter;
fe7553be
LP
1760 struct drm_dp_mst_topology_mgr *mgr;
1761 int ret;
1762 bool need_hotplug = false;
4562236b 1763
f8d2d39e
LP
1764 drm_connector_list_iter_begin(dev, &iter);
1765 drm_for_each_connector_iter(connector, &iter) {
fe7553be
LP
1766 aconnector = to_amdgpu_dm_connector(connector);
1767 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1768 aconnector->mst_port)
1769 continue;
1770
1771 mgr = &aconnector->mst_mgr;
1772
1773 if (suspend) {
1774 drm_dp_mst_topology_mgr_suspend(mgr);
1775 } else {
6f85f738 1776 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
fe7553be
LP
1777 if (ret < 0) {
1778 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1779 need_hotplug = true;
1780 }
1781 }
4562236b 1782 }
f8d2d39e 1783 drm_connector_list_iter_end(&iter);
fe7553be
LP
1784
1785 if (need_hotplug)
1786 drm_kms_helper_hotplug_event(dev);
4562236b
HW
1787}
1788
9340dfd3
HW
1789static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1790{
1791 struct smu_context *smu = &adev->smu;
1792 int ret = 0;
1793
1794 if (!is_support_sw_smu(adev))
1795 return 0;
1796
1797	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1798	 * on the Windows driver dc implementation.
1799	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1800	 * should be passed to smu during boot up and resume from s3.
1801	 * boot up: dc calculates dcn watermark clock settings within dc_create,
1802	 * dcn20_resource_construct,
1803	 * then calls the pplib functions below to pass the settings to smu:
1804	 * smu_set_watermarks_for_clock_ranges
1805	 * smu_set_watermarks_table
1806	 * navi10_set_watermarks_table
1807	 * smu_write_watermarks_table
1808	 *
1809	 * For Renoir, clock settings of dcn watermarks are also fixed values.
1810	 * dc has implemented a different flow for the Windows driver:
1811	 * dc_hardware_init / dc_set_power_state
1812	 * dcn10_init_hw
1813	 * notify_wm_ranges
1814	 * set_wm_ranges
1815	 * -- Linux
1816	 * smu_set_watermarks_for_clock_ranges
1817	 * renoir_set_watermarks_table
1818	 * smu_write_watermarks_table
1819	 *
1820	 * For Linux,
1821	 * dc_hardware_init -> amdgpu_dm_init
1822	 * dc_set_power_state --> dm_resume
1823	 *
1824	 * therefore, this function applies to navi10/12/14 but not to Renoir.
1825	 *
1826	 */
1827	switch (adev->asic_type) {
1828 case CHIP_NAVI10:
1829 case CHIP_NAVI14:
1830 case CHIP_NAVI12:
1831 break;
1832 default:
1833 return 0;
1834 }
1835
e7a95eea
EQ
1836 ret = smu_write_watermarks_table(smu);
1837 if (ret) {
1838 DRM_ERROR("Failed to update WMTABLE!\n");
1839 return ret;
9340dfd3
HW
1840 }
1841
9340dfd3
HW
1842 return 0;
1843}
1844
b8592b48
LL
1845/**
1846 * dm_hw_init() - Initialize DC device
28d687ea 1847 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1848 *
1849 * Initialize the &struct amdgpu_display_manager device. This involves calling
1850 * the initializers of each DM component, then populating the struct with them.
1851 *
1852 * Although the function implies hardware initialization, both hardware and
1853 * software are initialized here. Splitting them out to their relevant init
1854 * hooks is a future TODO item.
1855 *
1856 * Some notable things that are initialized here:
1857 *
1858 * - Display Core, both software and hardware
1859 * - DC modules that we need (freesync and color management)
1860 * - DRM software states
1861 * - Interrupt sources and handlers
1862 * - Vblank support
1863 * - Debug FS entries, if enabled
1864 */
4562236b
HW
1865static int dm_hw_init(void *handle)
1866{
1867 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1868 /* Create DAL display manager */
1869 amdgpu_dm_init(adev);
4562236b
HW
1870 amdgpu_dm_hpd_init(adev);
1871
4562236b
HW
1872 return 0;
1873}
1874
b8592b48
LL
1875/**
1876 * dm_hw_fini() - Teardown DC device
28d687ea 1877 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1878 *
1879 * Teardown components within &struct amdgpu_display_manager that require
1880 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1881 * were loaded. Also flush IRQ workqueues and disable them.
1882 */
4562236b
HW
1883static int dm_hw_fini(void *handle)
1884{
1885 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1886
1887 amdgpu_dm_hpd_fini(adev);
1888
1889 amdgpu_dm_irq_fini(adev);
21de3396 1890 amdgpu_dm_fini(adev);
4562236b
HW
1891 return 0;
1892}
1893
cdaae837
BL
1894
1895static int dm_enable_vblank(struct drm_crtc *crtc);
1896static void dm_disable_vblank(struct drm_crtc *crtc);
1897
1898static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1899 struct dc_state *state, bool enable)
1900{
1901 enum dc_irq_source irq_source;
1902 struct amdgpu_crtc *acrtc;
1903 int rc = -EBUSY;
1904 int i = 0;
1905
1906 for (i = 0; i < state->stream_count; i++) {
1907 acrtc = get_crtc_by_otg_inst(
1908 adev, state->stream_status[i].primary_otg_inst);
1909
1910 if (acrtc && state->stream_status[i].plane_count != 0) {
1911 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1912 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4711c033
LT
1913			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1914 acrtc->crtc_id, enable ? "en" : "dis", rc);
cdaae837
BL
1915 if (rc)
1916 DRM_WARN("Failed to %s pflip interrupts\n",
1917 enable ? "enable" : "disable");
1918
1919 if (enable) {
1920 rc = dm_enable_vblank(&acrtc->base);
1921 if (rc)
1922 DRM_WARN("Failed to enable vblank interrupts\n");
1923 } else {
1924 dm_disable_vblank(&acrtc->base);
1925 }
1926
1927 }
1928 }
1929
1930}
1931
dfd84d90 1932static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
1933{
1934 struct dc_state *context = NULL;
1935 enum dc_status res = DC_ERROR_UNEXPECTED;
1936 int i;
1937 struct dc_stream_state *del_streams[MAX_PIPES];
1938 int del_streams_count = 0;
1939
1940 memset(del_streams, 0, sizeof(del_streams));
1941
1942 context = dc_create_state(dc);
1943 if (context == NULL)
1944 goto context_alloc_fail;
1945
1946 dc_resource_state_copy_construct_current(dc, context);
1947
1948	/* First, remove all streams from the context */
1949 for (i = 0; i < context->stream_count; i++) {
1950 struct dc_stream_state *stream = context->streams[i];
1951
1952 del_streams[del_streams_count++] = stream;
1953 }
1954
1955 /* Remove all planes for removed streams and then remove the streams */
1956 for (i = 0; i < del_streams_count; i++) {
1957 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1958 res = DC_FAIL_DETACH_SURFACES;
1959 goto fail;
1960 }
1961
1962 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1963 if (res != DC_OK)
1964 goto fail;
1965 }
1966
1967
1968 res = dc_validate_global_state(dc, context, false);
1969
1970 if (res != DC_OK) {
1971		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1972 goto fail;
1973 }
1974
1975 res = dc_commit_state(dc, context);
1976
1977fail:
1978 dc_release_state(context);
1979
1980context_alloc_fail:
1981 return res;
1982}
1983
4562236b
HW
1984static int dm_suspend(void *handle)
1985{
1986 struct amdgpu_device *adev = handle;
1987 struct amdgpu_display_manager *dm = &adev->dm;
1988 int ret = 0;
4562236b 1989
53b3f8f4 1990 if (amdgpu_in_reset(adev)) {
cdaae837 1991 mutex_lock(&dm->dc_lock);
98ab5f35
BL
1992
1993#if defined(CONFIG_DRM_AMD_DC_DCN)
1994 dc_allow_idle_optimizations(adev->dm.dc, false);
1995#endif
1996
cdaae837
BL
1997 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1998
1999 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2000
2001 amdgpu_dm_commit_zero_streams(dm->dc);
2002
2003 amdgpu_dm_irq_suspend(adev);
2004
2005 return ret;
2006 }
4562236b 2007
d2f0b53b 2008 WARN_ON(adev->dm.cached_state);
4a580877 2009 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 2010
4a580877 2011 s3_handle_mst(adev_to_drm(adev), true);
4562236b 2012
4562236b
HW
2013 amdgpu_dm_irq_suspend(adev);
2014
32f5062d 2015 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 2016
1c2075d4 2017 return 0;
4562236b
HW
2018}
2019
1daf8c63
AD
2020static struct amdgpu_dm_connector *
2021amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2022 struct drm_crtc *crtc)
4562236b
HW
2023{
2024 uint32_t i;
c2cea706 2025 struct drm_connector_state *new_con_state;
4562236b
HW
2026 struct drm_connector *connector;
2027 struct drm_crtc *crtc_from_state;
2028
c2cea706
LSL
2029 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2030 crtc_from_state = new_con_state->crtc;
4562236b
HW
2031
2032 if (crtc_from_state == crtc)
c84dec2f 2033 return to_amdgpu_dm_connector(connector);
4562236b
HW
2034 }
2035
2036 return NULL;
2037}
2038
fbbdadf2
BL
2039static void emulated_link_detect(struct dc_link *link)
2040{
2041 struct dc_sink_init_data sink_init_data = { 0 };
2042 struct display_sink_capability sink_caps = { 0 };
2043 enum dc_edid_status edid_status;
2044 struct dc_context *dc_ctx = link->ctx;
2045 struct dc_sink *sink = NULL;
2046 struct dc_sink *prev_sink = NULL;
2047
2048 link->type = dc_connection_none;
2049 prev_sink = link->local_sink;
2050
30164a16
VL
2051 if (prev_sink)
2052 dc_sink_release(prev_sink);
fbbdadf2
BL
2053
2054 switch (link->connector_signal) {
2055 case SIGNAL_TYPE_HDMI_TYPE_A: {
2056 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2057 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2058 break;
2059 }
2060
2061 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2062 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2063 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2064 break;
2065 }
2066
2067 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2068 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2069 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2070 break;
2071 }
2072
2073 case SIGNAL_TYPE_LVDS: {
2074 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2075 sink_caps.signal = SIGNAL_TYPE_LVDS;
2076 break;
2077 }
2078
2079 case SIGNAL_TYPE_EDP: {
2080 sink_caps.transaction_type =
2081 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2082 sink_caps.signal = SIGNAL_TYPE_EDP;
2083 break;
2084 }
2085
2086 case SIGNAL_TYPE_DISPLAY_PORT: {
2087 sink_caps.transaction_type =
2088 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2089 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2090 break;
2091 }
2092
2093 default:
2094 DC_ERROR("Invalid connector type! signal:%d\n",
2095 link->connector_signal);
2096 return;
2097 }
2098
2099 sink_init_data.link = link;
2100 sink_init_data.sink_signal = sink_caps.signal;
2101
2102 sink = dc_sink_create(&sink_init_data);
2103 if (!sink) {
2104 DC_ERROR("Failed to create sink!\n");
2105 return;
2106 }
2107
dcd5fb82 2108 /* dc_sink_create returns a new reference */
fbbdadf2
BL
2109 link->local_sink = sink;
2110
2111 edid_status = dm_helpers_read_local_edid(
2112 link->ctx,
2113 link,
2114 sink);
2115
2116 if (edid_status != EDID_OK)
2117 DC_ERROR("Failed to read EDID");
2118
2119}
2120
cdaae837
BL
2121static void dm_gpureset_commit_state(struct dc_state *dc_state,
2122 struct amdgpu_display_manager *dm)
2123{
2124 struct {
2125 struct dc_surface_update surface_updates[MAX_SURFACES];
2126 struct dc_plane_info plane_infos[MAX_SURFACES];
2127 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2128 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2129 struct dc_stream_update stream_update;
2130	} *bundle;
2131 int k, m;
2132
2133 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2134
2135 if (!bundle) {
2136 dm_error("Failed to allocate update bundle\n");
2137 goto cleanup;
2138 }
2139
2140 for (k = 0; k < dc_state->stream_count; k++) {
2141 bundle->stream_update.stream = dc_state->streams[k];
2142
2143		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2144			bundle->surface_updates[m].surface =
2145				dc_state->stream_status[k].plane_states[m];
2146			bundle->surface_updates[m].surface->force_full_update =
2147				true;
2148		}
2149		dc_commit_updates_for_stream(
2150			dm->dc, bundle->surface_updates,
2151			dc_state->stream_status[k].plane_count,
efc8278e 2152 dc_state->streams[k], &bundle->stream_update, dc_state);
cdaae837
BL
2153 }
2154
2155cleanup:
2156 kfree(bundle);
2157
2158 return;
2159}
2160
3c4d55c9
AP
2161static void dm_set_dpms_off(struct dc_link *link)
2162{
2163 struct dc_stream_state *stream_state;
2164 struct amdgpu_dm_connector *aconnector = link->priv;
2165 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2166 struct dc_stream_update stream_update;
2167 bool dpms_off = true;
2168
2169 memset(&stream_update, 0, sizeof(stream_update));
2170 stream_update.dpms_off = &dpms_off;
2171
2172 mutex_lock(&adev->dm.dc_lock);
2173 stream_state = dc_stream_find_from_link(link);
2174
2175 if (stream_state == NULL) {
2176 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2177 mutex_unlock(&adev->dm.dc_lock);
2178 return;
2179 }
2180
2181 stream_update.stream = stream_state;
2182 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
efc8278e
AJ
2183 stream_state, &stream_update,
2184 stream_state->ctx->dc->current_state);
3c4d55c9
AP
2185 mutex_unlock(&adev->dm.dc_lock);
2186}
2187
4562236b
HW
2188static int dm_resume(void *handle)
2189{
2190 struct amdgpu_device *adev = handle;
4a580877 2191 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2192 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2193 struct amdgpu_dm_connector *aconnector;
4562236b 2194 struct drm_connector *connector;
f8d2d39e 2195 struct drm_connector_list_iter iter;
4562236b 2196 struct drm_crtc *crtc;
c2cea706 2197 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
2198 struct dm_crtc_state *dm_new_crtc_state;
2199 struct drm_plane *plane;
2200 struct drm_plane_state *new_plane_state;
2201 struct dm_plane_state *dm_new_plane_state;
113b7a01 2202 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2203 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
2204 struct dc_state *dc_state;
2205 int i, r, j;
4562236b 2206
53b3f8f4 2207 if (amdgpu_in_reset(adev)) {
cdaae837
BL
2208 dc_state = dm->cached_dc_state;
2209
2210 r = dm_dmub_hw_init(adev);
2211 if (r)
2212 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2213
2214 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2215 dc_resume(dm->dc);
2216
2217 amdgpu_dm_irq_resume_early(adev);
2218
2219 for (i = 0; i < dc_state->stream_count; i++) {
2220 dc_state->streams[i]->mode_changed = true;
2221			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2222				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2223 = 0xffffffff;
2224 }
2225 }
8fe44c08 2226#if defined(CONFIG_DRM_AMD_DC_DCN)
1ebcaebd
NK
2227 /*
2228 * Resource allocation happens for link encoders for newer ASIC in
2229 * dc_validate_global_state, so we need to revalidate it.
2230 *
2231 * This shouldn't fail (it passed once before), so warn if it does.
2232 */
2233 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2234#endif
cdaae837
BL
2235
2236 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2237
cdaae837
BL
2238 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2239
2240 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2241
2242 dc_release_state(dm->cached_dc_state);
2243 dm->cached_dc_state = NULL;
2244
2245 amdgpu_dm_irq_resume_late(adev);
2246
2247 mutex_unlock(&dm->dc_lock);
2248
2249 return 0;
2250 }
113b7a01
LL
2251 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2252 dc_release_state(dm_state->context);
2253 dm_state->context = dc_create_state(dm->dc);
2254 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2255 dc_resource_state_construct(dm->dc, dm_state->context);
2256
8c7aea40
NK
2257 /* Before powering on DC we need to re-initialize DMUB. */
2258 r = dm_dmub_hw_init(adev);
2259 if (r)
2260 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2261
a80aa93d
ML
2262 /* power on hardware */
2263 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2264
4562236b
HW
2265 /* program HPD filter */
2266 dc_resume(dm->dc);
2267
4562236b
HW
2268 /*
2269	 * Early enable HPD Rx IRQ; this should be done before setting the mode,
2270	 * as short pulse interrupts are used for MST
2271 */
2272 amdgpu_dm_irq_resume_early(adev);
2273
d20ebea8	2274	/* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
2275 s3_handle_mst(ddev, false);
2276
4562236b	2277	/* Do detection */
f8d2d39e
LP
2278 drm_connector_list_iter_begin(ddev, &iter);
2279 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2280 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2281
2282 /*
2283 * this is the case when traversing through already created
2284 * MST connectors, should be skipped
2285 */
2286 if (aconnector->mst_port)
2287 continue;
2288
03ea364c 2289 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2290 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2291 DRM_ERROR("KMS: Failed to detect connector\n");
2292
2293 if (aconnector->base.force && new_connection_type == dc_connection_none)
2294 emulated_link_detect(aconnector->dc_link);
2295 else
2296 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2297
2298 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2299 aconnector->fake_enable = false;
2300
dcd5fb82
MF
2301 if (aconnector->dc_sink)
2302 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2303 aconnector->dc_sink = NULL;
2304 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2305 mutex_unlock(&aconnector->hpd_lock);
4562236b 2306 }
f8d2d39e 2307 drm_connector_list_iter_end(&iter);
4562236b 2308
1f6010a9 2309 /* Force mode set in atomic commit */
a80aa93d 2310 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2311 new_crtc_state->active_changed = true;
4f346e65 2312
fcb4019e
LSL
2313 /*
2314 * atomic_check is expected to create the dc states. We need to release
2315 * them here, since they were duplicated as part of the suspend
2316 * procedure.
2317 */
a80aa93d 2318 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2319 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2320 if (dm_new_crtc_state->stream) {
2321 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2322 dc_stream_release(dm_new_crtc_state->stream);
2323 dm_new_crtc_state->stream = NULL;
2324 }
2325 }
2326
a80aa93d 2327 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2328 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2329 if (dm_new_plane_state->dc_state) {
2330 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2331 dc_plane_state_release(dm_new_plane_state->dc_state);
2332 dm_new_plane_state->dc_state = NULL;
2333 }
2334 }
2335
2d1af6a1 2336 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2337
a80aa93d 2338 dm->cached_state = NULL;
0a214e2f 2339
9faa4237 2340 amdgpu_dm_irq_resume_late(adev);
4562236b 2341
9340dfd3
HW
2342 amdgpu_dm_smu_write_watermarks_table(adev);
2343
2d1af6a1 2344 return 0;
4562236b
HW
2345}
2346
b8592b48
LL
2347/**
2348 * DOC: DM Lifecycle
2349 *
2350 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2351 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2352 * the base driver's device list to be initialized and torn down accordingly.
2353 *
2354 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2355 */
2356
4562236b
HW
2357static const struct amd_ip_funcs amdgpu_dm_funcs = {
2358 .name = "dm",
2359 .early_init = dm_early_init,
7abcf6b5 2360 .late_init = dm_late_init,
4562236b
HW
2361 .sw_init = dm_sw_init,
2362 .sw_fini = dm_sw_fini,
e9669fb7 2363 .early_fini = amdgpu_dm_early_fini,
4562236b
HW
2364 .hw_init = dm_hw_init,
2365 .hw_fini = dm_hw_fini,
2366 .suspend = dm_suspend,
2367 .resume = dm_resume,
2368 .is_idle = dm_is_idle,
2369 .wait_for_idle = dm_wait_for_idle,
2370 .check_soft_reset = dm_check_soft_reset,
2371 .soft_reset = dm_soft_reset,
2372 .set_clockgating_state = dm_set_clockgating_state,
2373 .set_powergating_state = dm_set_powergating_state,
2374};
2375
2376const struct amdgpu_ip_block_version dm_ip_block =
2377{
2378 .type = AMD_IP_BLOCK_TYPE_DCE,
2379 .major = 1,
2380 .minor = 0,
2381 .rev = 0,
2382 .funcs = &amdgpu_dm_funcs,
2383};
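/* A sketch of how the base driver consumes dm_ip_block (illustrative; the
 * actual call site lives in the per-ASIC setup code, not in this file):
 *
 *	r = amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *	if (r)
 *		return r;
 */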
2384
ca3268c4 2385
b8592b48
LL
2386/**
2387 * DOC: atomic
2388 *
2389 * *WIP*
2390 */
0a323b84 2391
b3663f70 2392static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2393 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2394 .get_format_info = amd_get_format_info,
366c1baa 2395 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2396 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2397 .atomic_commit = drm_atomic_helper_commit,
54f5499a
AG
2398};
2399
2400static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2401 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2402};
2403
94562810
RS
2404static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2405{
2406 u32 max_cll, min_cll, max, min, q, r;
2407 struct amdgpu_dm_backlight_caps *caps;
2408 struct amdgpu_display_manager *dm;
2409 struct drm_connector *conn_base;
2410 struct amdgpu_device *adev;
ec11fe37 2411 struct dc_link *link = NULL;
94562810
RS
2412 static const u8 pre_computed_values[] = {
2413 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2414 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2415
2416 if (!aconnector || !aconnector->dc_link)
2417 return;
2418
ec11fe37 2419 link = aconnector->dc_link;
2420 if (link->connector_signal != SIGNAL_TYPE_EDP)
2421 return;
2422
94562810 2423 conn_base = &aconnector->base;
1348969a 2424 adev = drm_to_adev(conn_base->dev);
94562810
RS
2425 dm = &adev->dm;
2426 caps = &dm->backlight_caps;
2427 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2428 caps->aux_support = false;
2429 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2430 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2431
2432 if (caps->ext_caps->bits.oled == 1 ||
2433 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2434 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2435 caps->aux_support = true;
2436
7a46f05e
TI
2437 if (amdgpu_backlight == 0)
2438 caps->aux_support = false;
2439 else if (amdgpu_backlight == 1)
2440 caps->aux_support = true;
2441
94562810
RS
2442 /* From the specification (CTA-861-G), for calculating the maximum
2443 * luminance we need to use:
2444 * Luminance = 50*2**(CV/32)
2445 * Where CV is a one-byte value.
2446	 * For calculating this expression we may need floating point precision;
2447	 * to avoid this complexity level, we take advantage of the fact that CV
2448	 * is divided by a constant. From Euclid's division algorithm, we know
2449	 * that CV can be written as: CV = 32*q + r. Next, we replace CV in the
2450	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2451	 * need to pre-compute the value of r/32. For pre-computing the values
2452	 * we just used the following Ruby line:
2453 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2454 * The results of the above expressions can be verified at
2455 * pre_computed_values.
2456 */
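	/* Worked example (illustrative, not from the spec): for max_cll = 70,
	 * q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 nits,
	 * matching round(50 * 2**(70/32.0)) = 228.
	 */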
2457 q = max_cll >> 5;
2458 r = max_cll % 32;
2459 max = (1 << q) * pre_computed_values[r];
2460
2461 // min luminance: maxLum * (CV/255)^2 / 100
2462 q = DIV_ROUND_CLOSEST(min_cll, 255);
2463 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2464
2465 caps->aux_max_input_signal = max;
2466 caps->aux_min_input_signal = min;
2467}
2468
97e51c16
HW
2469void amdgpu_dm_update_connector_after_detect(
2470 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2471{
2472 struct drm_connector *connector = &aconnector->base;
2473 struct drm_device *dev = connector->dev;
b73a22d3 2474 struct dc_sink *sink;
4562236b
HW
2475
2476 /* MST handled by drm_mst framework */
2477 if (aconnector->mst_mgr.mst_state == true)
2478 return;
2479
4562236b 2480 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2481 if (sink)
2482 dc_sink_retain(sink);
4562236b 2483
1f6010a9
DF
2484 /*
2485	 * An EDID mgmt connector gets its first update only in the mode_valid hook, and then
4562236b	2486	 * the connector sink is set to either a fake or a physical sink, depending on link status.
1f6010a9 2487 * Skip if already done during boot.
4562236b
HW
2488 */
2489 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2490 && aconnector->dc_em_sink) {
2491
1f6010a9
DF
2492 /*
2493		 * For S3 resume with headless, use the emulated sink (dc_em_sink) to
2494		 * fake the stream, because on resume connector->sink is set to NULL
4562236b
HW
2495 */
2496 mutex_lock(&dev->mode_config.mutex);
2497
2498 if (sink) {
922aa1e1 2499 if (aconnector->dc_sink) {
98e6436d 2500 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2501 /*
2502				 * The retain and release below bump up the sink
2503				 * refcount because the link no longer points to it
2504				 * after disconnect; otherwise, on the next crtc-to-connector
922aa1e1
AG
2505				 * reshuffle by the UMD, we would get an unwanted dc_sink release
2506 */
dcd5fb82 2507 dc_sink_release(aconnector->dc_sink);
922aa1e1 2508 }
4562236b 2509 aconnector->dc_sink = sink;
dcd5fb82 2510 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2511 amdgpu_dm_update_freesync_caps(connector,
2512 aconnector->edid);
4562236b 2513 } else {
98e6436d 2514 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2515 if (!aconnector->dc_sink) {
4562236b 2516 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2517 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2518 }
4562236b
HW
2519 }
2520
2521 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2522
2523 if (sink)
2524 dc_sink_release(sink);
4562236b
HW
2525 return;
2526 }
2527
2528 /*
2529 * TODO: temporary guard to look for proper fix
2530	 * if this sink is an MST sink, we should not do anything
2531 */
dcd5fb82
MF
2532 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2533 dc_sink_release(sink);
4562236b 2534 return;
dcd5fb82 2535 }
4562236b
HW
2536
2537 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2538 /*
2539 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2540 * Do nothing!!
2541 */
f1ad2f5e 2542 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2543 aconnector->connector_id);
dcd5fb82
MF
2544 if (sink)
2545 dc_sink_release(sink);
4562236b
HW
2546 return;
2547 }
2548
f1ad2f5e 2549 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2550 aconnector->connector_id, aconnector->dc_sink, sink);
2551
2552 mutex_lock(&dev->mode_config.mutex);
2553
1f6010a9
DF
2554 /*
2555 * 1. Update status of the drm connector
2556 * 2. Send an event and let userspace tell us what to do
2557 */
4562236b 2558 if (sink) {
1f6010a9
DF
2559 /*
2560 * TODO: check if we still need the S3 mode update workaround.
2561 * If yes, put it here.
2562 */
c64b0d6b 2563 if (aconnector->dc_sink) {
98e6436d 2564 amdgpu_dm_update_freesync_caps(connector, NULL);
c64b0d6b
VL
2565 dc_sink_release(aconnector->dc_sink);
2566 }
4562236b
HW
2567
2568 aconnector->dc_sink = sink;
dcd5fb82 2569 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2570 if (sink->dc_edid.length == 0) {
4562236b 2571 aconnector->edid = NULL;
e6142dd5
AP
2572 if (aconnector->dc_link->aux_mode) {
2573 drm_dp_cec_unset_edid(
2574 &aconnector->dm_dp_aux.aux);
2575 }
900b3cb1 2576 } else {
4562236b 2577 aconnector->edid =
e6142dd5 2578 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2579
c555f023 2580 drm_connector_update_edid_property(connector,
e6142dd5 2581 aconnector->edid);
e6142dd5
AP
2582 if (aconnector->dc_link->aux_mode)
2583 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2584 aconnector->edid);
4562236b 2585 }
e6142dd5 2586
98e6436d 2587 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2588 update_connector_ext_caps(aconnector);
4562236b 2589 } else {
e86e8947 2590 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2591 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2592 drm_connector_update_edid_property(connector, NULL);
4562236b 2593 aconnector->num_modes = 0;
dcd5fb82 2594 dc_sink_release(aconnector->dc_sink);
4562236b 2595 aconnector->dc_sink = NULL;
5326c452 2596 aconnector->edid = NULL;
0c8620d6
BL
2597#ifdef CONFIG_DRM_AMD_DC_HDCP
2598 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2599 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2600 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2601#endif
4562236b
HW
2602 }
2603
2604 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 2605
0f877894
OV
2606 update_subconnector_property(aconnector);
2607
dcd5fb82
MF
2608 if (sink)
2609 dc_sink_release(sink);
4562236b
HW
2610}
2611
2612static void handle_hpd_irq(void *param)
2613{
c84dec2f 2614 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2615 struct drm_connector *connector = &aconnector->base;
2616 struct drm_device *dev = connector->dev;
fbbdadf2 2617 enum dc_connection_type new_connection_type = dc_connection_none;
1348969a 2618 struct amdgpu_device *adev = drm_to_adev(dev);
b972b4f9 2619#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 2620 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 2621#endif
4562236b 2622
b972b4f9
HW
2623 if (adev->dm.disable_hpd_irq)
2624 return;
2625
1f6010a9
DF
2626 /*
2627	 * In case of failure or MST there is no need to update the connector status
2628	 * or notify the OS, since (in the MST case) MST does this in its own context.
4562236b
HW
2629 */
2630 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2631
0c8620d6 2632#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 2633 if (adev->dm.hdcp_workqueue) {
96a3b32e 2634 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
97f6c917
BL
2635 dm_con_state->update_hdcp = true;
2636 }
0c8620d6 2637#endif
2e0ac3d6
HW
2638 if (aconnector->fake_enable)
2639 aconnector->fake_enable = false;
2640
fbbdadf2
BL
2641 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2642 DRM_ERROR("KMS: Failed to detect connector\n");
2643
2644 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2645 emulated_link_detect(aconnector->dc_link);
2646
2647
2648 drm_modeset_lock_all(dev);
2649 dm_restore_drm_connector_state(dev, connector);
2650 drm_modeset_unlock_all(dev);
2651
2652 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2653 drm_kms_helper_hotplug_event(dev);
2654
2655 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9
AP
2656 if (new_connection_type == dc_connection_none &&
2657 aconnector->dc_link->type == dc_connection_none)
2658 dm_set_dpms_off(aconnector->dc_link);
4562236b 2659
3c4d55c9 2660 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b
HW
2661
2662 drm_modeset_lock_all(dev);
2663 dm_restore_drm_connector_state(dev, connector);
2664 drm_modeset_unlock_all(dev);
2665
2666 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2667 drm_kms_helper_hotplug_event(dev);
2668 }
2669 mutex_unlock(&aconnector->hpd_lock);
2670
2671}
2672
c84dec2f 2673static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2674{
2675 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2676 uint8_t dret;
2677 bool new_irq_handled = false;
2678 int dpcd_addr;
2679 int dpcd_bytes_to_read;
2680
2681 const int max_process_count = 30;
2682 int process_count = 0;
2683
2684 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2685
2686 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2687 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2688 /* DPCD 0x200 - 0x201 for downstream IRQ */
2689 dpcd_addr = DP_SINK_COUNT;
2690 } else {
2691 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2692 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2693 dpcd_addr = DP_SINK_COUNT_ESI;
2694 }
2695
2696 dret = drm_dp_dpcd_read(
2697 &aconnector->dm_dp_aux.aux,
2698 dpcd_addr,
2699 esi,
2700 dpcd_bytes_to_read);
2701
2702 while (dret == dpcd_bytes_to_read &&
2703 process_count < max_process_count) {
2704 uint8_t retry;
2705 dret = 0;
2706
2707 process_count++;
2708
f1ad2f5e 2709 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
2710 /* handle HPD short pulse irq */
2711 if (aconnector->mst_mgr.mst_state)
2712 drm_dp_mst_hpd_irq(
2713 &aconnector->mst_mgr,
2714 esi,
2715 &new_irq_handled);
4562236b
HW
2716
2717 if (new_irq_handled) {
2718			/* ACK at DPCD to notify downstream */
2719 const int ack_dpcd_bytes_to_write =
2720 dpcd_bytes_to_read - 1;
2721
2722 for (retry = 0; retry < 3; retry++) {
2723 uint8_t wret;
2724
2725 wret = drm_dp_dpcd_write(
2726 &aconnector->dm_dp_aux.aux,
2727 dpcd_addr + 1,
2728 &esi[1],
2729 ack_dpcd_bytes_to_write);
2730 if (wret == ack_dpcd_bytes_to_write)
2731 break;
2732 }
2733
1f6010a9	2734			/* check if there is a new irq to be handled */
4562236b
HW
2735 dret = drm_dp_dpcd_read(
2736 &aconnector->dm_dp_aux.aux,
2737 dpcd_addr,
2738 esi,
2739 dpcd_bytes_to_read);
2740
2741 new_irq_handled = false;
d4a6e8a9 2742 } else {
4562236b 2743 break;
d4a6e8a9 2744 }
4562236b
HW
2745 }
2746
2747 if (process_count == max_process_count)
f1ad2f5e 2748 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
2749}
2750
2751static void handle_hpd_rx_irq(void *param)
2752{
c84dec2f 2753 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2754 struct drm_connector *connector = &aconnector->base;
2755 struct drm_device *dev = connector->dev;
53cbf65c 2756 struct dc_link *dc_link = aconnector->dc_link;
4562236b 2757 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 2758 bool result = false;
fbbdadf2 2759 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 2760 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 2761 union hpd_irq_data hpd_irq_data;
d2aa1356	2762	bool lock_flag = false;
2a0f9270
BL
2763
2764 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 2765
b972b4f9
HW
2766 if (adev->dm.disable_hpd_irq)
2767 return;
2768
2769
1f6010a9
DF
2770 /*
2771	 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
4562236b
HW
2772	 * conflict; after the i2c helper is implemented, this mutex should be
2773	 * retired.
2774 */
b86e7eef 2775 mutex_lock(&aconnector->hpd_lock);
4562236b 2776
3083a984
QZ
2777 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2778
2779 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2780 (dc_link->type == dc_connection_mst_branch)) {
2781 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2782 result = true;
2783 dm_handle_hpd_rx_irq(aconnector);
2784 goto out;
2785 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2786 result = false;
2787 dm_handle_hpd_rx_irq(aconnector);
2788 goto out;
2789 }
2790 }
2791
d2aa1356
AP
2792 /*
2793 * TODO: We need the lock to avoid touching DC state while it's being
2794 * modified during automated compliance testing, or when link loss
2795 * happens. While this should be split into subhandlers and proper
2796 * interfaces to avoid having to conditionally lock like this in the
2797 * outer layer, we need this workaround temporarily to allow MST
2798 * lightup in some scenarios to avoid timeout.
2799 */
2800 if (!amdgpu_in_reset(adev) &&
2801 (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2802 hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
cf8b92a7 2803 mutex_lock(&adev->dm.dc_lock);
d2aa1356
AP
2804		lock_flag = true;
2805 }
2806
2a0f9270 2807#ifdef CONFIG_DRM_AMD_DC_HDCP
c8ea79a8 2808 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2a0f9270 2809#else
c8ea79a8 2810 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2a0f9270 2811#endif
d2aa1356 2812 if (!amdgpu_in_reset(adev) && lock_flag)
cf8b92a7 2813 mutex_unlock(&adev->dm.dc_lock);
c8ea79a8 2814
3083a984 2815out:
c8ea79a8 2816 if (result && !is_mst_root_connector) {
4562236b 2817 /* Downstream Port status changed. */
fbbdadf2
BL
2818 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2819 DRM_ERROR("KMS: Failed to detect connector\n");
2820
2821 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2822 emulated_link_detect(dc_link);
2823
2824 if (aconnector->fake_enable)
2825 aconnector->fake_enable = false;
2826
2827 amdgpu_dm_update_connector_after_detect(aconnector);
2828
2829
2830 drm_modeset_lock_all(dev);
2831 dm_restore_drm_connector_state(dev, connector);
2832 drm_modeset_unlock_all(dev);
2833
2834 drm_kms_helper_hotplug_event(dev);
2835 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
2836
2837 if (aconnector->fake_enable)
2838 aconnector->fake_enable = false;
2839
4562236b
HW
2840 amdgpu_dm_update_connector_after_detect(aconnector);
2841
2842
2843 drm_modeset_lock_all(dev);
2844 dm_restore_drm_connector_state(dev, connector);
2845 drm_modeset_unlock_all(dev);
2846
2847 drm_kms_helper_hotplug_event(dev);
2848 }
2849 }
2a0f9270 2850#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
2851 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2852 if (adev->dm.hdcp_workqueue)
2853 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2854 }
2a0f9270 2855#endif
4562236b 2856
b86e7eef 2857 if (dc_link->type != dc_connection_mst_branch)
e86e8947 2858 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
b86e7eef
NC
2859
2860 mutex_unlock(&aconnector->hpd_lock);
4562236b
HW
2861}
2862
2863static void register_hpd_handlers(struct amdgpu_device *adev)
2864{
4a580877 2865 struct drm_device *dev = adev_to_drm(adev);
4562236b 2866 struct drm_connector *connector;
c84dec2f 2867 struct amdgpu_dm_connector *aconnector;
4562236b
HW
2868 const struct dc_link *dc_link;
2869 struct dc_interrupt_params int_params = {0};
2870
2871 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2872 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2873
2874 list_for_each_entry(connector,
2875 &dev->mode_config.connector_list, head) {
2876
c84dec2f 2877 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2878 dc_link = aconnector->dc_link;
2879
2880 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2881 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2882 int_params.irq_source = dc_link->irq_source_hpd;
2883
2884 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2885 handle_hpd_irq,
2886 (void *) aconnector);
2887 }
2888
2889 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2890
2891 /* Also register for DP short pulse (hpd_rx). */
2892 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2893 int_params.irq_source = dc_link->irq_source_hpd_rx;
2894
2895 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2896 handle_hpd_rx_irq,
2897 (void *) aconnector);
2898 }
2899 }
2900}
2901
55e56389
MR
2902#if defined(CONFIG_DRM_AMD_DC_SI)
2903/* Register IRQ sources and initialize IRQ callbacks */
2904static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2905{
2906 struct dc *dc = adev->dm.dc;
2907 struct common_irq_params *c_irq_params;
2908 struct dc_interrupt_params int_params = {0};
2909 int r;
2910 int i;
2911 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2912
2913 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2914 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2915
2916 /*
2917 * Actions of amdgpu_irq_add_id():
2918 * 1. Register a set() function with base driver.
2919 * Base driver will call set() function to enable/disable an
2920 * interrupt in DC hardware.
2921 * 2. Register amdgpu_dm_irq_handler().
2922 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2923 * coming from DC hardware.
2924 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2925 * for acknowledging and handling. */
2926
2927 /* Use VBLANK interrupt */
2928 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2929		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2930 if (r) {
2931 DRM_ERROR("Failed to add crtc irq id!\n");
2932 return r;
2933 }
2934
2935 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2936 int_params.irq_source =
2937			dc_interrupt_to_irq_source(dc, i + 1, 0);
2938
2939 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2940
2941 c_irq_params->adev = adev;
2942 c_irq_params->irq_src = int_params.irq_source;
2943
2944 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2945 dm_crtc_high_irq, c_irq_params);
2946 }
2947
2948 /* Use GRPH_PFLIP interrupt */
2949 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2950 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2951 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2952 if (r) {
2953 DRM_ERROR("Failed to add page flip irq id!\n");
2954 return r;
2955 }
2956
2957 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2958 int_params.irq_source =
2959 dc_interrupt_to_irq_source(dc, i, 0);
2960
2961 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2962
2963 c_irq_params->adev = adev;
2964 c_irq_params->irq_src = int_params.irq_source;
2965
2966 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2967 dm_pflip_high_irq, c_irq_params);
2968
2969 }
2970
2971 /* HPD */
2972 r = amdgpu_irq_add_id(adev, client_id,
2973 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2974 if (r) {
2975 DRM_ERROR("Failed to add hpd irq id!\n");
2976 return r;
2977 }
2978
2979 register_hpd_handlers(adev);
2980
2981 return 0;
2982}
2983#endif
2984
4562236b
HW
2985/* Register IRQ sources and initialize IRQ callbacks */
2986static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2987{
2988 struct dc *dc = adev->dm.dc;
2989 struct common_irq_params *c_irq_params;
2990 struct dc_interrupt_params int_params = {0};
2991 int r;
2992 int i;
1ffdeca6 2993 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 2994
84374725 2995 if (adev->asic_type >= CHIP_VEGA10)
3760f76c 2996 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
2997
2998 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2999 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3000
1f6010a9
DF
3001 /*
3002 * Actions of amdgpu_irq_add_id():
4562236b
HW
3003 * 1. Register a set() function with base driver.
3004 * Base driver will call set() function to enable/disable an
3005 * interrupt in DC hardware.
3006 * 2. Register amdgpu_dm_irq_handler().
3007 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3008 * coming from DC hardware.
3009 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3010 * for acknowledging and handling. */
3011
b57de80a 3012 /* Use VBLANK interrupt */
e9029155 3013 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 3014 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
3015 if (r) {
3016 DRM_ERROR("Failed to add crtc irq id!\n");
3017 return r;
3018 }
3019
3020 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3021 int_params.irq_source =
3d761e79 3022 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 3023
b57de80a 3024 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
3025
3026 c_irq_params->adev = adev;
3027 c_irq_params->irq_src = int_params.irq_source;
3028
3029 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3030 dm_crtc_high_irq, c_irq_params);
3031 }
3032
d2574c33
MK
3033 /* Use VUPDATE interrupt */
3034 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3035 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3036 if (r) {
3037 DRM_ERROR("Failed to add vupdate irq id!\n");
3038 return r;
3039 }
3040
3041 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3042 int_params.irq_source =
3043 dc_interrupt_to_irq_source(dc, i, 0);
3044
3045 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3046
3047 c_irq_params->adev = adev;
3048 c_irq_params->irq_src = int_params.irq_source;
3049
3050 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3051 dm_vupdate_high_irq, c_irq_params);
3052 }
3053
3d761e79 3054 /* Use GRPH_PFLIP interrupt */
4562236b
HW
3055 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3056 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 3057 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
3058 if (r) {
3059 DRM_ERROR("Failed to add page flip irq id!\n");
3060 return r;
3061 }
3062
3063 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3064 int_params.irq_source =
3065 dc_interrupt_to_irq_source(dc, i, 0);
3066
3067 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3068
3069 c_irq_params->adev = adev;
3070 c_irq_params->irq_src = int_params.irq_source;
3071
3072 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3073 dm_pflip_high_irq, c_irq_params);
3074
3075 }
3076
3077 /* HPD */
2c8ad2d5
AD
3078 r = amdgpu_irq_add_id(adev, client_id,
3079 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
3080 if (r) {
3081 DRM_ERROR("Failed to add hpd irq id!\n");
3082 return r;
3083 }
3084
3085 register_hpd_handlers(adev);
3086
3087 return 0;
3088}
3089
b86a1aa3 3090#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3091/* Register IRQ sources and initialize IRQ callbacks */
3092static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3093{
3094 struct dc *dc = adev->dm.dc;
3095 struct common_irq_params *c_irq_params;
3096 struct dc_interrupt_params int_params = {0};
3097 int r;
3098 int i;
660d5406
WL
3099#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3100 static const unsigned int vrtl_int_srcid[] = {
3101 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3102 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3103 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3104 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3105 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3106 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3107 };
3108#endif
ff5ef992
AD
3109
3110 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3111 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3112
1f6010a9
DF
3113 /*
3114 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
3115 * 1. Register a set() function with base driver.
3116 * Base driver will call set() function to enable/disable an
3117 * interrupt in DC hardware.
3118 * 2. Register amdgpu_dm_irq_handler().
3119 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3120 * coming from DC hardware.
3121 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3122 * for acknowledging and handling.
1f6010a9 3123 */
ff5ef992
AD
3124
3125 /* Use VSTARTUP interrupt */
3126 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3127 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3128 i++) {
3760f76c 3129 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
3130
3131 if (r) {
3132 DRM_ERROR("Failed to add crtc irq id!\n");
3133 return r;
3134 }
3135
3136 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3137 int_params.irq_source =
3138 dc_interrupt_to_irq_source(dc, i, 0);
3139
3140 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3141
3142 c_irq_params->adev = adev;
3143 c_irq_params->irq_src = int_params.irq_source;
3144
2346ef47
NK
3145 amdgpu_dm_irq_register_interrupt(
3146 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3147 }
3148
86bc2219
WL
3149 /* Use otg vertical line interrupt */
3150#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
660d5406
WL
3151 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3152 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3153 vrtl_int_srcid[i], &adev->vline0_irq);
86bc2219
WL
3154
3155 if (r) {
3156 DRM_ERROR("Failed to add vline0 irq id!\n");
3157 return r;
3158 }
3159
3160 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3161 int_params.irq_source =
660d5406
WL
3162 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3163
3164 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3165 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3166 break;
3167 }
86bc2219
WL
3168
3169 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3170 - DC_IRQ_SOURCE_DC1_VLINE0];
3171
3172 c_irq_params->adev = adev;
3173 c_irq_params->irq_src = int_params.irq_source;
3174
3175 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3176 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3177 }
3178#endif
3179
2346ef47
NK
3180 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3181 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3182 * to trigger at end of each vblank, regardless of state of the lock,
3183 * matching DCE behaviour.
3184 */
3185 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3186 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3187 i++) {
3188 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3189
3190 if (r) {
3191 DRM_ERROR("Failed to add vupdate irq id!\n");
3192 return r;
3193 }
3194
3195 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3196 int_params.irq_source =
3197 dc_interrupt_to_irq_source(dc, i, 0);
3198
3199 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3200
3201 c_irq_params->adev = adev;
3202 c_irq_params->irq_src = int_params.irq_source;
3203
ff5ef992 3204 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 3205 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
3206 }
3207
ff5ef992
AD
3208 /* Use GRPH_PFLIP interrupt */
3209 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3210 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3211 i++) {
3760f76c 3212 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
3213 if (r) {
3214 DRM_ERROR("Failed to add page flip irq id!\n");
3215 return r;
3216 }
3217
3218 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3219 int_params.irq_source =
3220 dc_interrupt_to_irq_source(dc, i, 0);
3221
3222 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3223
3224 c_irq_params->adev = adev;
3225 c_irq_params->irq_src = int_params.irq_source;
3226
3227 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3228 dm_pflip_high_irq, c_irq_params);
3229
3230 }
3231
81927e28
JS
3232 /* HPD */
3233 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3234 &adev->hpd_irq);
3235 if (r) {
3236 DRM_ERROR("Failed to add hpd irq id!\n");
3237 return r;
3238 }
a08f16cf 3239
81927e28 3240 register_hpd_handlers(adev);
a08f16cf 3241
81927e28
JS
3242 return 0;
3243}
3244/* Register Outbox IRQ sources and initialize IRQ callbacks */
3245static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3246{
3247 struct dc *dc = adev->dm.dc;
3248 struct common_irq_params *c_irq_params;
3249 struct dc_interrupt_params int_params = {0};
3250 int r, i;
3251
3252 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3253 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3254
3255 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3256 &adev->dmub_outbox_irq);
3257 if (r) {
3258 DRM_ERROR("Failed to add outbox irq id!\n");
3259 return r;
3260 }
3261
3262 if (dc->ctx->dmub_srv) {
3263 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3264 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
a08f16cf 3265 int_params.irq_source =
81927e28 3266 dc_interrupt_to_irq_source(dc, i, 0);
a08f16cf 3267
81927e28 3268 c_irq_params = &adev->dm.dmub_outbox_params[0];
a08f16cf
LHM
3269
3270 c_irq_params->adev = adev;
3271 c_irq_params->irq_src = int_params.irq_source;
3272
3273 amdgpu_dm_irq_register_interrupt(adev, &int_params,
81927e28 3274 dm_dmub_outbox1_low_irq, c_irq_params);
ff5ef992
AD
3275 }
3276
ff5ef992
AD
3277 return 0;
3278}
3279#endif
3280
eb3dc897
NK
3281/*
3282 * Acquires the lock for the atomic state object and returns
3283 * the new atomic state.
3284 *
3285 * This should only be called during atomic check.
3286 */
3287static int dm_atomic_get_state(struct drm_atomic_state *state,
3288 struct dm_atomic_state **dm_state)
3289{
3290 struct drm_device *dev = state->dev;
3291 	struct amdgpu_device *adev = drm_to_adev(dev);
3292 struct amdgpu_display_manager *dm = &adev->dm;
3293 struct drm_private_state *priv_state;
3294
3295 if (*dm_state)
3296 return 0;
3297
3298 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3299 if (IS_ERR(priv_state))
3300 return PTR_ERR(priv_state);
3301
3302 *dm_state = to_dm_atomic_state(priv_state);
3303
3304 return 0;
3305}
3306
3307 static struct dm_atomic_state *
3308dm_atomic_get_new_state(struct drm_atomic_state *state)
3309{
3310 struct drm_device *dev = state->dev;
3311 	struct amdgpu_device *adev = drm_to_adev(dev);
3312 struct amdgpu_display_manager *dm = &adev->dm;
3313 struct drm_private_obj *obj;
3314 struct drm_private_state *new_obj_state;
3315 int i;
3316
3317 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3318 if (obj->funcs == dm->atomic_obj.funcs)
3319 return to_dm_atomic_state(new_obj_state);
3320 }
3321
3322 return NULL;
3323}
3324
3325static struct drm_private_state *
3326dm_atomic_duplicate_state(struct drm_private_obj *obj)
3327{
3328 struct dm_atomic_state *old_state, *new_state;
3329
3330 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3331 if (!new_state)
3332 return NULL;
3333
3334 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3335
3336 old_state = to_dm_atomic_state(obj->state);
3337
3338 if (old_state && old_state->context)
3339 new_state->context = dc_copy_state(old_state->context);
3340
3341 if (!new_state->context) {
3342 kfree(new_state);
3343 return NULL;
3344 }
3345
3346 return &new_state->base;
3347}
3348
3349static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3350 struct drm_private_state *state)
3351{
3352 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3353
3354 if (dm_state && dm_state->context)
3355 dc_release_state(dm_state->context);
3356
3357 kfree(dm_state);
3358}
3359
3360static struct drm_private_state_funcs dm_atomic_state_funcs = {
3361 .atomic_duplicate_state = dm_atomic_duplicate_state,
3362 .atomic_destroy_state = dm_atomic_destroy_state,
3363};
3364
3365static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3366{
3367 	struct dm_atomic_state *state;
3368 int r;
3369
3370 adev->mode_info.mode_config_initialized = true;
3371
3372 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3373 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3374
3375 adev_to_drm(adev)->mode_config.max_width = 16384;
3376 adev_to_drm(adev)->mode_config.max_height = 16384;
3377
3378 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3379 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3380 	/* indicates support for immediate flip */
3381 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3382
3383 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3384
3385 state = kzalloc(sizeof(*state), GFP_KERNEL);
3386 if (!state)
3387 return -ENOMEM;
3388
3389 	state->context = dc_create_state(adev->dm.dc);
3390 if (!state->context) {
3391 kfree(state);
3392 return -ENOMEM;
3393 }
3394
3395 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3396
3397 	drm_atomic_private_obj_init(adev_to_drm(adev),
3398 				    &adev->dm.atomic_obj,
3399 &state->base,
3400 &dm_atomic_state_funcs);
3401
3402 	r = amdgpu_display_modeset_create_props(adev);
3403 if (r) {
3404 dc_release_state(state->context);
3405 kfree(state);
3406 		return r;
3407 	}
3408
3409 	r = amdgpu_dm_audio_init(adev);
3410 if (r) {
3411 dc_release_state(state->context);
3412 kfree(state);
3413 		return r;
3414 	}
3415
3416 return 0;
3417}
3418
3419#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3420#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3421 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3422
3423#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3424 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3425
3426static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3427{
3428#if defined(CONFIG_ACPI)
3429 struct amdgpu_dm_backlight_caps caps;
3430
3431 memset(&caps, 0, sizeof(caps));
3432
3433 if (dm->backlight_caps.caps_valid)
3434 return;
3435
3436 	amdgpu_acpi_get_backlight_caps(&caps);
3437 	if (caps.caps_valid) {
3438 dm->backlight_caps.caps_valid = true;
3439 if (caps.aux_support)
3440 return;
3441 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3442 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3443 } else {
3444 dm->backlight_caps.min_input_signal =
3445 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3446 dm->backlight_caps.max_input_signal =
3447 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3448 }
3449#else
3450 if (dm->backlight_caps.aux_support)
3451 return;
3452
3453 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3454 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3455#endif
3456}
3457
3458static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3459 unsigned *min, unsigned *max)
3460 {
3461 	if (!caps)
3462 		return 0;
3463
3464 if (caps->aux_support) {
3465 // Firmware limits are in nits, DC API wants millinits.
3466 *max = 1000 * caps->aux_max_input_signal;
3467 *min = 1000 * caps->aux_min_input_signal;
3468 	} else {
3469 // Firmware limits are 8-bit, PWM control is 16-bit.
3470 *max = 0x101 * caps->max_input_signal;
3471 *min = 0x101 * caps->min_input_signal;
3472 	}
3473 return 1;
3474}
3475
3476static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3477 uint32_t brightness)
3478{
3479 unsigned min, max;
3480
3481 if (!get_brightness_range(caps, &min, &max))
3482 return brightness;
3483
3484 // Rescale 0..255 to min..max
3485 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3486 AMDGPU_MAX_BL_LEVEL);
3487}
3488
3489static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3490 uint32_t brightness)
3491{
3492 unsigned min, max;
3493
3494 if (!get_brightness_range(caps, &min, &max))
3495 return brightness;
3496
3497 if (brightness < min)
3498 return 0;
3499 // Rescale min..max to 0..255
3500 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3501 max - min);
3502}
3503
3504static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3505 u32 user_brightness)
3506 {
3507 	struct amdgpu_dm_backlight_caps caps;
3508 	struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3509 	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3510 	bool rc;
3511 	int i;
3512
3513 amdgpu_dm_update_backlight_caps(dm);
3514 caps = dm->backlight_caps;
3515
3516 for (i = 0; i < dm->num_of_edps; i++) {
3517 dm->brightness[i] = user_brightness;
3518 brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3519 		link[i] = (struct dc_link *)dm->backlight_link[i];
3520 	}
3521
3522 	/* Change brightness based on AUX property */
3523 if (caps.aux_support) {
3524 for (i = 0; i < dm->num_of_edps; i++) {
3525 			rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3526 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3527 if (!rc) {
3528 				DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3529 break;
3530 }
3531 }
3532 } else {
3533 for (i = 0; i < dm->num_of_edps; i++) {
3534 			rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3535 			if (!rc) {
3536 				DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
3537 break;
3538 }
3539 }
3540 }
3541
3542 return rc ? 0 : 1;
3543}
3544
3545 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3546 {
3547 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3548
3549 amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3550
3551 return 0;
3552}
3553
3554static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3555{
3556 struct amdgpu_dm_backlight_caps caps;
3557
3558 amdgpu_dm_update_backlight_caps(dm);
3559 caps = dm->backlight_caps;
3560
3561 	if (caps.aux_support) {
3562 		struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3563 u32 avg, peak;
3564 bool rc;
3565
3566 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3567 if (!rc)
3568 			return dm->brightness[0];
3569 return convert_brightness_to_user(&caps, avg);
3570 } else {
3571 		int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3572
3573 if (ret == DC_ERROR_UNEXPECTED)
3574 			return dm->brightness[0];
3575 return convert_brightness_to_user(&caps, ret);
3576 }
3577}
3578
3579static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3580{
3581 struct amdgpu_display_manager *dm = bl_get_data(bd);
3582
3583 return amdgpu_dm_backlight_get_level(dm);
3584}
3585
3586 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3587 	.options = BL_CORE_SUSPENDRESUME,
3588 .get_brightness = amdgpu_dm_backlight_get_brightness,
3589 .update_status = amdgpu_dm_backlight_update_status,
3590};
3591
3592static void
3593amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3594{
3595 char bl_name[16];
3596 struct backlight_properties props = { 0 };
3597 	int i;
3598
3599 	amdgpu_dm_update_backlight_caps(dm);
3600 for (i = 0; i < dm->num_of_edps; i++)
3601 dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3602
3603 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3604 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3605 props.type = BACKLIGHT_RAW;
3606
3607 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3608 		 adev_to_drm(dm->adev)->primary->index);
3609
3610 dm->backlight_dev = backlight_device_register(bl_name,
3611 adev_to_drm(dm->adev)->dev,
3612 dm,
3613 &amdgpu_dm_backlight_ops,
3614 &props);
3615
3616 	if (IS_ERR(dm->backlight_dev))
3617 DRM_ERROR("DM: Backlight registration failed!\n");
3618 else
3619 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3620}
3621
3622#endif
3623
3624 static int initialize_plane(struct amdgpu_display_manager *dm,
3625 			    struct amdgpu_mode_info *mode_info, int plane_id,
3626 enum drm_plane_type plane_type,
3627 const struct dc_plane_cap *plane_cap)
3628 {
3629 	struct drm_plane *plane;
3630 unsigned long possible_crtcs;
3631 int ret = 0;
3632
3633 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3634 if (!plane) {
3635 DRM_ERROR("KMS: Failed to allocate plane\n");
3636 return -ENOMEM;
3637 }
3638 	plane->type = plane_type;
3639
3640 /*
3641 * HACK: IGT tests expect that the primary plane for a CRTC
3642 * can only have one possible CRTC. Only expose support for
3643 * any CRTC if they're not going to be used as a primary plane
3644 * for a CRTC - like overlay or underlay planes.
3645 */
3646 possible_crtcs = 1 << plane_id;
3647 if (plane_id >= dm->dc->caps.max_streams)
3648 possible_crtcs = 0xff;
3649
3650 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3651
3652 if (ret) {
3653 DRM_ERROR("KMS: Failed to initialize plane\n");
3654 		kfree(plane);
3655 return ret;
3656 }
3657
3658 if (mode_info)
3659 mode_info->planes[plane_id] = plane;
3660
df534fff
S
3661 return ret;
3662}
3663
3664
3665static void register_backlight_device(struct amdgpu_display_manager *dm,
3666 struct dc_link *link)
3667{
3668#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3669 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3670
3671 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3672 link->type != dc_connection_none) {
3673 		/*
3674 		 * Even if registration failed, we should continue with
3675 		 * DM initialization because not having a backlight control
3676 		 * is better than a black screen.
3677 */
3678 if (!dm->backlight_dev)
3679 amdgpu_dm_register_backlight_device(dm);
3680
3681 if (dm->backlight_dev) {
3682 dm->backlight_link[dm->num_of_edps] = link;
3683 dm->num_of_edps++;
3684 }
3685 }
3686#endif
3687}
3688
3689
3690 /*
3691  * In this architecture, the association
3692  * connector -> encoder -> crtc
3693  * is not really required. The crtc and connector will hold the
3694  * display_index as an abstraction to use with the DAL component.
3695  *
3696  * Returns 0 on success
3697  */
3698 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3699{
3700 struct amdgpu_display_manager *dm = &adev->dm;
3701 	int32_t i;
3702 	struct amdgpu_dm_connector *aconnector = NULL;
3703 	struct amdgpu_encoder *aencoder = NULL;
3704 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3705 	uint32_t link_cnt;
3706 	int32_t primary_planes;
3707 	enum dc_connection_type new_connection_type = dc_connection_none;
3708 	const struct dc_plane_cap *plane;
3709
3710 dm->display_indexes_num = dm->dc->caps.max_streams;
3711 /* Update the actual used number of crtc */
3712 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3713
3714 	link_cnt = dm->dc->caps.max_links;
3715 if (amdgpu_dm_mode_config_init(dm->adev)) {
3716 DRM_ERROR("DM: Failed to initialize mode config\n");
3717 		return -EINVAL;
3718 }
3719
3720 /* There is one primary plane per CRTC */
3721 primary_planes = dm->dc->caps.max_streams;
3722 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3723
3724 /*
3725 * Initialize primary planes, implicit planes for legacy IOCTLS.
3726 * Order is reversed to match iteration order in atomic check.
3727 */
3728 for (i = (primary_planes - 1); i >= 0; i--) {
3729 plane = &dm->dc->caps.planes[i];
3730
b2fddb13 3731 if (initialize_plane(dm, mode_info, i,
3732 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3733 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3734 			goto fail;
3735 		}
3736 	}
3737
3738 /*
3739 * Initialize overlay planes, index starting after primary planes.
3740 * These planes have a higher DRM index than the primary planes since
3741 * they should be considered as having a higher z-order.
3742 * Order is reversed to match iteration order in atomic check.
3743 *
3744 * Only support DCN for now, and only expose one so we don't encourage
3745 * userspace to use up all the pipes.
3746 	 */
3747 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3748 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3749
3750 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3751 continue;
3752
3753 if (!plane->blends_with_above || !plane->blends_with_below)
3754 continue;
3755
3756 		if (!plane->pixel_format_support.argb8888)
3757 continue;
3758
3759 		if (initialize_plane(dm, NULL, primary_planes + i,
3760 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3761 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3762 			goto fail;
3763 		}
3764
3765 /* Only create one overlay plane. */
3766 break;
3767 	}
3768
3769 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3770 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3771 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3772 			goto fail;
3773 		}
3774
3775 #if defined(CONFIG_DRM_AMD_DC_DCN)
3776 /* Use Outbox interrupt */
3777 switch (adev->asic_type) {
3778 case CHIP_SIENNA_CICHLID:
3779 case CHIP_NAVY_FLOUNDER:
3780 	case CHIP_YELLOW_CARP:
3781 case CHIP_RENOIR:
3782 if (register_outbox_irq_handlers(dm->adev)) {
3783 DRM_ERROR("DM: Failed to initialize IRQ\n");
3784 goto fail;
3785 }
3786 break;
3787 default:
3788 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3789 }
3790 #endif
3791
3792 /* loops over all connectors on the board */
3793 for (i = 0; i < link_cnt; i++) {
3794 		struct dc_link *link = NULL;
3795
3796 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3797 DRM_ERROR(
3798 "KMS: Cannot support more than %d display indexes\n",
3799 AMDGPU_DM_MAX_DISPLAY_INDEX);
3800 continue;
3801 }
3802
3803 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3804 if (!aconnector)
3805 			goto fail;
3806
3807 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3808 		if (!aencoder)
3809 			goto fail;
3810
3811 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3812 DRM_ERROR("KMS: Failed to initialize encoder\n");
3813 			goto fail;
3814 }
3815
3816 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3817 DRM_ERROR("KMS: Failed to initialize connector\n");
3818 			goto fail;
3819 }
3820
3821 link = dc_get_link_at_index(dm->dc, i);
3822
3823 if (!dc_link_detect_sink(link, &new_connection_type))
3824 DRM_ERROR("KMS: Failed to detect connector\n");
3825
3826 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3827 emulated_link_detect(link);
3828 amdgpu_dm_update_connector_after_detect(aconnector);
3829
3830 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3831 			amdgpu_dm_update_connector_after_detect(aconnector);
3832 			register_backlight_device(dm, link);
3833 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3834 amdgpu_dm_set_psr_caps(link);
3835 }
3836
3837
3838 }
3839
3840 /* Software is initialized. Now we can register interrupt handlers. */
3841 switch (adev->asic_type) {
3842#if defined(CONFIG_DRM_AMD_DC_SI)
3843 case CHIP_TAHITI:
3844 case CHIP_PITCAIRN:
3845 case CHIP_VERDE:
3846 case CHIP_OLAND:
3847 if (dce60_register_irq_handlers(dm->adev)) {
3848 DRM_ERROR("DM: Failed to initialize IRQ\n");
3849 goto fail;
3850 }
3851 break;
3852#endif
3853 case CHIP_BONAIRE:
3854 case CHIP_HAWAII:
3855 case CHIP_KAVERI:
3856 case CHIP_KABINI:
3857 case CHIP_MULLINS:
3858 case CHIP_TONGA:
3859 case CHIP_FIJI:
3860 case CHIP_CARRIZO:
3861 case CHIP_STONEY:
3862 case CHIP_POLARIS11:
3863 case CHIP_POLARIS10:
3864 	case CHIP_POLARIS12:
3865 	case CHIP_VEGAM:
3866 	case CHIP_VEGA10:
3867 	case CHIP_VEGA12:
3868 	case CHIP_VEGA20:
3869 if (dce110_register_irq_handlers(dm->adev)) {
3870 DRM_ERROR("DM: Failed to initialize IRQ\n");
3871 			goto fail;
3872 }
3873 break;
3874 #if defined(CONFIG_DRM_AMD_DC_DCN)
3875 	case CHIP_RAVEN:
3876 	case CHIP_NAVI12:
3877 	case CHIP_NAVI10:
3878 	case CHIP_NAVI14:
3879 	case CHIP_RENOIR:
3880 	case CHIP_SIENNA_CICHLID:
3881 	case CHIP_NAVY_FLOUNDER:
3882 	case CHIP_DIMGREY_CAVEFISH:
3883 	case CHIP_BEIGE_GOBY:
3884 	case CHIP_VANGOGH:
3885 	case CHIP_YELLOW_CARP:
3886 if (dcn10_register_irq_handlers(dm->adev)) {
3887 DRM_ERROR("DM: Failed to initialize IRQ\n");
3888 			goto fail;
3889 }
3890 break;
3891#endif
3892 	default:
3893 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3894 		goto fail;
3895 }
3896
3897 	return 0;
3898 fail:
3899 	kfree(aencoder);
3900 	kfree(aconnector);
3901
3902 	return -EINVAL;
3903}
3904
3905 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3906 {
3907 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3908 return;
3909}
3910
3911/******************************************************************************
3912 * amdgpu_display_funcs functions
3913 *****************************************************************************/
3914
3915 /*
3916 * dm_bandwidth_update - program display watermarks
3917 *
3918 * @adev: amdgpu_device pointer
3919 *
3920 * Calculate and program the display watermarks and line buffer allocation.
3921 */
3922static void dm_bandwidth_update(struct amdgpu_device *adev)
3923{
3924 	/* TODO: implement later */
3925}
3926
3927 static const struct amdgpu_display_funcs dm_display_funcs = {
3928 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3929 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3930 .backlight_set_level = NULL, /* never called for DC */
3931 .backlight_get_level = NULL, /* never called for DC */
3932 .hpd_sense = NULL,/* called unconditionally */
3933 .hpd_set_polarity = NULL, /* called unconditionally */
3934 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3935 .page_flip_get_scanoutpos =
3936 dm_crtc_get_scanoutpos,/* called unconditionally */
3937 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3938 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3939};
3940
3941#if defined(CONFIG_DEBUG_KERNEL_DC)
3942
3943static ssize_t s3_debug_store(struct device *device,
3944 struct device_attribute *attr,
3945 const char *buf,
3946 size_t count)
3947{
3948 int ret;
3949 int s3_state;
3950 	struct drm_device *drm_dev = dev_get_drvdata(device);
3951 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3952
3953 ret = kstrtoint(buf, 0, &s3_state);
3954
3955 if (ret == 0) {
3956 if (s3_state) {
3957 dm_resume(adev);
3958 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3959 } else
3960 dm_suspend(adev);
3961 }
3962
3963 return ret == 0 ? count : 0;
3964}
3965
3966DEVICE_ATTR_WO(s3_debug);
3967
3968#endif
3969
3970static int dm_early_init(void *handle)
3971{
3972 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3973
3974 	switch (adev->asic_type) {
3975#if defined(CONFIG_DRM_AMD_DC_SI)
3976 case CHIP_TAHITI:
3977 case CHIP_PITCAIRN:
3978 case CHIP_VERDE:
3979 adev->mode_info.num_crtc = 6;
3980 adev->mode_info.num_hpd = 6;
3981 adev->mode_info.num_dig = 6;
3982 break;
3983 case CHIP_OLAND:
3984 adev->mode_info.num_crtc = 2;
3985 adev->mode_info.num_hpd = 2;
3986 adev->mode_info.num_dig = 2;
3987 break;
3988#endif
3989 case CHIP_BONAIRE:
3990 case CHIP_HAWAII:
3991 adev->mode_info.num_crtc = 6;
3992 adev->mode_info.num_hpd = 6;
3993 adev->mode_info.num_dig = 6;
3994 		break;
3995 case CHIP_KAVERI:
3996 adev->mode_info.num_crtc = 4;
3997 adev->mode_info.num_hpd = 6;
3998 adev->mode_info.num_dig = 7;
3999 break;
4000 case CHIP_KABINI:
4001 case CHIP_MULLINS:
4002 adev->mode_info.num_crtc = 2;
4003 adev->mode_info.num_hpd = 6;
4004 adev->mode_info.num_dig = 6;
4005 		break;
4006 case CHIP_FIJI:
4007 case CHIP_TONGA:
4008 adev->mode_info.num_crtc = 6;
4009 adev->mode_info.num_hpd = 6;
4010 adev->mode_info.num_dig = 7;
4011 break;
4012 case CHIP_CARRIZO:
4013 adev->mode_info.num_crtc = 3;
4014 adev->mode_info.num_hpd = 6;
4015 adev->mode_info.num_dig = 9;
4016 break;
4017 case CHIP_STONEY:
4018 adev->mode_info.num_crtc = 2;
4019 adev->mode_info.num_hpd = 6;
4020 adev->mode_info.num_dig = 9;
4021 break;
4022 case CHIP_POLARIS11:
4023 	case CHIP_POLARIS12:
4024 adev->mode_info.num_crtc = 5;
4025 adev->mode_info.num_hpd = 5;
4026 adev->mode_info.num_dig = 5;
4027 break;
4028 case CHIP_POLARIS10:
4029 	case CHIP_VEGAM:
4030 adev->mode_info.num_crtc = 6;
4031 adev->mode_info.num_hpd = 6;
4032 adev->mode_info.num_dig = 6;
4033 		break;
4034 	case CHIP_VEGA10:
4035 	case CHIP_VEGA12:
4036 	case CHIP_VEGA20:
4037 adev->mode_info.num_crtc = 6;
4038 adev->mode_info.num_hpd = 6;
4039 adev->mode_info.num_dig = 6;
4040 break;
4041 #if defined(CONFIG_DRM_AMD_DC_DCN)
4042 	case CHIP_RAVEN:
4043 case CHIP_RENOIR:
4044 case CHIP_VANGOGH:
4045 adev->mode_info.num_crtc = 4;
4046 adev->mode_info.num_hpd = 4;
4047 adev->mode_info.num_dig = 4;
4048 		break;
4049 	case CHIP_NAVI10:
4050 	case CHIP_NAVI12:
4051 	case CHIP_SIENNA_CICHLID:
4052 	case CHIP_NAVY_FLOUNDER:
4053 adev->mode_info.num_crtc = 6;
4054 adev->mode_info.num_hpd = 6;
4055 adev->mode_info.num_dig = 6;
4056 break;
4057 case CHIP_YELLOW_CARP:
4058 adev->mode_info.num_crtc = 4;
4059 adev->mode_info.num_hpd = 4;
4060 adev->mode_info.num_dig = 4;
4061 break;
4062 	case CHIP_NAVI14:
4063 	case CHIP_DIMGREY_CAVEFISH:
4064 adev->mode_info.num_crtc = 5;
4065 adev->mode_info.num_hpd = 5;
4066 adev->mode_info.num_dig = 5;
4067 break;
4068 case CHIP_BEIGE_GOBY:
4069 adev->mode_info.num_crtc = 2;
4070 adev->mode_info.num_hpd = 2;
4071 adev->mode_info.num_dig = 2;
4072 break;
4073 #endif
4074 	default:
4075 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4076 return -EINVAL;
4077 }
4078
4079 amdgpu_dm_set_irq_funcs(adev);
4080
4081 if (adev->mode_info.funcs == NULL)
4082 adev->mode_info.funcs = &dm_display_funcs;
4083
4084 /*
4085 * Note: Do NOT change adev->audio_endpt_rreg and
4086 	 * adev->audio_endpt_wreg because they are initialised in
4087 * amdgpu_device_init()
4088 */
4089#if defined(CONFIG_DEBUG_KERNEL_DC)
4090 device_create_file(
4091 		adev_to_drm(adev)->dev,
4092 &dev_attr_s3_debug);
4093#endif
4094
4095 return 0;
4096}
4097
4098 static bool modeset_required(struct drm_crtc_state *crtc_state,
4099 struct dc_stream_state *new_stream,
4100 struct dc_stream_state *old_stream)
4101 {
4102 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4103}
4104
4105static bool modereset_required(struct drm_crtc_state *crtc_state)
4106{
4107 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4108}
4109
4110 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4111{
4112 drm_encoder_cleanup(encoder);
4113 kfree(encoder);
4114}
4115
4116static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4117 .destroy = amdgpu_dm_encoder_destroy,
4118};
4119
4120
4121static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4122 struct drm_framebuffer *fb,
4123 int *min_downscale, int *max_upscale)
4124{
4125 struct amdgpu_device *adev = drm_to_adev(dev);
4126 struct dc *dc = adev->dm.dc;
4127 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4128 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4129
4130 switch (fb->format->format) {
4131 case DRM_FORMAT_P010:
4132 case DRM_FORMAT_NV12:
4133 case DRM_FORMAT_NV21:
4134 *max_upscale = plane_cap->max_upscale_factor.nv12;
4135 *min_downscale = plane_cap->max_downscale_factor.nv12;
4136 break;
4137
4138 case DRM_FORMAT_XRGB16161616F:
4139 case DRM_FORMAT_ARGB16161616F:
4140 case DRM_FORMAT_XBGR16161616F:
4141 case DRM_FORMAT_ABGR16161616F:
4142 *max_upscale = plane_cap->max_upscale_factor.fp16;
4143 *min_downscale = plane_cap->max_downscale_factor.fp16;
4144 break;
4145
4146 default:
4147 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4148 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4149 break;
4150 }
4151
4152 /*
4153 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4154 * scaling factor of 1.0 == 1000 units.
4155 */
4156 if (*max_upscale == 1)
4157 *max_upscale = 1000;
4158
4159 if (*min_downscale == 1)
4160 *min_downscale = 1000;
4161}
4162
4163
4164static int fill_dc_scaling_info(const struct drm_plane_state *state,
4165 struct dc_scaling_info *scaling_info)
4166 {
4167 	int scale_w, scale_h, min_downscale, max_upscale;
4168
4169 	memset(scaling_info, 0, sizeof(*scaling_info));
4170
4171 /* Source is fixed 16.16 but we ignore mantissa for now... */
4172 scaling_info->src_rect.x = state->src_x >> 16;
4173 scaling_info->src_rect.y = state->src_y >> 16;
4174
4175 /*
4176 * For reasons we don't (yet) fully understand a non-zero
4177 * src_y coordinate into an NV12 buffer can cause a
4178 * system hang. To avoid hangs (and maybe be overly cautious)
4179 * let's reject both non-zero src_x and src_y.
4180 *
4181 * We currently know of only one use-case to reproduce a
4182 * scenario with non-zero src_x and src_y for NV12, which
4183 * is to gesture the YouTube Android app into full screen
4184 * on ChromeOS.
4185 */
4186 if (state->fb &&
4187 state->fb->format->format == DRM_FORMAT_NV12 &&
4188 (scaling_info->src_rect.x != 0 ||
4189 scaling_info->src_rect.y != 0))
4190 return -EINVAL;
4191
4192 scaling_info->src_rect.width = state->src_w >> 16;
4193 if (scaling_info->src_rect.width == 0)
4194 return -EINVAL;
4195
4196 scaling_info->src_rect.height = state->src_h >> 16;
4197 if (scaling_info->src_rect.height == 0)
4198 return -EINVAL;
4199
4200 scaling_info->dst_rect.x = state->crtc_x;
4201 scaling_info->dst_rect.y = state->crtc_y;
4202
4203 if (state->crtc_w == 0)
4204 		return -EINVAL;
4205
4206 	scaling_info->dst_rect.width = state->crtc_w;
4207
4208 if (state->crtc_h == 0)
4209 		return -EINVAL;
4210
4211 	scaling_info->dst_rect.height = state->crtc_h;
4212
4213 /* DRM doesn't specify clipping on destination output. */
4214 scaling_info->clip_rect = scaling_info->dst_rect;
4215
4216 /* Validate scaling per-format with DC plane caps */
4217 if (state->plane && state->plane->dev && state->fb) {
4218 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4219 &min_downscale, &max_upscale);
4220 } else {
4221 min_downscale = 250;
4222 max_upscale = 16000;
4223 }
4224
4225 scale_w = scaling_info->dst_rect.width * 1000 /
4226 scaling_info->src_rect.width;
4227
4228 	if (scale_w < min_downscale || scale_w > max_upscale)
4229 return -EINVAL;
4230
4231 scale_h = scaling_info->dst_rect.height * 1000 /
4232 scaling_info->src_rect.height;
4233
4234 	if (scale_h < min_downscale || scale_h > max_upscale)
4235 return -EINVAL;
4236
4237 /*
4238 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4239 * assume reasonable defaults based on the format.
4240 */
4241
4242 	return 0;
4243 }
4244
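/*
 * Editor's sketch (worked example of the checks above): DRM src_* values
 * are 16.16 fixed point, so a 1920-wide source has src_w = 1920 << 16.
 * Scanning a 1920x1080 source out to a 960x540 CRTC rectangle gives
 *	scale_w = 960 * 1000 / 1920 = 500	(0.5x)
 *	scale_h = 540 * 1000 / 1080 = 500
 * which passes the fallback limits of 250..16000, while a 100x100
 * destination would give scale_w = 52 and fail with -EINVAL.
 */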
4245static void
4246fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4247 uint64_t tiling_flags)
4248 {
4249 /* Fill GFX8 params */
4250 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4251 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4252
4253 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4254 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4255 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4256 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4257 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4258
4259 /* XXX fix me for VI */
4260 tiling_info->gfx8.num_banks = num_banks;
4261 tiling_info->gfx8.array_mode =
4262 DC_ARRAY_2D_TILED_THIN1;
4263 tiling_info->gfx8.tile_split = tile_split;
4264 tiling_info->gfx8.bank_width = bankw;
4265 tiling_info->gfx8.bank_height = bankh;
4266 tiling_info->gfx8.tile_aspect = mtaspect;
4267 tiling_info->gfx8.tile_mode =
4268 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4269 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4270 == DC_ARRAY_1D_TILED_THIN1) {
4271 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4272 }
4273
4274 tiling_info->gfx8.pipe_config =
4275 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4276}
4277
4278static void
4279fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4280 union dc_tiling_info *tiling_info)
4281{
4282 tiling_info->gfx9.num_pipes =
4283 adev->gfx.config.gb_addr_config_fields.num_pipes;
4284 tiling_info->gfx9.num_banks =
4285 adev->gfx.config.gb_addr_config_fields.num_banks;
4286 tiling_info->gfx9.pipe_interleave =
4287 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4288 tiling_info->gfx9.num_shader_engines =
4289 adev->gfx.config.gb_addr_config_fields.num_se;
4290 tiling_info->gfx9.max_compressed_frags =
4291 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4292 tiling_info->gfx9.num_rb_per_se =
4293 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4294 tiling_info->gfx9.shaderEnable = 1;
4295 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4296 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4297 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4298 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4299 	    adev->asic_type == CHIP_YELLOW_CARP ||
4300 adev->asic_type == CHIP_VANGOGH)
4301 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4302}
4303
4304 static int
4305validate_dcc(struct amdgpu_device *adev,
4306 const enum surface_pixel_format format,
4307 const enum dc_rotation_angle rotation,
4308 const union dc_tiling_info *tiling_info,
4309 const struct dc_plane_dcc_param *dcc,
4310 const struct dc_plane_address *address,
4311 const struct plane_size *plane_size)
4312{
4313 struct dc *dc = adev->dm.dc;
4314 struct dc_dcc_surface_param input;
4315 struct dc_surface_dcc_cap output;
4316
4317 memset(&input, 0, sizeof(input));
4318 memset(&output, 0, sizeof(output));
4319
4320 	if (!dcc->enable)
4321 return 0;
4322
4323 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4324 !dc->cap_funcs.get_dcc_compression_cap)
4325 		return -EINVAL;
4326
4327 	input.format = format;
4328 input.surface_size.width = plane_size->surface_size.width;
4329 input.surface_size.height = plane_size->surface_size.height;
4330 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4331
4332 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4333 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4334 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4335 input.scan = SCAN_DIRECTION_VERTICAL;
4336
4337 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4338 		return -EINVAL;
4339
4340 if (!output.capable)
4341 		return -EINVAL;
4342
4343 if (dcc->independent_64b_blks == 0 &&
4344 output.grph.rgb.independent_64b_blks != 0)
4345 		return -EINVAL;
4346
4347 return 0;
4348}
4349
4350static bool
4351modifier_has_dcc(uint64_t modifier)
4352{
4353 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4354}
4355
4356static unsigned
4357modifier_gfx9_swizzle_mode(uint64_t modifier)
4358{
4359 if (modifier == DRM_FORMAT_MOD_LINEAR)
4360 return 0;
4361
4362 return AMD_FMT_MOD_GET(TILE, modifier);
4363}
4364
4365static const struct drm_format_info *
4366amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4367{
4368 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4369}
4370
4371static void
4372fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4373 union dc_tiling_info *tiling_info,
4374 uint64_t modifier)
4375{
4376 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4377 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4378 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4379 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4380
4381 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4382
4383 if (!IS_AMD_FMT_MOD(modifier))
4384 return;
4385
4386 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4387 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4388
4389 if (adev->family >= AMDGPU_FAMILY_NV) {
4390 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4391 } else {
4392 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4393
4394 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4395 }
4396}
4397
4398enum dm_micro_swizzle {
4399 MICRO_SWIZZLE_Z = 0,
4400 MICRO_SWIZZLE_S = 1,
4401 MICRO_SWIZZLE_D = 2,
4402 MICRO_SWIZZLE_R = 3
4403};
4404
4405static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4406 uint32_t format,
4407 uint64_t modifier)
4408{
4409 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4410 const struct drm_format_info *info = drm_format_info(format);
4411 	int i;
4412
4413 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4414
4415 if (!info)
4416 return false;
4417
4418 /*
4419 * We always have to allow these modifiers:
4420 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4421 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4422 	 */
4423 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4424 modifier == DRM_FORMAT_MOD_INVALID) {
4425 		return true;
4426 	}
4427
4428 /* Check that the modifier is on the list of the plane's supported modifiers. */
4429 for (i = 0; i < plane->modifier_count; i++) {
4430 if (modifier == plane->modifiers[i])
4431 break;
4432 }
4433 if (i == plane->modifier_count)
4434 return false;
4435
4436 /*
4437 * For D swizzle the canonical modifier depends on the bpp, so check
4438 * it here.
4439 */
4440 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4441 adev->family >= AMDGPU_FAMILY_NV) {
4442 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4443 return false;
4444 }
4445
4446 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4447 info->cpp[0] < 8)
4448 return false;
4449
4450 if (modifier_has_dcc(modifier)) {
4451 /* Per radeonsi comments 16/64 bpp are more complicated. */
4452 if (info->cpp[0] != 4)
4453 return false;
4454 /* We support multi-planar formats, but not when combined with
4455 * additional DCC metadata planes. */
4456 if (info->num_planes > 1)
4457 return false;
4458 }
4459
4460 return true;
4461}
4462
4463static void
4464add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4465{
4466 if (!*mods)
4467 return;
4468
4469 if (*cap - *size < 1) {
4470 uint64_t new_cap = *cap * 2;
4471 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4472
4473 if (!new_mods) {
4474 kfree(*mods);
4475 *mods = NULL;
4476 return;
4477 }
4478
4479 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4480 kfree(*mods);
4481 *mods = new_mods;
4482 *cap = new_cap;
4483 }
4484
4485 (*mods)[*size] = mod;
4486 *size += 1;
4487}
4488
4489static void
4490add_gfx9_modifiers(const struct amdgpu_device *adev,
4491 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4492{
4493 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4494 int pipe_xor_bits = min(8, pipes +
4495 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4496 int bank_xor_bits = min(8 - pipe_xor_bits,
4497 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4498 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4499 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4500
4501
4502 if (adev->family == AMDGPU_FAMILY_RV) {
4503 /* Raven2 and later */
4504 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4505
4506 /*
4507 * No _D DCC swizzles yet because we only allow 32bpp, which
4508 * doesn't support _D on DCN
4509 */
4510
4511 if (has_constant_encode) {
4512 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4513 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4514 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4515 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4516 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4517 AMD_FMT_MOD_SET(DCC, 1) |
4518 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4519 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4520 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4521 }
4522
4523 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4524 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4525 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4526 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4527 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4528 AMD_FMT_MOD_SET(DCC, 1) |
4529 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4530 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4531 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4532
4533 if (has_constant_encode) {
4534 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4535 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4536 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4537 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4538 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4539 AMD_FMT_MOD_SET(DCC, 1) |
4540 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4541 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4542 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4543
4544 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4545 AMD_FMT_MOD_SET(RB, rb) |
4546 AMD_FMT_MOD_SET(PIPE, pipes));
4547 }
4548
4549 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4550 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4551 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4552 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4553 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4554 AMD_FMT_MOD_SET(DCC, 1) |
4555 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4556 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4557 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4558 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4559 AMD_FMT_MOD_SET(RB, rb) |
4560 AMD_FMT_MOD_SET(PIPE, pipes));
4561 }
4562
4563 /*
4564 * Only supported for 64bpp on Raven, will be filtered on format in
4565 * dm_plane_format_mod_supported.
4566 */
4567 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4568 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4569 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4570 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4571 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4572
4573 if (adev->family == AMDGPU_FAMILY_RV) {
4574 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4575 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4576 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4577 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4578 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4579 }
4580
4581 /*
4582 * Only supported for 64bpp on Raven, will be filtered on format in
4583 * dm_plane_format_mod_supported.
4584 */
4585 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4586 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4587 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4588
4589 if (adev->family == AMDGPU_FAMILY_RV) {
4590 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4591 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4592 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4593 }
4594}
4595
4596static void
4597add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4598 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4599{
4600 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4601
4602 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4603 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4604 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4605 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4606 AMD_FMT_MOD_SET(DCC, 1) |
4607 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4608 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4609 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4610
4611 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4612 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4613 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4614 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4615 AMD_FMT_MOD_SET(DCC, 1) |
4616 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4617 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4618 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4619 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4620
4621 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4622 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4623 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4624 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4625
4626 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4627 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4628 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4629 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4630
4631
4632 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4633 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4634 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4635 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4636
4637 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4638 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4639 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4640}
4641
4642static void
4643add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4644 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4645{
4646 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4647 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4648
4649 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4650 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4651 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4652 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4653 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4654 AMD_FMT_MOD_SET(DCC, 1) |
4655 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4656 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4657 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4658 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4659
4660 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4661 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4662 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4663 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4664 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4665 AMD_FMT_MOD_SET(DCC, 1) |
4666 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4667 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4668 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4669 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4670 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4671
4672 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4673 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4674 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4675 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4676 AMD_FMT_MOD_SET(PACKERS, pkrs));
4677
4678 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4679 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4680 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4681 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4682 AMD_FMT_MOD_SET(PACKERS, pkrs));
4683
4684 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4685 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4686 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4687 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4688
4689 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4690 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4691 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4692}
4693
4694static int
4695get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4696{
4697 uint64_t size = 0, capacity = 128;
4698 *mods = NULL;
4699
4700 /* We have not hooked up any pre-GFX9 modifiers. */
4701 if (adev->family < AMDGPU_FAMILY_AI)
4702 return 0;
4703
4704 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4705
4706 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4707 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4708 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4709 return *mods ? 0 : -ENOMEM;
4710 }
4711
4712 switch (adev->family) {
4713 case AMDGPU_FAMILY_AI:
4714 case AMDGPU_FAMILY_RV:
4715 add_gfx9_modifiers(adev, mods, &size, &capacity);
4716 break;
4717 case AMDGPU_FAMILY_NV:
4718 case AMDGPU_FAMILY_VGH:
4719 	case AMDGPU_FAMILY_YC:
4720 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4721 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4722 else
4723 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4724 break;
4725 }
4726
4727 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4728
4729 /* INVALID marks the end of the list. */
4730 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4731
4732 if (!*mods)
4733 return -ENOMEM;
4734
4735 return 0;
4736}
4737
4738static int
4739fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4740 const struct amdgpu_framebuffer *afb,
4741 const enum surface_pixel_format format,
4742 const enum dc_rotation_angle rotation,
4743 const struct plane_size *plane_size,
4744 union dc_tiling_info *tiling_info,
4745 struct dc_plane_dcc_param *dcc,
4746 struct dc_plane_address *address,
4747 const bool force_disable_dcc)
4748{
4749 const uint64_t modifier = afb->base.modifier;
4750 	int ret = 0;
4751
4752 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4753 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4754
4755 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4756 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4757
4758 dcc->enable = 1;
4759 dcc->meta_pitch = afb->base.pitches[1];
4760 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4761
4762 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4763 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4764 }
4765
4766 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4767 if (ret)
4768 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
4769
4770 	return ret;
4771}
4772
4773static int
4774 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4775 			     const struct amdgpu_framebuffer *afb,
4776 const enum surface_pixel_format format,
4777 const enum dc_rotation_angle rotation,
4778 const uint64_t tiling_flags,
4779 			     union dc_tiling_info *tiling_info,
4780 			     struct plane_size *plane_size,
4781 			     struct dc_plane_dcc_param *dcc,
4782 			     struct dc_plane_address *address,
4783 			     bool tmz_surface,
4784 			     bool force_disable_dcc)
4785 {
4786 	const struct drm_framebuffer *fb = &afb->base;
4787 int ret;
4788
4789 memset(tiling_info, 0, sizeof(*tiling_info));
4790 	memset(plane_size, 0, sizeof(*plane_size));
4791 	memset(dcc, 0, sizeof(*dcc));
4792 memset(address, 0, sizeof(*address));
4793
4794 address->tmz_surface = tmz_surface;
4795
4796 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4797 uint64_t addr = afb->address + fb->offsets[0];
4798
4799 plane_size->surface_size.x = 0;
4800 plane_size->surface_size.y = 0;
4801 plane_size->surface_size.width = fb->width;
4802 plane_size->surface_size.height = fb->height;
4803 plane_size->surface_pitch =
4804 fb->pitches[0] / fb->format->cpp[0];
4805
4806 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4807 address->grph.addr.low_part = lower_32_bits(addr);
4808 address->grph.addr.high_part = upper_32_bits(addr);
4809 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4810 		uint64_t luma_addr = afb->address + fb->offsets[0];
4811 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4812
4813 plane_size->surface_size.x = 0;
4814 plane_size->surface_size.y = 0;
4815 plane_size->surface_size.width = fb->width;
4816 plane_size->surface_size.height = fb->height;
4817 plane_size->surface_pitch =
4818 fb->pitches[0] / fb->format->cpp[0];
4819
4820 plane_size->chroma_size.x = 0;
4821 plane_size->chroma_size.y = 0;
4822 		/* TODO: set these based on surface format */
4823 plane_size->chroma_size.width = fb->width / 2;
4824 plane_size->chroma_size.height = fb->height / 2;
4825
4826 		plane_size->chroma_pitch =
4827 fb->pitches[1] / fb->format->cpp[1];
4828
4829 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4830 address->video_progressive.luma_addr.low_part =
4831 			lower_32_bits(luma_addr);
4832 		address->video_progressive.luma_addr.high_part =
4833 			upper_32_bits(luma_addr);
4834 address->video_progressive.chroma_addr.low_part =
4835 lower_32_bits(chroma_addr);
4836 address->video_progressive.chroma_addr.high_part =
4837 upper_32_bits(chroma_addr);
4838 }
4839
4840 	if (adev->family >= AMDGPU_FAMILY_AI) {
4841 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4842 rotation, plane_size,
4843 tiling_info, dcc,
4844 address,
4845 force_disable_dcc);
4846 if (ret)
4847 return ret;
4848 } else {
4849 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4850 }
4851
4852 return 0;
4853}
4854
4855 static void
4856 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4857 bool *per_pixel_alpha, bool *global_alpha,
4858 int *global_alpha_value)
4859{
4860 *per_pixel_alpha = false;
4861 *global_alpha = false;
4862 *global_alpha_value = 0xff;
4863
4864 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4865 return;
4866
4867 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4868 static const uint32_t alpha_formats[] = {
4869 DRM_FORMAT_ARGB8888,
4870 DRM_FORMAT_RGBA8888,
4871 DRM_FORMAT_ABGR8888,
4872 };
4873 uint32_t format = plane_state->fb->format->format;
4874 unsigned int i;
4875
4876 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4877 if (format == alpha_formats[i]) {
4878 *per_pixel_alpha = true;
4879 break;
4880 }
4881 }
4882 }
4883
4884 if (plane_state->alpha < 0xffff) {
4885 *global_alpha = true;
4886 *global_alpha_value = plane_state->alpha >> 8;
4887 }
4888}
4889
4890static int
4891fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4892 			    const enum surface_pixel_format format,
4893 enum dc_color_space *color_space)
4894{
4895 bool full_range;
4896
4897 *color_space = COLOR_SPACE_SRGB;
4898
4899 /* DRM color properties only affect non-RGB formats. */
4900 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4901 return 0;
4902
4903 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4904
4905 switch (plane_state->color_encoding) {
4906 case DRM_COLOR_YCBCR_BT601:
4907 if (full_range)
4908 *color_space = COLOR_SPACE_YCBCR601;
4909 else
4910 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4911 break;
4912
4913 case DRM_COLOR_YCBCR_BT709:
4914 if (full_range)
4915 *color_space = COLOR_SPACE_YCBCR709;
4916 else
4917 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4918 break;
4919
4920 case DRM_COLOR_YCBCR_BT2020:
4921 if (full_range)
4922 *color_space = COLOR_SPACE_2020_YCBCR;
4923 else
4924 return -EINVAL;
4925 break;
4926
4927 default:
4928 return -EINVAL;
4929 }
4930
4931 return 0;
4932}
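
/*
 * Illustrative example (not from the original source): an NV12 plane whose
 * state carries DRM_COLOR_YCBCR_BT709 with limited range maps to
 * COLOR_SPACE_YCBCR709_LIMITED, while the same plane with full-range BT.2020
 * maps to COLOR_SPACE_2020_YCBCR; limited-range BT.2020 is rejected with
 * -EINVAL above, as are unknown encodings.
 */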

static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	case DRM_FORMAT_XRGB16161616:
	case DRM_FORMAT_ARGB16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
		break;
	case DRM_FORMAT_XBGR16161616:
	case DRM_FORMAT_ABGR16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %p4cc\n",
			&fb->format->format);
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  afb->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  afb->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* Always 0 for now */
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
		      dst.x, dst.y, dst.width, dst.height);

}
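
/*
 * Worked example (illustrative, not from the original source): scaling a
 * 1920x1080 source onto a 2560x1600 stream with RMX_ASPECT. Here
 * 1920 * 1600 (3072000) is not less than 1080 * 2560 (2764800), so the
 * height is trimmed: dst.height = 1080 * 2560 / 1920 = 1440, centered with
 * dst.y = (1600 - 1440) / 2 = 80, i.e. 80-line letterbox bars top and bottom.
 */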

static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary work around: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}
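
/*
 * Illustrative example (not from the original source): a panel whose EDID
 * reports 12 bpc combined with a userspace "max bpc" property of 10 yields
 * min(12, 10) = 10 -> COLOR_DEPTH_101010. An odd request such as 11 is first
 * rounded down to the nearest even value (11 & ~1 = 10), since DC only
 * handles even color depths.
 */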

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
		 * respectively.
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}
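
/*
 * Illustrative example (not from the original source): 1080p60 YCbCr 4:4:4
 * has a 148.5 MHz pixel clock, stored as pix_clk_100hz = 1485000. That is
 * above the 270300 (27.03 MHz) SDTV/HDTV cutoff, so the stream is flagged
 * COLOR_SPACE_YCBCR709; a 480i/576i SDTV timing below the cutoff would get
 * COLOR_SPACE_YCBCR601 instead.
 */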

static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjust pix clock per the HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
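
/*
 * Worked example (illustrative, not from the original source): 4K60 RGB has
 * pix_clk_100hz = 5940000, i.e. a normalized clock of 594000 in the same kHz
 * units as max_tmds_clock. At 10 bpc that becomes 594000 * 30 / 24 = 742500,
 * which exceeds a 600000 (600 MHz) max_tmds_clock, so the loop steps the
 * depth down and retries until plain 8 bpc (594000) fits.
 */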

static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	if (is_freesync_video_mode(mode_in, aconnector)) {
		timing_out->h_addressable = mode_in->hdisplay;
		timing_out->h_total = mode_in->htotal;
		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
		timing_out->v_total = mode_in->vtotal;
		timing_out->v_addressable = mode_in->vdisplay;
		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
		timing_out->pix_clk_100hz = mode_in->clock * 10;
	} else {
		timing_out->h_addressable = mode_in->crtc_hdisplay;
		timing_out->h_total = mode_in->crtc_htotal;
		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
		timing_out->v_total = mode_in->crtc_vtotal;
		timing_out->v_addressable = mode_in->crtc_vdisplay;
		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	}

	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}

static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	struct dc_stream_state *master = NULL;

	if (stream->triggered_crtc_reset.enabled) {
		master = stream->triggered_crtc_reset.event_source;
		stream->triggered_crtc_reset.event =
			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count ; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
			    struct dc_sink *sink, struct dc_stream_state *stream,
			    struct dsc_dec_dpcd_caps *dsc_caps)
{
	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      dsc_caps);
	}
}

static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
					struct dc_sink *sink, struct dc_stream_state *stream,
					struct dsc_dec_dpcd_caps *dsc_caps)
{
	struct drm_connector *drm_connector = &aconnector->base;
	uint32_t link_bandwidth_kbps;

	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
						     dc_link_get_link_cap(aconnector->dc_link));
	/* Set DSC policy according to dsc_clock_en */
	dc_dsc_policy_set_enable_dsc_when_not_needed(
		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {

		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
					  dsc_caps,
					  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
					  0,
					  link_bandwidth_kbps,
					  &stream->timing,
					  &stream->timing.dsc_cfg)) {
			stream->timing.flags.DSC = 1;
			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
		}
	}

	/* Overwrite the stream flag if DSC is enabled through debugfs */
	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
		stream->timing.flags.DSC = 1;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
#endif

/**
 * DOC: FreeSync Video
 *
 * When a userspace application wants to play a video, the content follows a
 * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
 *
 * - TV/NTSC (23.976 FPS)
 * - Cinema (24 FPS)
 * - TV/PAL (25 FPS)
 * - TV/NTSC (29.97 FPS)
 * - TV/NTSC (30 FPS)
 * - Cinema HFR (48 FPS)
 * - TV/PAL (50 FPS)
 * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96 FPS)
 *
 * The list of standard video formats is not huge and can be added to the
 * connector's modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch happens seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid blinking during the
 * transition. For example, the video player can change the modesetting from
 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
 * causing any display blink. This same concept can be applied to a mode
 * setting change.
 */
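
/*
 * Worked example (illustrative, not from the original source): take a
 * 1920x1080@60 base timing with h_total = 2200, v_total = 1125 and a pixel
 * clock of 148.5 MHz (148500000 / (2200 * 1125) = 60 Hz). To present 30 FPS
 * content, the front porch is stretched so that v_total becomes 2250 while
 * everything else stays identical: 148500000 / (2200 * 2250) = 30 Hz. Since
 * only the vertical front porch and v_total differ, the switch qualifies as
 * a FreeSync video mode change and needs no full modeset.
 */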
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			      bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
		&aconnector->base.probed_modes :
		&aconnector->base.modes;

	if (aconnector->freesync_vid_base.clock != 0)
		return &aconnector->freesync_vid_base;

	/* Find the preferred mode */
	list_for_each_entry (m, list_head, head) {
		if (m->type & DRM_MODE_TYPE_PREFERRED) {
			m_pref = m;
			break;
		}
	}

	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fallback to first entry */
		m_pref = list_first_entry_or_null(
			&aconnector->base.modes, struct drm_display_mode, head);
		if (!m_pref) {
			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
			return NULL;
		}
	}

	highest_refresh = drm_mode_vrefresh(m_pref);

	/*
	 * Find the mode with highest refresh rate with same resolution.
	 * For some monitors, preferred mode is not the mode with highest
	 * supported refresh rate.
	 */
	list_for_each_entry (m, list_head, head) {
		current_refresh = drm_mode_vrefresh(m);

		if (m->hdisplay == m_pref->hdisplay &&
		    m->vdisplay == m_pref->vdisplay &&
		    highest_refresh < current_refresh) {
			highest_refresh = current_refresh;
			m_pref = m;
		}
	}

	aconnector->freesync_vid_base = *m_pref;
	return m_pref;
}

static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector)
{
	struct drm_display_mode *high_mode;
	int timing_diff;

	high_mode = get_highest_refresh_rate_mode(aconnector, false);
	if (!high_mode || !mode)
		return false;

	timing_diff = high_mode->vtotal - mode->vtotal;

	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
	    high_mode->hdisplay != mode->hdisplay ||
	    high_mode->vdisplay != mode->vdisplay ||
	    high_mode->hsync_start != mode->hsync_start ||
	    high_mode->hsync_end != mode->hsync_end ||
	    high_mode->htotal != mode->htotal ||
	    high_mode->hskew != mode->hskew ||
	    high_mode->vscan != mode->vscan ||
	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
	    high_mode->vsync_end - mode->vsync_end != timing_diff)
		return false;
	else
		return true;
}

static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	struct drm_display_mode saved_mode;
	struct drm_display_mode *freesync_mode = NULL;
	bool native_mode_found = false;
	bool recalculate_timing = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	struct dc_sink *sink = NULL;

	memset(&saved_mode, 0, sizeof(saved_mode));

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error. The use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous
		 * mode, and the mode list may not be filled in yet by then.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		recalculate_timing = amdgpu_freesync_vid_mode &&
			is_freesync_video_mode(&mode, aconnector);
		if (recalculate_timing) {
			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
			saved_mode = mode;
			mode = *freesync_mode;
		} else {
			decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode, scale);

			preferred_refresh = drm_mode_vrefresh(preferred_mode);
		}
	}

	if (recalculate_timing)
		drm_mode_set_crtcinfo(&saved_mode, 0);
	else if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, NULL,
			requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, old_stream,
			requested_bpc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* SST DSC determination policy */
	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
#endif

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * capabilities before building the VSC info packet.
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;

	}
finish:
	dc_sink_release(sink);

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);


	__drm_atomic_helper_crtc_destroy_state(state);


	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
	/* TODO: Duplicate dc_stream once the stream object is flattened */

	return &state->base;
}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
	crtc_debugfs_init(crtc);

	return 0;
}
#endif

static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	int rc;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
		      acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}

static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct amdgpu_display_manager *dm = &adev->dm;
	unsigned long flags;
#endif
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;

	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
		return -EBUSY;

	if (amdgpu_in_reset(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_irqsave(&dm->vblank_lock, flags);
	dm->vblank_workqueue->dm = dm;
	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
	dm->vblank_workqueue->enable = enable;
	spin_unlock_irqrestore(&dm->vblank_lock, flags);
	schedule_work(&dm->vblank_workqueue->mall_work);
#endif

	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}

/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	.late_register = amdgpu_dm_crtc_late_register,
#endif
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	update_subconnector_property(aconnector);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	/*
	 * Call only if mst_mgr was initialized before, since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of headless boot with force on for a DP managed connector,
	 * those settings have to be != 0 to get an initial modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}


	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");

		aconnector->force_yuv420_output = true;
		stream = create_validate_stream_for_sink(aconnector, drm_mode,
							 dm_state, old_stream);
		aconnector->force_yuv420_output = false;
	}

	return stream;
}
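
/*
 * Illustrative note (not from the original source): with the default
 * max_requested_bpc of 8 the retry ladder above is short (8, then 6), but a
 * userspace request of 16 walks 16 -> 14 -> 12 -> 10 -> 8 -> 6, releasing
 * the rejected stream each round until dc_validate_stream() accepts one or
 * the bpc floor of 6 has been tried.
 */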

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
	    aconnector->base.force != DRM_FORCE_ON) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}
6474
88694af9
NK
6475static int fill_hdr_info_packet(const struct drm_connector_state *state,
6476 struct dc_info_packet *out)
6477{
6478 struct hdmi_drm_infoframe frame;
6479 unsigned char buf[30]; /* 26 + 4 */
6480 ssize_t len;
6481 int ret, i;
6482
6483 memset(out, 0, sizeof(*out));
6484
6485 if (!state->hdr_output_metadata)
6486 return 0;
6487
6488 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6489 if (ret)
6490 return ret;
6491
6492 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6493 if (len < 0)
6494 return (int)len;
6495
6496 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6497 if (len != 30)
6498 return -EINVAL;
6499
6500 /* Prepare the infopacket for DC. */
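/*
 * Only the header differs per sink type: HDMI carries the raw infoframe
 * header (type/version/length), while DP/eDP wraps the same 26 payload
 * bytes in an SDP header.
 */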
6501 switch (state->connector->connector_type) {
6502 case DRM_MODE_CONNECTOR_HDMIA:
6503 out->hb0 = 0x87; /* type */
6504 out->hb1 = 0x01; /* version */
6505 out->hb2 = 0x1A; /* length */
6506 out->sb[0] = buf[3]; /* checksum */
6507 i = 1;
6508 break;
6509
6510 case DRM_MODE_CONNECTOR_DisplayPort:
6511 case DRM_MODE_CONNECTOR_eDP:
6512 out->hb0 = 0x00; /* sdp id, zero */
6513 out->hb1 = 0x87; /* type */
6514 out->hb2 = 0x1D; /* payload len - 1 */
6515 out->hb3 = (0x13 << 2); /* sdp version */
6516 out->sb[0] = 0x01; /* version */
6517 out->sb[1] = 0x1A; /* length */
6518 i = 2;
6519 break;
6520
6521 default:
6522 return -EINVAL;
6523 }
6524
6525 memcpy(&out->sb[i], &buf[4], 26);
6526 out->valid = true;
6527
6528 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6529 sizeof(out->sb), false);
6530
6531 return 0;
6532}
6533
88694af9
NK
6534static int
6535amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 6536 struct drm_atomic_state *state)
88694af9 6537{
51e857af
SP
6538 struct drm_connector_state *new_con_state =
6539 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
6540 struct drm_connector_state *old_con_state =
6541 drm_atomic_get_old_connector_state(state, conn);
6542 struct drm_crtc *crtc = new_con_state->crtc;
6543 struct drm_crtc_state *new_crtc_state;
6544 int ret;
6545
e8a98235
RS
6546 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6547
88694af9
NK
6548 if (!crtc)
6549 return 0;
6550
72921cdf 6551 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
6552 struct dc_info_packet hdr_infopacket;
6553
6554 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6555 if (ret)
6556 return ret;
6557
6558 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6559 if (IS_ERR(new_crtc_state))
6560 return PTR_ERR(new_crtc_state);
6561
6562 /*
6563 * DC considers the stream backends changed if the
6564 * static metadata changes. Forcing the modeset also
6565 * gives a simple way for userspace to switch from
b232d4ed
NK
6566 * 8bpc to 10bpc when setting the metadata to enter
6567 * or exit HDR.
6568 *
6569 * Changing the static metadata after it's been
6570 * set is permissible, however. So only force a
6571 * modeset if we're entering or exiting HDR.
88694af9 6572 */
b232d4ed
NK
6573 new_crtc_state->mode_changed =
6574 !old_con_state->hdr_output_metadata ||
6575 !new_con_state->hdr_output_metadata;
88694af9
NK
6576 }
6577
6578 return 0;
6579}
6580
e7b07cee
HW
6581static const struct drm_connector_helper_funcs
6582amdgpu_dm_connector_helper_funcs = {
6583 /*
1f6010a9 6584	 * If hotplugging a second, bigger display in FB console mode, bigger resolution
b830ebc9 6585	 * modes will be filtered by drm_mode_validate_size(), and those modes
1f6010a9 6586	 * are missing after the user starts lightdm. So we need to renew the modes
b830ebc9
HW
6587	 * list in the get_modes callback, not just return the modes count.
6588 */
e7b07cee
HW
6589 .get_modes = get_modes,
6590 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 6591 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
6592};
6593
6594static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6595{
6596}
6597
d6ef9b41 6598static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
6599{
6600 struct drm_atomic_state *state = new_crtc_state->state;
6601 struct drm_plane *plane;
6602 int num_active = 0;
6603
6604 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6605 struct drm_plane_state *new_plane_state;
6606
6607 /* Cursor planes are "fake". */
6608 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6609 continue;
6610
6611 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6612
6613 if (!new_plane_state) {
6614 /*
6615 * The plane is enabled on the CRTC and hasn't changed
6616 * state. This means that it previously passed
6617 * validation and is therefore enabled.
6618 */
6619 num_active += 1;
6620 continue;
6621 }
6622
6623 /* We need a framebuffer to be considered enabled. */
6624 num_active += (new_plane_state->fb != NULL);
6625 }
6626
d6ef9b41
NK
6627 return num_active;
6628}
6629
8fe684e9
NK
6630static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6631 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
6632{
6633 struct dm_crtc_state *dm_new_crtc_state =
6634 to_dm_crtc_state(new_crtc_state);
6635
6636 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
6637
6638 if (!dm_new_crtc_state->stream)
6639 return;
6640
6641 dm_new_crtc_state->active_planes =
6642 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
6643}
6644
3ee6b26b 6645static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 6646 struct drm_atomic_state *state)
e7b07cee 6647{
29b77ad7
MR
6648 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6649 crtc);
1348969a 6650 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 6651 struct dc *dc = adev->dm.dc;
29b77ad7 6652 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
6653 int ret = -EINVAL;
6654
5b8c5969 6655 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 6656
29b77ad7 6657 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 6658
bcd74374
ND
6659 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6660 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
e7b07cee
HW
6661 return ret;
6662 }
6663
bc92c065 6664 /*
b836a274
MD
6665 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6666 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6667 * planes are disabled, which is not supported by the hardware. And there is legacy
6668 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 6669 */
29b77ad7 6670 if (crtc_state->enable &&
ea9522f5
SS
6671 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6672 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 6673 return -EINVAL;
ea9522f5 6674 }
c14a005c 6675
b836a274
MD
6676 /* In some use cases, like reset, no stream is attached */
6677 if (!dm_crtc_state->stream)
6678 return 0;
6679
62c933f9 6680 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
6681 return 0;
6682
ea9522f5 6683 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
6684 return ret;
6685}
6686
3ee6b26b
AD
6687static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6688 const struct drm_display_mode *mode,
6689 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
6690{
6691 return true;
6692}
6693
6694static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6695 .disable = dm_crtc_helper_disable,
6696 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
6697 .mode_fixup = dm_crtc_helper_mode_fixup,
6698 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
6699};
6700
6701static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6702{
6703
6704}
6705
3261e013
ML
6706 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6707{
6708 switch (display_color_depth) {
6709 case COLOR_DEPTH_666:
6710 return 6;
6711 case COLOR_DEPTH_888:
6712 return 8;
6713 case COLOR_DEPTH_101010:
6714 return 10;
6715 case COLOR_DEPTH_121212:
6716 return 12;
6717 case COLOR_DEPTH_141414:
6718 return 14;
6719 case COLOR_DEPTH_161616:
6720 return 16;
6721 default:
6722 break;
6723 }
6724 return 0;
6725}
6726
3ee6b26b
AD
6727static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6728 struct drm_crtc_state *crtc_state,
6729 struct drm_connector_state *conn_state)
e7b07cee 6730{
3261e013
ML
6731 struct drm_atomic_state *state = crtc_state->state;
6732 struct drm_connector *connector = conn_state->connector;
6733 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6734 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6735 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6736 struct drm_dp_mst_topology_mgr *mst_mgr;
6737 struct drm_dp_mst_port *mst_port;
6738 enum dc_color_depth color_depth;
6739 int clock, bpp = 0;
1bc22f20 6740 bool is_y420 = false;
3261e013
ML
6741
6742 if (!aconnector->port || !aconnector->dc_sink)
6743 return 0;
6744
6745 mst_port = aconnector->port;
6746 mst_mgr = &aconnector->mst_port->mst_mgr;
6747
6748 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6749 return 0;
6750
6751 if (!state->duplicated) {
cbd14ae7 6752 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
6753 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6754 aconnector->force_yuv420_output;
cbd14ae7
SW
6755 color_depth = convert_color_depth_from_display_info(connector,
6756 is_y420,
6757 max_bpc);
3261e013
ML
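/* 3 bytes per pixel (one per color component) at the negotiated depth. */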
6758 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6759 clock = adjusted_mode->clock;
dc48529f 6760 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
6761 }
6762 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6763 mst_mgr,
6764 mst_port,
1c6c1cb5 6765 dm_new_connector_state->pbn,
03ca9600 6766 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
6767 if (dm_new_connector_state->vcpi_slots < 0) {
6768 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6769 return dm_new_connector_state->vcpi_slots;
6770 }
e7b07cee
HW
6771 return 0;
6772}
6773
6774const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6775 .disable = dm_encoder_helper_disable,
6776 .atomic_check = dm_encoder_helper_atomic_check
6777};
6778
d9fe1a4c 6779#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
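/*
 * For each MST connector in the new atomic state, find its stream and
 * enable or disable DSC on the MST port, recomputing PBN/VCPI from the
 * compressed bits-per-pixel when DSC is in use.
 */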
6780static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6781 struct dc_state *dc_state)
6782{
6783 struct dc_stream_state *stream = NULL;
6784 struct drm_connector *connector;
5760dcb9 6785 struct drm_connector_state *new_con_state;
29b9ba74
ML
6786 struct amdgpu_dm_connector *aconnector;
6787 struct dm_connector_state *dm_conn_state;
6788 int i, j, clock, bpp;
6789 int vcpi, pbn_div, pbn = 0;
6790
5760dcb9 6791 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
6792
6793 aconnector = to_amdgpu_dm_connector(connector);
6794
6795 if (!aconnector->port)
6796 continue;
6797
6798 if (!new_con_state || !new_con_state->crtc)
6799 continue;
6800
6801 dm_conn_state = to_dm_connector_state(new_con_state);
6802
6803 for (j = 0; j < dc_state->stream_count; j++) {
6804 stream = dc_state->streams[j];
6805 if (!stream)
6806 continue;
6807
6808 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6809 break;
6810
6811 stream = NULL;
6812 }
6813
6814 if (!stream)
6815 continue;
6816
6817 if (stream->timing.flags.DSC != 1) {
6818 drm_dp_mst_atomic_enable_dsc(state,
6819 aconnector->port,
6820 dm_conn_state->pbn,
6821 0,
6822 false);
6823 continue;
6824 }
6825
6826 pbn_div = dm_mst_get_pbn_divider(stream->link);
6827 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6828 clock = stream->timing.pix_clk_100hz / 10;
6829 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6830 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6831 aconnector->port,
6832 pbn, pbn_div,
6833 true);
6834 if (vcpi < 0)
6835 return vcpi;
6836
6837 dm_conn_state->pbn = pbn;
6838 dm_conn_state->vcpi_slots = vcpi;
6839 }
6840 return 0;
6841}
d9fe1a4c 6842#endif
29b9ba74 6843
e7b07cee
HW
6844static void dm_drm_plane_reset(struct drm_plane *plane)
6845{
6846 struct dm_plane_state *amdgpu_state = NULL;
6847
6848 if (plane->state)
6849 plane->funcs->atomic_destroy_state(plane, plane->state);
6850
6851 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 6852 WARN_ON(amdgpu_state == NULL);
1f6010a9 6853
7ddaef96
NK
6854 if (amdgpu_state)
6855 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
6856}
6857
6858static struct drm_plane_state *
6859dm_drm_plane_duplicate_state(struct drm_plane *plane)
6860{
6861 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6862
6863 old_dm_plane_state = to_dm_plane_state(plane->state);
6864 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6865 if (!dm_plane_state)
6866 return NULL;
6867
6868 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6869
3be5262e
HW
6870 if (old_dm_plane_state->dc_state) {
6871 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6872 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
6873 }
6874
6875 return &dm_plane_state->base;
6876}
6877
dfd84d90 6878static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 6879 struct drm_plane_state *state)
e7b07cee
HW
6880{
6881 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6882
3be5262e
HW
6883 if (dm_plane_state->dc_state)
6884 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 6885
0627bbd3 6886 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
6887}
6888
6889static const struct drm_plane_funcs dm_plane_funcs = {
6890 .update_plane = drm_atomic_helper_update_plane,
6891 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 6892 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
6893 .reset = dm_drm_plane_reset,
6894 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6895 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 6896 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
6897};
6898
3ee6b26b
AD
6899static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6900 struct drm_plane_state *new_state)
e7b07cee
HW
6901{
6902 struct amdgpu_framebuffer *afb;
6903 struct drm_gem_object *obj;
5d43be0c 6904 struct amdgpu_device *adev;
e7b07cee 6905 struct amdgpu_bo *rbo;
e7b07cee 6906 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
6907 struct list_head list;
6908 struct ttm_validate_buffer tv;
6909 struct ww_acquire_ctx ticket;
5d43be0c
CK
6910 uint32_t domain;
6911 int r;
e7b07cee
HW
6912
6913 if (!new_state->fb) {
4711c033 6914 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
6915 return 0;
6916 }
6917
6918 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 6919 obj = new_state->fb->obj[0];
e7b07cee 6920 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 6921 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
6922 INIT_LIST_HEAD(&list);
6923
6924 tv.bo = &rbo->tbo;
6925 tv.num_shared = 1;
6926 list_add(&tv.head, &list);
6927
9165fb87 6928 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
6929 if (r) {
6930 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 6931 return r;
0f257b09 6932 }
e7b07cee 6933
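/* Cursors must stay in VRAM; other planes may use any supported domain. */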
5d43be0c 6934 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 6935 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
6936 else
6937 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 6938
7b7c6c81 6939 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 6940 if (unlikely(r != 0)) {
30b7c614
HW
6941 if (r != -ERESTARTSYS)
6942 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 6943 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
6944 return r;
6945 }
6946
bb812f1e
JZ
6947 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6948 if (unlikely(r != 0)) {
6949 amdgpu_bo_unpin(rbo);
0f257b09 6950 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6951 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
6952 return r;
6953 }
7df7e505 6954
0f257b09 6955 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6956
7b7c6c81 6957 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
6958
6959 amdgpu_bo_ref(rbo);
6960
cf322b49
NK
6961 /**
6962 * We don't do surface updates on planes that have been newly created,
6963 * but we also don't have the afb->address during atomic check.
6964 *
6965 * Fill in buffer attributes depending on the address here, but only on
6966 * newly created planes since they're not being used by DC yet and this
6967 * won't modify global state.
6968 */
6969 dm_plane_state_old = to_dm_plane_state(plane->state);
6970 dm_plane_state_new = to_dm_plane_state(new_state);
6971
3be5262e 6972 if (dm_plane_state_new->dc_state &&
cf322b49
NK
6973 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6974 struct dc_plane_state *plane_state =
6975 dm_plane_state_new->dc_state;
6976 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 6977
320932bf 6978 fill_plane_buffer_attributes(
695af5f9 6979 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 6980 afb->tiling_flags,
cf322b49
NK
6981 &plane_state->tiling_info, &plane_state->plane_size,
6982 &plane_state->dcc, &plane_state->address,
6eed95b0 6983 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
6984 }
6985
e7b07cee
HW
6986 return 0;
6987}
6988
3ee6b26b
AD
6989static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6990 struct drm_plane_state *old_state)
e7b07cee
HW
6991{
6992 struct amdgpu_bo *rbo;
e7b07cee
HW
6993 int r;
6994
6995 if (!old_state->fb)
6996 return;
6997
e68d14dd 6998 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
6999 r = amdgpu_bo_reserve(rbo, false);
7000 if (unlikely(r)) {
7001 DRM_ERROR("failed to reserve rbo before unpin\n");
7002 return;
b830ebc9
HW
7003 }
7004
7005 amdgpu_bo_unpin(rbo);
7006 amdgpu_bo_unreserve(rbo);
7007 amdgpu_bo_unref(&rbo);
e7b07cee
HW
7008}
7009
8c44515b
AP
7010static int dm_plane_helper_check_state(struct drm_plane_state *state,
7011 struct drm_crtc_state *new_crtc_state)
7012{
6300b3bd
MK
7013 struct drm_framebuffer *fb = state->fb;
7014 int min_downscale, max_upscale;
7015 int min_scale = 0;
7016 int max_scale = INT_MAX;
7017
40d916a2 7018 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 7019 if (fb && state->crtc) {
40d916a2
NC
7020 /* Validate viewport to cover the case when only the position changes */
7021 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7022 int viewport_width = state->crtc_w;
7023 int viewport_height = state->crtc_h;
7024
7025 if (state->crtc_x < 0)
7026 viewport_width += state->crtc_x;
7027 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7028 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7029
7030 if (state->crtc_y < 0)
7031 viewport_height += state->crtc_y;
7032 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7033 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7034
4abdb72b
NC
7035 if (viewport_width < 0 || viewport_height < 0) {
7036 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7037 return -EINVAL;
7038 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7039 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 7040 return -EINVAL;
4abdb72b
NC
7041 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7042 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 7043 return -EINVAL;
4abdb72b
NC
7044 }
7045
40d916a2
NC
7046 }
7047
7048 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
7049 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7050 &min_downscale, &max_upscale);
7051 /*
7052 * Convert to drm convention: 16.16 fixed point, instead of dc's
7053 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7054 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7055 */
7056 min_scale = (1000 << 16) / max_upscale;
7057 max_scale = (1000 << 16) / min_downscale;
7058 }
8c44515b 7059
8c44515b 7060 return drm_atomic_helper_check_plane_state(
6300b3bd 7061 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
7062}
7063
7578ecda 7064static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 7065 struct drm_atomic_state *state)
cbd19488 7066{
7c11b99a
MR
7067 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7068 plane);
1348969a 7069 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 7070 struct dc *dc = adev->dm.dc;
78171832 7071 struct dm_plane_state *dm_plane_state;
695af5f9 7072 struct dc_scaling_info scaling_info;
8c44515b 7073 struct drm_crtc_state *new_crtc_state;
695af5f9 7074 int ret;
78171832 7075
ba5c1649 7076 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 7077
ba5c1649 7078 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 7079
3be5262e 7080 if (!dm_plane_state->dc_state)
9a3329b1 7081 return 0;
cbd19488 7082
8c44515b 7083 new_crtc_state =
dec92020 7084 drm_atomic_get_new_crtc_state(state,
ba5c1649 7085 new_plane_state->crtc);
8c44515b
AP
7086 if (!new_crtc_state)
7087 return -EINVAL;
7088
ba5c1649 7089 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
7090 if (ret)
7091 return ret;
7092
ba5c1649 7093 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
695af5f9
NK
7094 if (ret)
7095 return ret;
a05bcff1 7096
62c933f9 7097 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
7098 return 0;
7099
7100 return -EINVAL;
7101}
7102
674e78ac 7103static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7104 struct drm_atomic_state *state)
674e78ac
NK
7105{
7106 /* Only support async updates on cursor planes. */
7107 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7108 return -EINVAL;
7109
7110 return 0;
7111}
7112
7113static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7114 struct drm_atomic_state *state)
674e78ac 7115{
5ddb0bd4
MR
7116 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7117 plane);
674e78ac 7118 struct drm_plane_state *old_state =
5ddb0bd4 7119 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7120
e8a98235
RS
7121 trace_amdgpu_dm_atomic_update_cursor(new_state);
7122
332af874 7123 swap(plane->state->fb, new_state->fb);
674e78ac
NK
7124
7125 plane->state->src_x = new_state->src_x;
7126 plane->state->src_y = new_state->src_y;
7127 plane->state->src_w = new_state->src_w;
7128 plane->state->src_h = new_state->src_h;
7129 plane->state->crtc_x = new_state->crtc_x;
7130 plane->state->crtc_y = new_state->crtc_y;
7131 plane->state->crtc_w = new_state->crtc_w;
7132 plane->state->crtc_h = new_state->crtc_h;
7133
7134 handle_cursor_update(plane, old_state);
7135}
7136
e7b07cee
HW
7137static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7138 .prepare_fb = dm_plane_helper_prepare_fb,
7139 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7140 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
7141 .atomic_async_check = dm_plane_atomic_async_check,
7142 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
7143};
7144
7145/*
7146 * TODO: these are currently initialized to rgb formats only.
7147 * For future use cases we should either initialize them dynamically based on
7148 * plane capabilities, or initialize this array to all formats, so the internal drm
1f6010a9 7149 * check will succeed, and let DC implement the proper check
e7b07cee 7150 */
d90371b0 7151static const uint32_t rgb_formats[] = {
e7b07cee
HW
7152 DRM_FORMAT_XRGB8888,
7153 DRM_FORMAT_ARGB8888,
7154 DRM_FORMAT_RGBA8888,
7155 DRM_FORMAT_XRGB2101010,
7156 DRM_FORMAT_XBGR2101010,
7157 DRM_FORMAT_ARGB2101010,
7158 DRM_FORMAT_ABGR2101010,
58020403
MK
7159 DRM_FORMAT_XRGB16161616,
7160 DRM_FORMAT_XBGR16161616,
7161 DRM_FORMAT_ARGB16161616,
7162 DRM_FORMAT_ABGR16161616,
bcd47f60
MR
7163 DRM_FORMAT_XBGR8888,
7164 DRM_FORMAT_ABGR8888,
46dd9ff7 7165 DRM_FORMAT_RGB565,
e7b07cee
HW
7166};
7167
0d579c7e
NK
7168static const uint32_t overlay_formats[] = {
7169 DRM_FORMAT_XRGB8888,
7170 DRM_FORMAT_ARGB8888,
7171 DRM_FORMAT_RGBA8888,
7172 DRM_FORMAT_XBGR8888,
7173 DRM_FORMAT_ABGR8888,
7267a1a9 7174 DRM_FORMAT_RGB565
e7b07cee
HW
7175};
7176
7177static const u32 cursor_formats[] = {
7178 DRM_FORMAT_ARGB8888
7179};
7180
37c6a93b
NK
7181static int get_plane_formats(const struct drm_plane *plane,
7182 const struct dc_plane_cap *plane_cap,
7183 uint32_t *formats, int max_formats)
e7b07cee 7184{
37c6a93b
NK
7185 int i, num_formats = 0;
7186
7187 /*
7188 * TODO: Query support for each group of formats directly from
7189 * DC plane caps. This will require adding more formats to the
7190 * caps list.
7191 */
e7b07cee 7192
f180b4bc 7193 switch (plane->type) {
e7b07cee 7194 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
7195 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7196 if (num_formats >= max_formats)
7197 break;
7198
7199 formats[num_formats++] = rgb_formats[i];
7200 }
7201
ea36ad34 7202 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 7203 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
7204 if (plane_cap && plane_cap->pixel_format_support.p010)
7205 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
7206 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7207 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7208 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
7209 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7210 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 7211 }
e7b07cee 7212 break;
37c6a93b 7213
e7b07cee 7214 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
7215 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7216 if (num_formats >= max_formats)
7217 break;
7218
7219 formats[num_formats++] = overlay_formats[i];
7220 }
e7b07cee 7221 break;
37c6a93b 7222
e7b07cee 7223 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
7224 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7225 if (num_formats >= max_formats)
7226 break;
7227
7228 formats[num_formats++] = cursor_formats[i];
7229 }
e7b07cee
HW
7230 break;
7231 }
7232
37c6a93b
NK
7233 return num_formats;
7234}
7235
7236static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7237 struct drm_plane *plane,
7238 unsigned long possible_crtcs,
7239 const struct dc_plane_cap *plane_cap)
7240{
7241 uint32_t formats[32];
7242 int num_formats;
7243 int res = -EPERM;
ecc874a6 7244 unsigned int supported_rotations;
faa37f54 7245 uint64_t *modifiers = NULL;
37c6a93b
NK
7246
7247 num_formats = get_plane_formats(plane, plane_cap, formats,
7248 ARRAY_SIZE(formats));
7249
faa37f54
BN
7250 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7251 if (res)
7252 return res;
7253
4a580877 7254 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7255 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
7256 modifiers, plane->type, NULL);
7257 kfree(modifiers);
37c6a93b
NK
7258 if (res)
7259 return res;
7260
cc1fec57
NK
7261 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7262 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
7263 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7264 BIT(DRM_MODE_BLEND_PREMULTI);
7265
7266 drm_plane_create_alpha_property(plane);
7267 drm_plane_create_blend_mode_property(plane, blend_caps);
7268 }
7269
fc8e5230 7270 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
7271 plane_cap &&
7272 (plane_cap->pixel_format_support.nv12 ||
7273 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
7274 /* This only affects YUV formats. */
7275 drm_plane_create_color_properties(
7276 plane,
7277 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
7278 BIT(DRM_COLOR_YCBCR_BT709) |
7279 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
7280 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7281 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7282 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7283 }
7284
ecc874a6
PLG
7285 supported_rotations =
7286 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7287 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7288
1347385f
SS
7289 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7290 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
7291 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7292 supported_rotations);
ecc874a6 7293
f180b4bc 7294 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7295
96719c54 7296 /* Create (reset) the plane state */
f180b4bc
HW
7297 if (plane->funcs->reset)
7298 plane->funcs->reset(plane);
96719c54 7299
37c6a93b 7300 return 0;
e7b07cee
HW
7301}
7302
7578ecda
AD
7303static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7304 struct drm_plane *plane,
7305 uint32_t crtc_index)
e7b07cee
HW
7306{
7307 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7308 struct drm_plane *cursor_plane;
e7b07cee
HW
7309
7310 int res = -ENOMEM;
7311
7312 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7313 if (!cursor_plane)
7314 goto fail;
7315
f180b4bc 7316 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7317	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
e7b07cee
HW
7318
7319 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7320 if (!acrtc)
7321 goto fail;
7322
7323 res = drm_crtc_init_with_planes(
7324 dm->ddev,
7325 &acrtc->base,
7326 plane,
f180b4bc 7327 cursor_plane,
e7b07cee
HW
7328 &amdgpu_dm_crtc_funcs, NULL);
7329
7330 if (res)
7331 goto fail;
7332
7333 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7334
96719c54
HW
7335 /* Create (reset) the plane state */
7336 if (acrtc->base.funcs->reset)
7337 acrtc->base.funcs->reset(&acrtc->base);
7338
e7b07cee
HW
7339 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7340 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7341
7342 acrtc->crtc_id = crtc_index;
7343 acrtc->base.enabled = false;
c37e2d29 7344 acrtc->otg_inst = -1;
e7b07cee
HW
7345
7346 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
7347 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7348 true, MAX_COLOR_LUT_ENTRIES);
086247a4 7349 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 7350
e7b07cee
HW
7351 return 0;
7352
7353fail:
b830ebc9
HW
7354 kfree(acrtc);
7355 kfree(cursor_plane);
e7b07cee
HW
7356 return res;
7357}
7358
7359
7360static int to_drm_connector_type(enum signal_type st)
7361{
7362 switch (st) {
7363 case SIGNAL_TYPE_HDMI_TYPE_A:
7364 return DRM_MODE_CONNECTOR_HDMIA;
7365 case SIGNAL_TYPE_EDP:
7366 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
7367 case SIGNAL_TYPE_LVDS:
7368 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
7369 case SIGNAL_TYPE_RGB:
7370 return DRM_MODE_CONNECTOR_VGA;
7371 case SIGNAL_TYPE_DISPLAY_PORT:
7372 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7373 return DRM_MODE_CONNECTOR_DisplayPort;
7374 case SIGNAL_TYPE_DVI_DUAL_LINK:
7375 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7376 return DRM_MODE_CONNECTOR_DVID;
7377 case SIGNAL_TYPE_VIRTUAL:
7378 return DRM_MODE_CONNECTOR_VIRTUAL;
7379
7380 default:
7381 return DRM_MODE_CONNECTOR_Unknown;
7382 }
7383}
7384
2b4c1c05
DV
7385static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7386{
62afb4ad
JRS
7387 struct drm_encoder *encoder;
7388
7389 /* There is only one encoder per connector */
7390 drm_connector_for_each_possible_encoder(connector, encoder)
7391 return encoder;
7392
7393 return NULL;
2b4c1c05
DV
7394}
7395
e7b07cee
HW
7396static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7397{
e7b07cee
HW
7398 struct drm_encoder *encoder;
7399 struct amdgpu_encoder *amdgpu_encoder;
7400
2b4c1c05 7401 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
7402
7403 if (encoder == NULL)
7404 return;
7405
7406 amdgpu_encoder = to_amdgpu_encoder(encoder);
7407
7408 amdgpu_encoder->native_mode.clock = 0;
7409
7410 if (!list_empty(&connector->probed_modes)) {
7411 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 7412
e7b07cee 7413 list_for_each_entry(preferred_mode,
b830ebc9
HW
7414 &connector->probed_modes,
7415 head) {
7416 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7417 amdgpu_encoder->native_mode = *preferred_mode;
7418
e7b07cee
HW
7419 break;
7420 }
7421
7422 }
7423}
7424
3ee6b26b
AD
7425static struct drm_display_mode *
7426amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7427 char *name,
7428 int hdisplay, int vdisplay)
e7b07cee
HW
7429{
7430 struct drm_device *dev = encoder->dev;
7431 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7432 struct drm_display_mode *mode = NULL;
7433 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7434
7435 mode = drm_mode_duplicate(dev, native_mode);
7436
b830ebc9 7437 if (mode == NULL)
e7b07cee
HW
7438 return NULL;
7439
7440 mode->hdisplay = hdisplay;
7441 mode->vdisplay = vdisplay;
7442 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 7443 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
7444
7445 return mode;
7446
7447}
7448
7449static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 7450 struct drm_connector *connector)
e7b07cee
HW
7451{
7452 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7453 struct drm_display_mode *mode = NULL;
7454 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
7455 struct amdgpu_dm_connector *amdgpu_dm_connector =
7456 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7457 int i;
7458 int n;
7459 struct mode_size {
7460 char name[DRM_DISPLAY_MODE_LEN];
7461 int w;
7462 int h;
b830ebc9 7463 } common_modes[] = {
e7b07cee
HW
7464 { "640x480", 640, 480},
7465 { "800x600", 800, 600},
7466 { "1024x768", 1024, 768},
7467 { "1280x720", 1280, 720},
7468 { "1280x800", 1280, 800},
7469 {"1280x1024", 1280, 1024},
7470 { "1440x900", 1440, 900},
7471 {"1680x1050", 1680, 1050},
7472 {"1600x1200", 1600, 1200},
7473 {"1920x1080", 1920, 1080},
7474 {"1920x1200", 1920, 1200}
7475 };
7476
b830ebc9 7477 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
7478
7479 for (i = 0; i < n; i++) {
7480 struct drm_display_mode *curmode = NULL;
7481 bool mode_existed = false;
7482
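/* Skip modes larger than the native mode, and the native mode itself. */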
7483 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
7484 common_modes[i].h > native_mode->vdisplay ||
7485 (common_modes[i].w == native_mode->hdisplay &&
7486 common_modes[i].h == native_mode->vdisplay))
7487 continue;
e7b07cee
HW
7488
7489 list_for_each_entry(curmode, &connector->probed_modes, head) {
7490 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 7491 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
7492 mode_existed = true;
7493 break;
7494 }
7495 }
7496
7497 if (mode_existed)
7498 continue;
7499
7500 mode = amdgpu_dm_create_common_mode(encoder,
7501 common_modes[i].name, common_modes[i].w,
7502 common_modes[i].h);
7503 drm_mode_probed_add(connector, mode);
c84dec2f 7504 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
7505 }
7506}
7507
3ee6b26b
AD
7508static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7509 struct edid *edid)
e7b07cee 7510{
c84dec2f
HW
7511 struct amdgpu_dm_connector *amdgpu_dm_connector =
7512 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7513
7514 if (edid) {
7515 /* empty probed_modes */
7516 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 7517 amdgpu_dm_connector->num_modes =
e7b07cee
HW
7518 drm_add_edid_modes(connector, edid);
7519
f1e5e913
YMM
7520 /* Sort the probed modes before calling
7521 * amdgpu_dm_get_native_mode(), since an EDID can have
7522 * more than one preferred mode. Modes later in the
7523 * probed mode list could be of a higher, preferred
7524 * resolution. For example, a 3840x2160 preferred
7525 * timing in the base EDID and a 4096x2160 preferred
7526 * resolution in a DID extension block later.
7527 */
7528 drm_mode_sort(&connector->probed_modes);
e7b07cee 7529 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
7530
7531 /* Freesync capabilities are reset by calling
7532 * drm_add_edid_modes() and need to be
7533 * restored here.
7534 */
7535 amdgpu_dm_update_freesync_caps(connector, edid);
a8d8d3dc 7536 } else {
c84dec2f 7537 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 7538 }
e7b07cee
HW
7539}
7540
a85ba005
NC
7541static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7542 struct drm_display_mode *mode)
7543{
7544 struct drm_display_mode *m;
7545
7546 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7547 if (drm_mode_equal(m, mode))
7548 return true;
7549 }
7550
7551 return false;
7552}
7553
7554static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7555{
7556 const struct drm_display_mode *m;
7557 struct drm_display_mode *new_mode;
7558 uint i;
7559 uint32_t new_modes_count = 0;
7560
7561 /* Standard FPS values
7562 *
7563 * 23.976 - TV/NTSC
7564 * 24 - Cinema
7565 * 25 - TV/PAL
7566 * 29.97 - TV/NTSC
7567 * 30 - TV/NTSC
7568 * 48 - Cinema HFR
7569 * 50 - TV/PAL
7570 * 60 - Commonly used
7571 * 48,72,96 - Multiples of 24
7572 */
9ce5ed6e
CIK
7573 static const uint32_t common_rates[] = {
7574 23976, 24000, 25000, 29970, 30000,
7575 48000, 50000, 60000, 72000, 96000
7576 };
a85ba005
NC
7577
7578 /*
7579 * Find mode with highest refresh rate with the same resolution
7580 * as the preferred mode. Some monitors report a preferred mode
7581 * with lower resolution than the highest refresh rate supported.
7582 */
7583
7584 m = get_highest_refresh_rate_mode(aconnector, true);
7585 if (!m)
7586 return 0;
7587
7588 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7589 uint64_t target_vtotal, target_vtotal_diff;
7590 uint64_t num, den;
7591
7592 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7593 continue;
7594
7595 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7596 common_rates[i] > aconnector->max_vfreq * 1000)
7597 continue;
7598
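/*
 * vrefresh = clock * 1000 / (htotal * vtotal), so the vtotal needed for
 * the target rate (in mHz) is clock * 1000 * 1000 / (rate * htotal).
 * E.g. a 148500 kHz, htotal 2200 mode retargeted to 48 Hz gives a
 * target_vtotal of about 1406.
 */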
7599 num = (unsigned long long)m->clock * 1000 * 1000;
7600 den = common_rates[i] * (unsigned long long)m->htotal;
7601 target_vtotal = div_u64(num, den);
7602 target_vtotal_diff = target_vtotal - m->vtotal;
7603
7604 /* Check for illegal modes */
7605 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7606 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7607 m->vtotal + target_vtotal_diff < m->vsync_end)
7608 continue;
7609
7610 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7611 if (!new_mode)
7612 goto out;
7613
7614 new_mode->vtotal += (u16)target_vtotal_diff;
7615 new_mode->vsync_start += (u16)target_vtotal_diff;
7616 new_mode->vsync_end += (u16)target_vtotal_diff;
7617 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7618 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7619
7620 if (!is_duplicate_mode(aconnector, new_mode)) {
7621 drm_mode_probed_add(&aconnector->base, new_mode);
7622 new_modes_count += 1;
7623 } else {
7624 drm_mode_destroy(aconnector->base.dev, new_mode);
		}
7625 }
7626 out:
7627 return new_modes_count;
7628}
7629
7630static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7631 struct edid *edid)
7632{
7633 struct amdgpu_dm_connector *amdgpu_dm_connector =
7634 to_amdgpu_dm_connector(connector);
7635
7636 if (!(amdgpu_freesync_vid_mode && edid))
7637 return;
fe8858bb 7638
a85ba005
NC
7639 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7640 amdgpu_dm_connector->num_modes +=
7641 add_fs_modes(amdgpu_dm_connector);
7642}
7643
7578ecda 7644static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 7645{
c84dec2f
HW
7646 struct amdgpu_dm_connector *amdgpu_dm_connector =
7647 to_amdgpu_dm_connector(connector);
e7b07cee 7648 struct drm_encoder *encoder;
c84dec2f 7649 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 7650
2b4c1c05 7651 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 7652
5c0e6840 7653 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
7654 amdgpu_dm_connector->num_modes =
7655 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
7656 } else {
7657 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7658 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 7659 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 7660 }
3e332d3a 7661 amdgpu_dm_fbc_init(connector);
5099114b 7662
c84dec2f 7663 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
7664}
7665
3ee6b26b
AD
7666void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7667 struct amdgpu_dm_connector *aconnector,
7668 int connector_type,
7669 struct dc_link *link,
7670 int link_index)
e7b07cee 7671{
1348969a 7672 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 7673
f04bee34
NK
7674 /*
7675 * Some of the properties below require access to state, like bpc.
7676 * Allocate some default initial connector state with our reset helper.
7677 */
7678 if (aconnector->base.funcs->reset)
7679 aconnector->base.funcs->reset(&aconnector->base);
7680
e7b07cee
HW
7681 aconnector->connector_id = link_index;
7682 aconnector->dc_link = link;
7683 aconnector->base.interlace_allowed = false;
7684 aconnector->base.doublescan_allowed = false;
7685 aconnector->base.stereo_allowed = false;
7686 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7687 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 7688 aconnector->audio_inst = -1;
e7b07cee
HW
7689 mutex_init(&aconnector->hpd_lock);
7690
1f6010a9
DF
7691	/*
7692	 * Configure HPD hot plug support. The connector->polled default value is 0,
b830ebc9
HW
7693	 * which means HPD hot plug is not supported.
7694	 */
e7b07cee
HW
7695 switch (connector_type) {
7696 case DRM_MODE_CONNECTOR_HDMIA:
7697 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7698 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7699 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
7700 break;
7701 case DRM_MODE_CONNECTOR_DisplayPort:
7702 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7703 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7704 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
7705 break;
7706 case DRM_MODE_CONNECTOR_DVID:
7707 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7708 break;
7709 default:
7710 break;
7711 }
7712
7713 drm_object_attach_property(&aconnector->base.base,
7714 dm->ddev->mode_config.scaling_mode_property,
7715 DRM_MODE_SCALE_NONE);
7716
7717 drm_object_attach_property(&aconnector->base.base,
7718 adev->mode_info.underscan_property,
7719 UNDERSCAN_OFF);
7720 drm_object_attach_property(&aconnector->base.base,
7721 adev->mode_info.underscan_hborder_property,
7722 0);
7723 drm_object_attach_property(&aconnector->base.base,
7724 adev->mode_info.underscan_vborder_property,
7725 0);
1825fd34 7726
8c61b31e
JFZ
7727 if (!aconnector->mst_port)
7728 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 7729
4a8ca46b
RL
7730 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7731 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7732 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 7733
c1ee92f9 7734 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 7735 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
7736 drm_object_attach_property(&aconnector->base.base,
7737 adev->mode_info.abm_level_property, 0);
7738 }
bb47de73
NK
7739
7740 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
7741 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7742 connector_type == DRM_MODE_CONNECTOR_eDP) {
e057b52c 7743 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
88694af9 7744
8c61b31e
JFZ
7745 if (!aconnector->mst_port)
7746 drm_connector_attach_vrr_capable_property(&aconnector->base);
7747
0c8620d6 7748#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 7749 if (adev->dm.hdcp_workqueue)
53e108aa 7750 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 7751#endif
bb47de73 7752 }
e7b07cee
HW
7753}
7754
7578ecda
AD
7755static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7756 struct i2c_msg *msgs, int num)
e7b07cee
HW
7757{
7758 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7759 struct ddc_service *ddc_service = i2c->ddc_service;
7760 struct i2c_command cmd;
7761 int i;
7762 int result = -EIO;
7763
b830ebc9 7764 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
7765
7766 if (!cmd.payloads)
7767 return result;
7768
7769 cmd.number_of_payloads = num;
7770 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7771 cmd.speed = 100;
7772
7773 for (i = 0; i < num; i++) {
7774 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7775 cmd.payloads[i].address = msgs[i].addr;
7776 cmd.payloads[i].length = msgs[i].len;
7777 cmd.payloads[i].data = msgs[i].buf;
7778 }
7779
c85e6e54
DF
7780 if (dc_submit_i2c(
7781 ddc_service->ctx->dc,
7782 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
7783 &cmd))
7784 result = num;
7785
7786 kfree(cmd.payloads);
7787 return result;
7788}
7789
7578ecda 7790static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
7791{
7792 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7793}
7794
7795static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7796 .master_xfer = amdgpu_dm_i2c_xfer,
7797 .functionality = amdgpu_dm_i2c_func,
7798};
7799
3ee6b26b
AD
7800static struct amdgpu_i2c_adapter *
7801create_i2c(struct ddc_service *ddc_service,
7802 int link_index,
7803 int *res)
e7b07cee
HW
7804{
7805 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7806 struct amdgpu_i2c_adapter *i2c;
7807
b830ebc9 7808 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
7809 if (!i2c)
7810 return NULL;
e7b07cee
HW
7811 i2c->base.owner = THIS_MODULE;
7812 i2c->base.class = I2C_CLASS_DDC;
7813 i2c->base.dev.parent = &adev->pdev->dev;
7814 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 7815 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
7816 i2c_set_adapdata(&i2c->base, i2c);
7817 i2c->ddc_service = ddc_service;
c85e6e54 7818 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
7819
7820 return i2c;
7821}
7822
89fc8d4e 7823
1f6010a9
DF
7824/*
7825 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
7826 * dc_link which will be represented by this aconnector.
7827 */
7578ecda
AD
7828static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7829 struct amdgpu_dm_connector *aconnector,
7830 uint32_t link_index,
7831 struct amdgpu_encoder *aencoder)
e7b07cee
HW
7832{
7833 int res = 0;
7834 int connector_type;
7835 struct dc *dc = dm->dc;
7836 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7837 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
7838
7839 link->priv = aconnector;
e7b07cee 7840
f1ad2f5e 7841 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
7842
7843 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
7844 if (!i2c) {
7845 DRM_ERROR("Failed to create i2c adapter data\n");
7846 return -ENOMEM;
7847 }
7848
e7b07cee
HW
7849 aconnector->i2c = i2c;
7850 res = i2c_add_adapter(&i2c->base);
7851
7852 if (res) {
7853 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7854 goto out_free;
7855 }
7856
7857 connector_type = to_drm_connector_type(link->connector_signal);
7858
17165de2 7859 res = drm_connector_init_with_ddc(
e7b07cee
HW
7860 dm->ddev,
7861 &aconnector->base,
7862 &amdgpu_dm_connector_funcs,
17165de2
AP
7863 connector_type,
7864 &i2c->base);
e7b07cee
HW
7865
7866 if (res) {
7867 DRM_ERROR("connector_init failed\n");
7868 aconnector->connector_id = -1;
7869 goto out_free;
7870 }
7871
7872 drm_connector_helper_add(
7873 &aconnector->base,
7874 &amdgpu_dm_connector_helper_funcs);
7875
7876 amdgpu_dm_connector_init_helper(
7877 dm,
7878 aconnector,
7879 connector_type,
7880 link,
7881 link_index);
7882
cde4c44d 7883 drm_connector_attach_encoder(
e7b07cee
HW
7884 &aconnector->base, &aencoder->base);
7885
e7b07cee
HW
7886 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7887 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 7888 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 7889
e7b07cee
HW
7890out_free:
7891 if (res) {
7892 kfree(i2c);
7893 aconnector->i2c = NULL;
7894 }
7895 return res;
7896}
7897
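/* Bitmask of CRTCs an encoder can drive: one bit per CRTC, capped at six. */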
7898int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7899{
7900 switch (adev->mode_info.num_crtc) {
7901 case 1:
7902 return 0x1;
7903 case 2:
7904 return 0x3;
7905 case 3:
7906 return 0x7;
7907 case 4:
7908 return 0xf;
7909 case 5:
7910 return 0x1f;
7911 case 6:
7912 default:
7913 return 0x3f;
7914 }
7915}
7916
7578ecda
AD
7917static int amdgpu_dm_encoder_init(struct drm_device *dev,
7918 struct amdgpu_encoder *aencoder,
7919 uint32_t link_index)
e7b07cee 7920{
1348969a 7921 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
7922
7923 int res = drm_encoder_init(dev,
7924 &aencoder->base,
7925 &amdgpu_dm_encoder_funcs,
7926 DRM_MODE_ENCODER_TMDS,
7927 NULL);
7928
7929 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7930
7931 if (!res)
7932 aencoder->encoder_id = link_index;
7933 else
7934 aencoder->encoder_id = -1;
7935
7936 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7937
7938 return res;
7939}
7940
3ee6b26b
AD
7941static void manage_dm_interrupts(struct amdgpu_device *adev,
7942 struct amdgpu_crtc *acrtc,
7943 bool enable)
e7b07cee
HW
7944{
7945 /*
8fe684e9
NK
7946 * We have no guarantee that the frontend index maps to the same
7947 * backend index - some even map to more than one.
7948 *
7949 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
7950 */
7951 int irq_type =
734dd01d 7952 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
7953 adev,
7954 acrtc->crtc_id);
7955
7956 if (enable) {
7957 drm_crtc_vblank_on(&acrtc->base);
7958 amdgpu_irq_get(
7959 adev,
7960 &adev->pageflip_irq,
7961 irq_type);
86bc2219
WL
7962#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7963 amdgpu_irq_get(
7964 adev,
7965 &adev->vline0_irq,
7966 irq_type);
7967#endif
e7b07cee 7968 } else {
86bc2219
WL
7969#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7970 amdgpu_irq_put(
7971 adev,
7972 &adev->vline0_irq,
7973 irq_type);
7974#endif
e7b07cee
HW
7975 amdgpu_irq_put(
7976 adev,
7977 &adev->pageflip_irq,
7978 irq_type);
7979 drm_crtc_vblank_off(&acrtc->base);
7980 }
7981}
7982
8fe684e9
NK
7983static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7984 struct amdgpu_crtc *acrtc)
7985{
7986 int irq_type =
7987 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7988
7989 /**
7990 * This reads the current state for the IRQ and forcibly reapplies
7991 * the setting to hardware.
7992 */
7993 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7994}
7995
3ee6b26b
AD
7996static bool
7997is_scaling_state_different(const struct dm_connector_state *dm_state,
7998 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
7999{
8000 if (dm_state->scaling != old_dm_state->scaling)
8001 return true;
8002 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8003 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8004 return true;
8005 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8006 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8007 return true;
b830ebc9
HW
8008 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8009 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8010 return true;
e7b07cee
HW
8011 return false;
8012}
8013
0c8620d6
BL
8014#ifdef CONFIG_DRM_AMD_DC_HDCP
8015static bool is_content_protection_different(struct drm_connector_state *state,
8016 const struct drm_connector_state *old_state,
8017 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8018{
8019 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 8020 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 8021
31c0ed90 8022 /* Handle: Type0/1 change */
53e108aa
BL
8023 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8024 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8025 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8026 return true;
8027 }
8028
31c0ed90
BL
8029 /* CP is being re enabled, ignore this
8030 *
8031 * Handles: ENABLED -> DESIRED
8032 */
0c8620d6
BL
8033 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8034 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8035 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8036 return false;
8037 }
8038
31c0ed90
BL
8039 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8040 *
8041 * Handles: UNDESIRED -> ENABLED
8042 */
0c8620d6
BL
8043 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8044 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8045 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8046
8047 /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
8048 * hot-plug, headless s3, dpms
31c0ed90
BL
8049 *
8050 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 8051 */
97f6c917
BL
8052 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8053 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8054 dm_con_state->update_hdcp = false;
0c8620d6 8055 return true;
97f6c917 8056 }
0c8620d6 8057
31c0ed90
BL
8058 /*
8059 * Handles: UNDESIRED -> UNDESIRED
8060 * DESIRED -> DESIRED
8061 * ENABLED -> ENABLED
8062 */
0c8620d6
BL
8063 if (old_state->content_protection == state->content_protection)
8064 return false;
8065
31c0ed90
BL
8066 /*
8067 * Handles: UNDESIRED -> DESIRED
8068 * DESIRED -> UNDESIRED
8069 * ENABLED -> UNDESIRED
8070 */
97f6c917 8071 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
8072 return true;
8073
31c0ed90
BL
8074 /*
8075 * Handles: DESIRED -> ENABLED
8076 */
0c8620d6
BL
8077 return false;
8078}
8079
0c8620d6 8080#endif
3ee6b26b
AD
8081static void remove_stream(struct amdgpu_device *adev,
8082 struct amdgpu_crtc *acrtc,
8083 struct dc_stream_state *stream)
e7b07cee
HW
8084{
8085	/* This is the update mode case: mark the CRTC disabled and its OTG unassigned */
e7b07cee
HW
8086
8087 acrtc->otg_inst = -1;
8088 acrtc->enabled = false;
8089}
8090
7578ecda
AD
8091static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8092 struct dc_cursor_position *position)
2a8f6ccb 8093{
f4c2cc43 8094 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
8095 int x, y;
8096 int xorigin = 0, yorigin = 0;
8097
e371e19c 8098 if (!crtc || !plane->state->fb)
2a8f6ccb 8099 return 0;
2a8f6ccb
HW
8100
8101 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8102 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8103 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8104 __func__,
8105 plane->state->crtc_w,
8106 plane->state->crtc_h);
8107 return -EINVAL;
8108 }
8109
8110 x = plane->state->crtc_x;
8111 y = plane->state->crtc_y;
c14a005c 8112
e371e19c
NK
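/* Cursor fully off-screen: report success with position.enable left false. */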
8113 if (x <= -amdgpu_crtc->max_cursor_width ||
8114 y <= -amdgpu_crtc->max_cursor_height)
8115 return 0;
8116
2a8f6ccb
HW
8117 if (x < 0) {
8118 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8119 x = 0;
8120 }
8121 if (y < 0) {
8122 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8123 y = 0;
8124 }
8125 position->enable = true;
d243b6ff 8126 position->translate_by_source = true;
2a8f6ccb
HW
8127 position->x = x;
8128 position->y = y;
8129 position->x_hotspot = xorigin;
8130 position->y_hotspot = yorigin;
8131
8132 return 0;
8133}
8134
3ee6b26b
AD
8135static void handle_cursor_update(struct drm_plane *plane,
8136 struct drm_plane_state *old_plane_state)
e7b07cee 8137{
1348969a 8138 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
8139 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8140 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8141 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8142 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8143 uint64_t address = afb ? afb->address : 0;
6a30a929 8144 struct dc_cursor_position position = {0};
2a8f6ccb
HW
8145 struct dc_cursor_attributes attributes;
8146 int ret;
8147
e7b07cee
HW
8148 if (!plane->state->fb && !old_plane_state->fb)
8149 return;
8150
cb2318b7 8151 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
4711c033
LT
8152 __func__,
8153 amdgpu_crtc->crtc_id,
8154 plane->state->crtc_w,
8155 plane->state->crtc_h);
2a8f6ccb
HW
8156
8157 ret = get_cursor_position(plane, crtc, &position);
8158 if (ret)
8159 return;
8160
8161 if (!position.enable) {
8162 /* turn off cursor */
674e78ac
NK
8163 if (crtc_state && crtc_state->stream) {
8164 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
8165 dc_stream_set_cursor_position(crtc_state->stream,
8166 &position);
674e78ac
NK
8167 mutex_unlock(&adev->dm.dc_lock);
8168 }
2a8f6ccb 8169 return;
e7b07cee 8170 }
e7b07cee 8171
2a8f6ccb
HW
8172 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8173 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8174
c1cefe11 8175 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
8176 attributes.address.high_part = upper_32_bits(address);
8177 attributes.address.low_part = lower_32_bits(address);
8178 attributes.width = plane->state->crtc_w;
8179 attributes.height = plane->state->crtc_h;
8180 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8181 attributes.rotation_angle = 0;
8182 attributes.attribute_flags.value = 0;
8183
03a66367 8184 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 8185
886daac9 8186 if (crtc_state->stream) {
674e78ac 8187 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
8188 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8189 &attributes))
8190 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 8191
2a8f6ccb
HW
8192 if (!dc_stream_set_cursor_position(crtc_state->stream,
8193 &position))
8194 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 8195 mutex_unlock(&adev->dm.dc_lock);
886daac9 8196 }
2a8f6ccb 8197}
e7b07cee
HW
8198
8199static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8200{
8201
8202 assert_spin_locked(&acrtc->base.dev->event_lock);
8203 WARN_ON(acrtc->event);
8204
8205 acrtc->event = acrtc->base.state->event;
8206
8207 /* Set the flip status */
8208 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8209
8210 /* Mark this event as consumed */
8211 acrtc->base.state->event = NULL;
8212
cb2318b7
VL
8213 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8214 acrtc->crtc_id);
e7b07cee
HW
8215}
8216
bb47de73
NK
8217static void update_freesync_state_on_stream(
8218 struct amdgpu_display_manager *dm,
8219 struct dm_crtc_state *new_crtc_state,
180db303
NK
8220 struct dc_stream_state *new_stream,
8221 struct dc_plane_state *surface,
8222 u32 flip_timestamp_in_us)
bb47de73 8223{
09aef2c4 8224 struct mod_vrr_params vrr_params;
bb47de73 8225 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 8226 struct amdgpu_device *adev = dm->adev;
585d450c 8227 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8228 unsigned long flags;
4cda3243 8229 bool pack_sdp_v1_3 = false;
bb47de73
NK
8230
8231 if (!new_stream)
8232 return;
8233
8234 /*
8235 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8236 * For now it's sufficient to just guard against these conditions.
8237 */
8238
8239 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8240 return;
8241
4a580877 8242 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8243 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8244
180db303
NK
8245 if (surface) {
8246 mod_freesync_handle_preflip(
8247 dm->freesync_module,
8248 surface,
8249 new_stream,
8250 flip_timestamp_in_us,
8251 &vrr_params);
09aef2c4
MK
8252
8253 if (adev->family < AMDGPU_FAMILY_AI &&
8254 amdgpu_dm_vrr_active(new_crtc_state)) {
8255 mod_freesync_handle_v_update(dm->freesync_module,
8256 new_stream, &vrr_params);
e63e2491
EB
8257
8258 /* Need to call this before the frame ends. */
8259 dc_stream_adjust_vmin_vmax(dm->dc,
8260 new_crtc_state->stream,
8261 &vrr_params.adjust);
09aef2c4 8262 }
180db303 8263 }
bb47de73
NK
8264
8265 mod_freesync_build_vrr_infopacket(
8266 dm->freesync_module,
8267 new_stream,
180db303 8268 &vrr_params,
ecd0136b
HT
8269 PACKET_TYPE_VRR,
8270 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
8271 &vrr_infopacket,
8272 pack_sdp_v1_3);
bb47de73 8273
8a48b44c 8274 new_crtc_state->freesync_timing_changed |=
585d450c 8275 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
8276 &vrr_params.adjust,
8277 sizeof(vrr_params.adjust)) != 0);
bb47de73 8278
8a48b44c 8279 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
8280 (memcmp(&new_crtc_state->vrr_infopacket,
8281 &vrr_infopacket,
8282 sizeof(vrr_infopacket)) != 0);
8283
585d450c 8284 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
8285 new_crtc_state->vrr_infopacket = vrr_infopacket;
8286
585d450c 8287 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
8288 new_stream->vrr_infopacket = vrr_infopacket;
8289
8290 if (new_crtc_state->freesync_vrr_info_changed)
8291 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8292 new_crtc_state->base.crtc->base.id,
8293 (int)new_crtc_state->base.vrr_enabled,
180db303 8294 (int)vrr_params.state);
09aef2c4 8295
4a580877 8296 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
8297}
8298
585d450c 8299static void update_stream_irq_parameters(
e854194c
MK
8300 struct amdgpu_display_manager *dm,
8301 struct dm_crtc_state *new_crtc_state)
8302{
8303 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 8304 struct mod_vrr_params vrr_params;
e854194c 8305 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 8306 struct amdgpu_device *adev = dm->adev;
585d450c 8307 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8308 unsigned long flags;
e854194c
MK
8309
8310 if (!new_stream)
8311 return;
8312
8313 /*
8314 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8315 * For now it's sufficient to just guard against these conditions.
8316 */
8317 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8318 return;
8319
4a580877 8320 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8321 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8322
e854194c
MK
8323 if (new_crtc_state->vrr_supported &&
8324 config.min_refresh_in_uhz &&
8325 config.max_refresh_in_uhz) {
a85ba005
NC
8326 /*
8327 * if freesync compatible mode was set, config.state will be set
8328 * in atomic check
8329 */
8330 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8331 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8332 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8333 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8334 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8335 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8336 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8337 } else {
8338 config.state = new_crtc_state->base.vrr_enabled ?
8339 VRR_STATE_ACTIVE_VARIABLE :
8340 VRR_STATE_INACTIVE;
8341 }
e854194c
MK
8342 } else {
8343 config.state = VRR_STATE_UNSUPPORTED;
8344 }
8345
8346 mod_freesync_build_vrr_params(dm->freesync_module,
8347 new_stream,
8348 &config, &vrr_params);
8349
8350 new_crtc_state->freesync_timing_changed |=
585d450c
AP
8351 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8352 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 8353
585d450c
AP
8354 new_crtc_state->freesync_config = config;
8355 /* Copy state for access from DM IRQ handler */
8356 acrtc->dm_irq_params.freesync_config = config;
8357 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8358 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 8359 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
8360}
8361
66b0c973
MK
8362static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8363 struct dm_crtc_state *new_state)
8364{
8365 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8366 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8367
8368 if (!old_vrr_active && new_vrr_active) {
8369 /* Transition VRR inactive -> active:
 8370 * While VRR is active, we must not disable the vblank irq, as a
 8371 * re-enable after a disable would compute bogus vblank/pflip
 8372 * timestamps if it happens inside the display front porch.
d2574c33
MK
8373 *
8374 * We also need vupdate irq for the actual core vblank handling
8375 * at end of vblank.
66b0c973 8376 */
d2574c33 8377 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
8378 drm_crtc_vblank_get(new_state->base.crtc);
8379 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8380 __func__, new_state->base.crtc->base.id);
8381 } else if (old_vrr_active && !new_vrr_active) {
8382 /* Transition VRR active -> inactive:
8383 * Allow vblank irq disable again for fixed refresh rate.
8384 */
d2574c33 8385 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
8386 drm_crtc_vblank_put(new_state->base.crtc);
8387 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8388 __func__, new_state->base.crtc->base.id);
8389 }
8390}
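/*
 * The net effect of the transition handling above: exactly one vblank
 * reference is held for as long as VRR stays active, so the vblank irq
 * can never be disabled mid-stream. A standalone sketch of that
 * invariant (vblank_refs stands in for drm_crtc_vblank_get/put):
 */
#if 0 /* illustrative sketch, not built with the driver */
#include <assert.h>
#include <stdbool.h>

static int vblank_refs;

static void handle_vrr_transition(bool old_active, bool new_active)
{
	if (!old_active && new_active)
		vblank_refs++;		/* off -> on: take a reference */
	else if (old_active && !new_active)
		vblank_refs--;		/* on -> off: drop it again */
}

int main(void)
{
	handle_vrr_transition(false, true);
	handle_vrr_transition(true, true);	/* no transition, no churn */
	handle_vrr_transition(true, false);
	assert(vblank_refs == 0);		/* gets and puts balance */
	return 0;
}
#endif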
8391
8ad27806
NK
8392static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8393{
8394 struct drm_plane *plane;
5760dcb9 8395 struct drm_plane_state *old_plane_state;
8ad27806
NK
8396 int i;
8397
8398 /*
8399 * TODO: Make this per-stream so we don't issue redundant updates for
8400 * commits with multiple streams.
8401 */
5760dcb9 8402 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8ad27806
NK
8403 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8404 handle_cursor_update(plane, old_plane_state);
8405}
8406
3be5262e 8407static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 8408 struct dc_state *dc_state,
3ee6b26b
AD
8409 struct drm_device *dev,
8410 struct amdgpu_display_manager *dm,
8411 struct drm_crtc *pcrtc,
420cd472 8412 bool wait_for_vblank)
e7b07cee 8413{
efc8278e 8414 uint32_t i;
8a48b44c 8415 uint64_t timestamp_ns;
e7b07cee 8416 struct drm_plane *plane;
0bc9706d 8417 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 8418 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
8419 struct drm_crtc_state *new_pcrtc_state =
8420 drm_atomic_get_new_crtc_state(state, pcrtc);
8421 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
8422 struct dm_crtc_state *dm_old_crtc_state =
8423 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 8424 int planes_count = 0, vpos, hpos;
570c91d5 8425 long r;
e7b07cee 8426 unsigned long flags;
8a48b44c 8427 struct amdgpu_bo *abo;
fdd1fe57
MK
8428 uint32_t target_vblank, last_flip_vblank;
8429 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 8430 bool pflip_present = false;
bc7f670e
DF
8431 struct {
8432 struct dc_surface_update surface_updates[MAX_SURFACES];
8433 struct dc_plane_info plane_infos[MAX_SURFACES];
8434 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 8435 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 8436 struct dc_stream_update stream_update;
74aa7bd4 8437 } *bundle;
bc7f670e 8438
74aa7bd4 8439 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 8440
74aa7bd4
DF
8441 if (!bundle) {
8442 dm_error("Failed to allocate update bundle\n");
4b510503
NK
8443 goto cleanup;
8444 }
e7b07cee 8445
8ad27806
NK
8446 /*
8447 * Disable the cursor first if we're disabling all the planes.
8448 * It'll remain on the screen after the planes are re-enabled
8449 * if we don't.
8450 */
8451 if (acrtc_state->active_planes == 0)
8452 amdgpu_dm_commit_cursors(state);
8453
e7b07cee 8454 /* update planes when needed */
efc8278e 8455 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 8456 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 8457 struct drm_crtc_state *new_crtc_state;
0bc9706d 8458 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 8459 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 8460 bool plane_needs_flip;
c7af5f77 8461 struct dc_plane_state *dc_plane;
54d76575 8462 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 8463
80c218d5
NK
8464 /* Cursor plane is handled after stream updates */
8465 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 8466 continue;
e7b07cee 8467
f5ba60fe
DD
8468 if (!fb || !crtc || pcrtc != crtc)
8469 continue;
8470
8471 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8472 if (!new_crtc_state->active)
e7b07cee
HW
8473 continue;
8474
bc7f670e 8475 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 8476
74aa7bd4 8477 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 8478 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
8479 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8480 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 8481 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 8482 }
8a48b44c 8483
695af5f9
NK
8484 fill_dc_scaling_info(new_plane_state,
8485 &bundle->scaling_infos[planes_count]);
8a48b44c 8486
695af5f9
NK
8487 bundle->surface_updates[planes_count].scaling_info =
8488 &bundle->scaling_infos[planes_count];
8a48b44c 8489
f5031000 8490 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 8491
f5031000 8492 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 8493
f5031000
DF
8494 if (!plane_needs_flip) {
8495 planes_count += 1;
8496 continue;
8497 }
8a48b44c 8498
2fac0f53
CK
8499 abo = gem_to_amdgpu_bo(fb->obj[0]);
8500
f8308898
AG
8501 /*
 8502 * Wait for all fences on this FB. Do a limited wait to avoid
 8503 * deadlock during GPU reset when this fence will not signal
 8504 * but we hold the reservation lock for the BO.
8505 */
d3fae3b3
CK
8506 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8507 msecs_to_jiffies(5000));
f8308898 8508 if (unlikely(r <= 0))
ed8a5fb2 8509 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 8510
695af5f9 8511 fill_dc_plane_info_and_addr(
8ce5d842 8512 dm->adev, new_plane_state,
6eed95b0 8513 afb->tiling_flags,
695af5f9 8514 &bundle->plane_infos[planes_count],
87b7ebc2 8515 &bundle->flip_addrs[planes_count].address,
6eed95b0 8516 afb->tmz_surface, false);
87b7ebc2 8517
4711c033 8518 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
8519 new_plane_state->plane->index,
8520 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
8521
8522 bundle->surface_updates[planes_count].plane_info =
8523 &bundle->plane_infos[planes_count];
8a48b44c 8524
caff0e66
NK
8525 /*
8526 * Only allow immediate flips for fast updates that don't
 8527 * change FB pitch, DCC state, rotation or mirroring.
8528 */
f5031000 8529 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 8530 crtc->state->async_flip &&
caff0e66 8531 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 8532
f5031000
DF
8533 timestamp_ns = ktime_get_ns();
8534 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8535 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8536 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 8537
f5031000
DF
8538 if (!bundle->surface_updates[planes_count].surface) {
8539 DRM_ERROR("No surface for CRTC: id=%d\n",
8540 acrtc_attach->crtc_id);
8541 continue;
bc7f670e
DF
8542 }
8543
f5031000
DF
8544 if (plane == pcrtc->primary)
8545 update_freesync_state_on_stream(
8546 dm,
8547 acrtc_state,
8548 acrtc_state->stream,
8549 dc_plane,
8550 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 8551
4711c033 8552 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
8553 __func__,
8554 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8555 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
8556
8557 planes_count += 1;
8558
8a48b44c
DF
8559 }
8560
74aa7bd4 8561 if (pflip_present) {
634092b1
MK
8562 if (!vrr_active) {
8563 /* Use old throttling in non-vrr fixed refresh rate mode
8564 * to keep flip scheduling based on target vblank counts
8565 * working in a backwards compatible way, e.g., for
8566 * clients using the GLX_OML_sync_control extension or
8567 * DRI3/Present extension with defined target_msc.
8568 */
e3eff4b5 8569 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
8570 }
8571 else {
8572 /* For variable refresh rate mode only:
8573 * Get vblank of last completed flip to avoid > 1 vrr
8574 * flips per video frame by use of throttling, but allow
8575 * flip programming anywhere in the possibly large
8576 * variable vrr vblank interval for fine-grained flip
8577 * timing control and more opportunity to avoid stutter
8578 * on late submission of flips.
8579 */
8580 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 8581 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
8582 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8583 }
8584
fdd1fe57 8585 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
8586
8587 /*
8588 * Wait until we're out of the vertical blank period before the one
8589 * targeted by the flip
8590 */
8591 while ((acrtc_attach->enabled &&
8592 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8593 0, &vpos, &hpos, NULL,
8594 NULL, &pcrtc->hwmode)
8595 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8596 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8597 (int)(target_vblank -
e3eff4b5 8598 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
8599 usleep_range(1000, 1100);
8600 }
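	/*
	 * Note the signed cast in the loop condition above: the vblank
	 * counter is 32-bit and may wrap, so "target still in the
	 * future" is tested via a signed difference. A standalone
	 * sketch of the wrap-safe test:
	 */
#if 0 /* illustrative sketch, not built with the driver */
#include <stdint.h>
#include <stdio.h>

static int still_waiting(uint32_t target_vblank, uint32_t current_vblank)
{
	/* positive signed difference: target not yet reached */
	return (int32_t)(target_vblank - current_vblank) > 0;
}

int main(void)
{
	printf("%d\n", still_waiting(5, 3));		/* 1: keep waiting */
	printf("%d\n", still_waiting(3, 5));		/* 0: proceed */
	printf("%d\n", still_waiting(2, 0xfffffffeu));	/* 1: wait across wrap */
	return 0;
}
#endif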
8601
8fe684e9
NK
8602 /**
8603 * Prepare the flip event for the pageflip interrupt to handle.
8604 *
8605 * This only works in the case where we've already turned on the
8606 * appropriate hardware blocks (eg. HUBP) so in the transition case
8607 * from 0 -> n planes we have to skip a hardware generated event
8608 * and rely on sending it from software.
8609 */
8610 if (acrtc_attach->base.state->event &&
8611 acrtc_state->active_planes > 0) {
8a48b44c
DF
8612 drm_crtc_vblank_get(pcrtc);
8613
8614 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8615
8616 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8617 prepare_flip_isr(acrtc_attach);
8618
8619 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8620 }
8621
8622 if (acrtc_state->stream) {
8a48b44c 8623 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 8624 bundle->stream_update.vrr_infopacket =
8a48b44c 8625 &acrtc_state->stream->vrr_infopacket;
e7b07cee 8626 }
e7b07cee
HW
8627 }
8628
bc92c065 8629 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
8630 if ((planes_count || acrtc_state->active_planes == 0) &&
8631 acrtc_state->stream) {
b6e881c9 8632 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 8633 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
8634 bundle->stream_update.src = acrtc_state->stream->src;
8635 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
8636 }
8637
cf020d49
NK
8638 if (new_pcrtc_state->color_mgmt_changed) {
8639 /*
8640 * TODO: This isn't fully correct since we've actually
8641 * already modified the stream in place.
8642 */
8643 bundle->stream_update.gamut_remap =
8644 &acrtc_state->stream->gamut_remap_matrix;
8645 bundle->stream_update.output_csc_transform =
8646 &acrtc_state->stream->csc_color_matrix;
8647 bundle->stream_update.out_transfer_func =
8648 acrtc_state->stream->out_transfer_func;
8649 }
bc7f670e 8650
8a48b44c 8651 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 8652 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 8653 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 8654
e63e2491
EB
8655 /*
8656 * If FreeSync state on the stream has changed then we need to
8657 * re-adjust the min/max bounds now that DC doesn't handle this
8658 * as part of commit.
8659 */
a85ba005 8660 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
8661 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8662 dc_stream_adjust_vmin_vmax(
8663 dm->dc, acrtc_state->stream,
585d450c 8664 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
8665 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8666 }
bc7f670e 8667 mutex_lock(&dm->dc_lock);
8c322309 8668 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 8669 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
8670 amdgpu_dm_psr_disable(acrtc_state->stream);
8671
bc7f670e 8672 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 8673 bundle->surface_updates,
bc7f670e
DF
8674 planes_count,
8675 acrtc_state->stream,
efc8278e
AJ
8676 &bundle->stream_update,
8677 dc_state);
8c322309 8678
8fe684e9
NK
8679 /**
8680 * Enable or disable the interrupts on the backend.
8681 *
8682 * Most pipes are put into power gating when unused.
8683 *
8684 * When power gating is enabled on a pipe we lose the
8685 * interrupt enablement state when power gating is disabled.
8686 *
8687 * So we need to update the IRQ control state in hardware
8688 * whenever the pipe turns on (since it could be previously
8689 * power gated) or off (since some pipes can't be power gated
8690 * on some ASICs).
8691 */
8692 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
8693 dm_update_pflip_irq_state(drm_to_adev(dev),
8694 acrtc_attach);
8fe684e9 8695
8c322309 8696 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 8697 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 8698 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309
RL
8699 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8700 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
d1ebfdd8
WW
8701 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8702 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
1a365683
RL
8703 struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
8704 acrtc_state->stream->dm_stream_context;
8705
8706 if (aconn->psr_skip_count > 0)
8707 aconn->psr_skip_count--;
8708 else
8709 amdgpu_dm_psr_enable(acrtc_state->stream);
8c322309
RL
8710 }
8711
bc7f670e 8712 mutex_unlock(&dm->dc_lock);
e7b07cee 8713 }
4b510503 8714
8ad27806
NK
8715 /*
8716 * Update cursor state *after* programming all the planes.
8717 * This avoids redundant programming in the case where we're going
8718 * to be disabling a single plane - those pipes are being disabled.
8719 */
8720 if (acrtc_state->active_planes)
8721 amdgpu_dm_commit_cursors(state);
80c218d5 8722
4b510503 8723cleanup:
74aa7bd4 8724 kfree(bundle);
e7b07cee
HW
8725}
8726
6ce8f316
NK
8727static void amdgpu_dm_commit_audio(struct drm_device *dev,
8728 struct drm_atomic_state *state)
8729{
1348969a 8730 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
8731 struct amdgpu_dm_connector *aconnector;
8732 struct drm_connector *connector;
8733 struct drm_connector_state *old_con_state, *new_con_state;
8734 struct drm_crtc_state *new_crtc_state;
8735 struct dm_crtc_state *new_dm_crtc_state;
8736 const struct dc_stream_status *status;
8737 int i, inst;
8738
8739 /* Notify device removals. */
8740 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8741 if (old_con_state->crtc != new_con_state->crtc) {
8742 /* CRTC changes require notification. */
8743 goto notify;
8744 }
8745
8746 if (!new_con_state->crtc)
8747 continue;
8748
8749 new_crtc_state = drm_atomic_get_new_crtc_state(
8750 state, new_con_state->crtc);
8751
8752 if (!new_crtc_state)
8753 continue;
8754
8755 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8756 continue;
8757
8758 notify:
8759 aconnector = to_amdgpu_dm_connector(connector);
8760
8761 mutex_lock(&adev->dm.audio_lock);
8762 inst = aconnector->audio_inst;
8763 aconnector->audio_inst = -1;
8764 mutex_unlock(&adev->dm.audio_lock);
8765
8766 amdgpu_dm_audio_eld_notify(adev, inst);
8767 }
8768
8769 /* Notify audio device additions. */
8770 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8771 if (!new_con_state->crtc)
8772 continue;
8773
8774 new_crtc_state = drm_atomic_get_new_crtc_state(
8775 state, new_con_state->crtc);
8776
8777 if (!new_crtc_state)
8778 continue;
8779
8780 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8781 continue;
8782
8783 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8784 if (!new_dm_crtc_state->stream)
8785 continue;
8786
8787 status = dc_stream_get_status(new_dm_crtc_state->stream);
8788 if (!status)
8789 continue;
8790
8791 aconnector = to_amdgpu_dm_connector(connector);
8792
8793 mutex_lock(&adev->dm.audio_lock);
8794 inst = status->audio_inst;
8795 aconnector->audio_inst = inst;
8796 mutex_unlock(&adev->dm.audio_lock);
8797
8798 amdgpu_dm_audio_eld_notify(adev, inst);
8799 }
8800}
8801
1f6010a9 8802/*
27b3f4fc
LSL
8803 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8804 * @crtc_state: the DRM CRTC state
8805 * @stream_state: the DC stream state.
8806 *
8807 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8808 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8809 */
8810static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8811 struct dc_stream_state *stream_state)
8812{
b9952f93 8813 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 8814}
e7b07cee 8815
b8592b48
LL
8816/**
8817 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8818 * @state: The atomic state to commit
8819 *
8820 * This will tell DC to commit the constructed DC state from atomic_check,
 8821 * programming the hardware. Any failure here implies a hardware failure, since
8822 * atomic check should have filtered anything non-kosher.
8823 */
7578ecda 8824static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
8825{
8826 struct drm_device *dev = state->dev;
1348969a 8827 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8828 struct amdgpu_display_manager *dm = &adev->dm;
8829 struct dm_atomic_state *dm_state;
eb3dc897 8830 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 8831 uint32_t i, j;
5cc6dcbd 8832 struct drm_crtc *crtc;
0bc9706d 8833 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
8834 unsigned long flags;
8835 bool wait_for_vblank = true;
8836 struct drm_connector *connector;
c2cea706 8837 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 8838 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 8839 int crtc_disable_count = 0;
6ee90e88 8840 bool mode_set_reset_required = false;
e7b07cee 8841
e8a98235
RS
8842 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8843
e7b07cee
HW
8844 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8845
eb3dc897
NK
8846 dm_state = dm_atomic_get_new_state(state);
8847 if (dm_state && dm_state->context) {
8848 dc_state = dm_state->context;
8849 } else {
8850 /* No state changes, retain current state. */
813d20dc 8851 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
8852 ASSERT(dc_state_temp);
8853 dc_state = dc_state_temp;
8854 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8855 }
e7b07cee 8856
6d90a208
AP
8857 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8858 new_crtc_state, i) {
8859 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8860
8861 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8862
8863 if (old_crtc_state->active &&
8864 (!new_crtc_state->active ||
8865 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8866 manage_dm_interrupts(adev, acrtc, false);
8867 dc_stream_release(dm_old_crtc_state->stream);
8868 }
8869 }
8870
8976f73b
RS
8871 drm_atomic_helper_calc_timestamping_constants(state);
8872
e7b07cee 8873 /* update changed items */
0bc9706d 8874 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 8875 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8876
54d76575
LSL
8877 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8878 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 8879
4711c033 8880 DRM_DEBUG_ATOMIC(
e7b07cee
HW
8881 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8882 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8883 "connectors_changed:%d\n",
8884 acrtc->crtc_id,
0bc9706d
LSL
8885 new_crtc_state->enable,
8886 new_crtc_state->active,
8887 new_crtc_state->planes_changed,
8888 new_crtc_state->mode_changed,
8889 new_crtc_state->active_changed,
8890 new_crtc_state->connectors_changed);
e7b07cee 8891
5c68c652
VL
8892 /* Disable cursor if disabling crtc */
8893 if (old_crtc_state->active && !new_crtc_state->active) {
8894 struct dc_cursor_position position;
8895
8896 memset(&position, 0, sizeof(position));
8897 mutex_lock(&dm->dc_lock);
8898 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8899 mutex_unlock(&dm->dc_lock);
8900 }
8901
27b3f4fc
LSL
8902 /* Copy all transient state flags into dc state */
8903 if (dm_new_crtc_state->stream) {
8904 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8905 dm_new_crtc_state->stream);
8906 }
8907
e7b07cee
HW
8908 /* handles headless hotplug case, updating new_state and
8909 * aconnector as needed
8910 */
8911
54d76575 8912 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 8913
4711c033 8914 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8915
54d76575 8916 if (!dm_new_crtc_state->stream) {
e7b07cee 8917 /*
b830ebc9
HW
8918 * this could happen because of issues with
8919 * userspace notifications delivery.
8920 * In this case userspace tries to set mode on
1f6010a9
DF
8921 * display which is disconnected in fact.
8922 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
8923 * We expect reset mode will come soon.
8924 *
8925 * This can also happen when unplug is done
8926 * during resume sequence ended
8927 *
8928 * In this case, we want to pretend we still
8929 * have a sink to keep the pipe running so that
8930 * hw state is consistent with the sw state
8931 */
f1ad2f5e 8932 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
8933 __func__, acrtc->base.base.id);
8934 continue;
8935 }
8936
54d76575
LSL
8937 if (dm_old_crtc_state->stream)
8938 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 8939
97028037
LP
8940 pm_runtime_get_noresume(dev->dev);
8941
e7b07cee 8942 acrtc->enabled = true;
0bc9706d
LSL
8943 acrtc->hw_mode = new_crtc_state->mode;
8944 crtc->hwmode = new_crtc_state->mode;
6ee90e88 8945 mode_set_reset_required = true;
0bc9706d 8946 } else if (modereset_required(new_crtc_state)) {
4711c033 8947 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8948 /* i.e. reset mode */
6ee90e88 8949 if (dm_old_crtc_state->stream)
54d76575 8950 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 8951
6ee90e88 8952 mode_set_reset_required = true;
e7b07cee
HW
8953 }
8954 } /* for_each_crtc_in_state() */
8955
eb3dc897 8956 if (dc_state) {
6ee90e88 8957 /* if there is a mode set or reset, disable eDP PSR */
8958 if (mode_set_reset_required)
8959 amdgpu_dm_psr_disable_all(dm);
8960
eb3dc897 8961 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 8962 mutex_lock(&dm->dc_lock);
eb3dc897 8963 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5af50b0b
BR
8964#if defined(CONFIG_DRM_AMD_DC_DCN)
8965 /* Allow idle optimization when vblank count is 0 for display off */
8966 if (dm->active_vblank_irq_count == 0)
 8967 dc_allow_idle_optimizations(dm->dc, true);
8968#endif
674e78ac 8969 mutex_unlock(&dm->dc_lock);
fa2123db 8970 }
fe8858bb 8971
0bc9706d 8972 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8973 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8974
54d76575 8975 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8976
54d76575 8977 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 8978 const struct dc_stream_status *status =
54d76575 8979 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8980
eb3dc897 8981 if (!status)
09f609c3
LL
8982 status = dc_stream_get_status_from_state(dc_state,
8983 dm_new_crtc_state->stream);
e7b07cee 8984 if (!status)
54d76575 8985 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
8986 else
8987 acrtc->otg_inst = status->primary_otg_inst;
8988 }
8989 }
0c8620d6
BL
8990#ifdef CONFIG_DRM_AMD_DC_HDCP
8991 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8992 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8993 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8994 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8995
8996 new_crtc_state = NULL;
8997
8998 if (acrtc)
8999 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9000
9001 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9002
9003 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9004 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9005 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9006 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 9007 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
9008 continue;
9009 }
9010
9011 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
9012 hdcp_update_display(
9013 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 9014 new_con_state->hdcp_content_type,
0e86d3d4 9015 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
0c8620d6
BL
9016 }
9017#endif
e7b07cee 9018
02d6a6fc 9019 /* Handle connector state changes */
c2cea706 9020 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9021 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9022 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9023 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 9024 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 9025 struct dc_stream_update stream_update;
b232d4ed 9026 struct dc_info_packet hdr_packet;
e7b07cee 9027 struct dc_stream_status *status = NULL;
b232d4ed 9028 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 9029
efc8278e 9030 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
9031 memset(&stream_update, 0, sizeof(stream_update));
9032
44d09c6a 9033 if (acrtc) {
0bc9706d 9034 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
9035 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9036 }
0bc9706d 9037
e7b07cee 9038 /* Skip any modesets/resets */
0bc9706d 9039 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
9040 continue;
9041
54d76575 9042 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
9043 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9044
b232d4ed
NK
9045 scaling_changed = is_scaling_state_different(dm_new_con_state,
9046 dm_old_con_state);
9047
9048 abm_changed = dm_new_crtc_state->abm_level !=
9049 dm_old_crtc_state->abm_level;
9050
9051 hdr_changed =
72921cdf 9052 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
b232d4ed
NK
9053
9054 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 9055 continue;
e7b07cee 9056
b6e881c9 9057 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 9058 if (scaling_changed) {
02d6a6fc 9059 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 9060 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 9061
02d6a6fc
DF
9062 stream_update.src = dm_new_crtc_state->stream->src;
9063 stream_update.dst = dm_new_crtc_state->stream->dst;
9064 }
9065
b232d4ed 9066 if (abm_changed) {
02d6a6fc
DF
9067 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9068
9069 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9070 }
70e8ffc5 9071
b232d4ed
NK
9072 if (hdr_changed) {
9073 fill_hdr_info_packet(new_con_state, &hdr_packet);
9074 stream_update.hdr_static_metadata = &hdr_packet;
9075 }
9076
54d76575 9077 status = dc_stream_get_status(dm_new_crtc_state->stream);
57738ae4
ND
9078
9079 if (WARN_ON(!status))
9080 continue;
9081
3be5262e 9082 WARN_ON(!status->plane_count);
e7b07cee 9083
02d6a6fc
DF
9084 /*
9085 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9086 * Here we create an empty update on each plane.
9087 * To fix this, DC should permit updating only stream properties.
9088 */
9089 for (j = 0; j < status->plane_count; j++)
efc8278e 9090 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
9091
9092
9093 mutex_lock(&dm->dc_lock);
9094 dc_commit_updates_for_stream(dm->dc,
efc8278e 9095 dummy_updates,
02d6a6fc
DF
9096 status->plane_count,
9097 dm_new_crtc_state->stream,
efc8278e
AJ
9098 &stream_update,
9099 dc_state);
02d6a6fc 9100 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
9101 }
9102
b5e83f6f 9103 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 9104 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 9105 new_crtc_state, i) {
fe2a1965
LP
9106 if (old_crtc_state->active && !new_crtc_state->active)
9107 crtc_disable_count++;
9108
54d76575 9109 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 9110 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 9111
585d450c
AP
9112 /* For freesync config update on crtc state and params for irq */
9113 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 9114
66b0c973
MK
9115 /* Handle vrr on->off / off->on transitions */
9116 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9117 dm_new_crtc_state);
e7b07cee
HW
9118 }
9119
8fe684e9
NK
9120 /**
 9121 * Enable interrupts for CRTCs that are newly enabled or went through
 9122 * a modeset. This is intentionally deferred until after the front end
 9123 * state is modified, so that the OTG is on and the IRQ
 9124 * handlers don't access stale or invalid state.
9125 */
9126 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9127 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 9128#ifdef CONFIG_DEBUG_FS
86bc2219 9129 bool configure_crc = false;
8e7b6fee 9130 enum amdgpu_dm_pipe_crc_source cur_crc_src;
d98af272
WL
9131#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9132 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9133#endif
9134 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9135 cur_crc_src = acrtc->dm_irq_params.crc_src;
9136 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 9137#endif
585d450c
AP
9138 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9139
8fe684e9
NK
9140 if (new_crtc_state->active &&
9141 (!old_crtc_state->active ||
9142 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
9143 dc_stream_retain(dm_new_crtc_state->stream);
9144 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 9145 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 9146
24eb9374 9147#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
9148 /**
 9149 * Frontend may have changed, so reapply the CRC capture
9150 * settings for the stream.
9151 */
9152 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 9153
8e7b6fee 9154 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
9155 configure_crc = true;
9156#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
9157 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9158 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9159 acrtc->dm_irq_params.crc_window.update_win = true;
9160 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9161 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9162 crc_rd_wrk->crtc = crtc;
9163 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9164 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9165 }
86bc2219 9166#endif
e2881d6d 9167 }
c920888c 9168
86bc2219 9169 if (configure_crc)
bbc49fc0
WL
9170 if (amdgpu_dm_crtc_configure_crc_source(
9171 crtc, dm_new_crtc_state, cur_crc_src))
9172 DRM_DEBUG_DRIVER("Failed to configure crc source");
24eb9374 9173#endif
8fe684e9
NK
9174 }
9175 }
e7b07cee 9176
420cd472 9177 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 9178 if (new_crtc_state->async_flip)
420cd472
DF
9179 wait_for_vblank = false;
9180
e7b07cee 9181 /* update planes when needed per crtc*/
5cc6dcbd 9182 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 9183 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9184
54d76575 9185 if (dm_new_crtc_state->stream)
eb3dc897 9186 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 9187 dm, crtc, wait_for_vblank);
e7b07cee
HW
9188 }
9189
6ce8f316
NK
9190 /* Update audio instances for each connector. */
9191 amdgpu_dm_commit_audio(dev, state);
9192
7230362c
AD
9193#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9194 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9195 /* restore the backlight level */
c5b53d12 9196 if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0]))
7230362c
AD
9197 amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9198#endif
e7b07cee
HW
9199 /*
9200 * send vblank event on all events not handled in flip and
9201 * mark consumed event for drm_atomic_helper_commit_hw_done
9202 */
4a580877 9203 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 9204 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9205
0bc9706d
LSL
9206 if (new_crtc_state->event)
9207 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 9208
0bc9706d 9209 new_crtc_state->event = NULL;
e7b07cee 9210 }
4a580877 9211 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 9212
29c8f234
LL
9213 /* Signal HW programming completion */
9214 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
9215
9216 if (wait_for_vblank)
320a1274 9217 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
9218
9219 drm_atomic_helper_cleanup_planes(dev, state);
97028037 9220
5f6fab24
AD
9221 /* return the stolen vga memory back to VRAM */
9222 if (!adev->mman.keep_stolen_vga_memory)
9223 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9224 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9225
1f6010a9
DF
9226 /*
9227 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
9228 * so we can put the GPU into runtime suspend if we're not driving any
9229 * displays anymore
9230 */
fe2a1965
LP
9231 for (i = 0; i < crtc_disable_count; i++)
9232 pm_runtime_put_autosuspend(dev->dev);
97028037 9233 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
9234
9235 if (dc_state_temp)
9236 dc_release_state(dc_state_temp);
e7b07cee
HW
9237}
9238
9239
9240static int dm_force_atomic_commit(struct drm_connector *connector)
9241{
9242 int ret = 0;
9243 struct drm_device *ddev = connector->dev;
9244 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9245 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9246 struct drm_plane *plane = disconnected_acrtc->base.primary;
9247 struct drm_connector_state *conn_state;
9248 struct drm_crtc_state *crtc_state;
9249 struct drm_plane_state *plane_state;
9250
9251 if (!state)
9252 return -ENOMEM;
9253
9254 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9255
 9256 /* Construct an atomic state to restore previous display settings */
9257
9258 /*
9259 * Attach connectors to drm_atomic_state
9260 */
9261 conn_state = drm_atomic_get_connector_state(state, connector);
9262
9263 ret = PTR_ERR_OR_ZERO(conn_state);
9264 if (ret)
2dc39051 9265 goto out;
e7b07cee
HW
9266
 9267 /* Attach crtc to drm_atomic_state */
9268 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9269
9270 ret = PTR_ERR_OR_ZERO(crtc_state);
9271 if (ret)
2dc39051 9272 goto out;
e7b07cee
HW
9273
9274 /* force a restore */
9275 crtc_state->mode_changed = true;
9276
9277 /* Attach plane to drm_atomic_state */
9278 plane_state = drm_atomic_get_plane_state(state, plane);
9279
9280 ret = PTR_ERR_OR_ZERO(plane_state);
9281 if (ret)
2dc39051 9282 goto out;
e7b07cee
HW
9283
9284 /* Call commit internally with the state we just constructed */
9285 ret = drm_atomic_commit(state);
e7b07cee 9286
2dc39051 9287out:
e7b07cee 9288 drm_atomic_state_put(state);
2dc39051
VL
9289 if (ret)
9290 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
9291
9292 return ret;
9293}
9294
9295/*
1f6010a9
DF
 9296 * This function handles all cases where a set mode does not come upon hotplug.
 9297 * This includes when a display is unplugged then plugged back into the
 9298 * same port and when running without usermode desktop manager support.
e7b07cee 9299 */
3ee6b26b
AD
9300void dm_restore_drm_connector_state(struct drm_device *dev,
9301 struct drm_connector *connector)
e7b07cee 9302{
c84dec2f 9303 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
9304 struct amdgpu_crtc *disconnected_acrtc;
9305 struct dm_crtc_state *acrtc_state;
9306
9307 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9308 return;
9309
9310 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
9311 if (!disconnected_acrtc)
9312 return;
e7b07cee 9313
70e8ffc5
HW
9314 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9315 if (!acrtc_state->stream)
e7b07cee
HW
9316 return;
9317
9318 /*
9319 * If the previous sink is not released and different from the current,
9320 * we deduce we are in a state where we can not rely on usermode call
9321 * to turn on the display, so we do it here
9322 */
9323 if (acrtc_state->stream->sink != aconnector->dc_sink)
9324 dm_force_atomic_commit(&aconnector->base);
9325}
9326
1f6010a9 9327/*
e7b07cee
HW
 9328 * Grabs all modesetting locks to serialize against any blocking commits, and
 9329 * waits for completion of all non-blocking commits.
9330 */
3ee6b26b
AD
9331static int do_aquire_global_lock(struct drm_device *dev,
9332 struct drm_atomic_state *state)
e7b07cee
HW
9333{
9334 struct drm_crtc *crtc;
9335 struct drm_crtc_commit *commit;
9336 long ret;
9337
1f6010a9
DF
9338 /*
 9339 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
 9340 * ensure that when the framework releases it the
 9341 * extra locks we are locking here will get released too.
9342 */
9343 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9344 if (ret)
9345 return ret;
9346
9347 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9348 spin_lock(&crtc->commit_lock);
9349 commit = list_first_entry_or_null(&crtc->commit_list,
9350 struct drm_crtc_commit, commit_entry);
9351 if (commit)
9352 drm_crtc_commit_get(commit);
9353 spin_unlock(&crtc->commit_lock);
9354
9355 if (!commit)
9356 continue;
9357
1f6010a9
DF
9358 /*
 9359 * Make sure all pending HW programming has completed and
e7b07cee
HW
 9360 * page flips are done.
9361 */
9362 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9363
9364 if (ret > 0)
9365 ret = wait_for_completion_interruptible_timeout(
9366 &commit->flip_done, 10*HZ);
9367
9368 if (ret == 0)
9369 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 9370 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
9371
9372 drm_crtc_commit_put(commit);
9373 }
9374
9375 return ret < 0 ? ret : 0;
9376}
9377
bb47de73
NK
9378static void get_freesync_config_for_crtc(
9379 struct dm_crtc_state *new_crtc_state,
9380 struct dm_connector_state *new_con_state)
98e6436d
AK
9381{
9382 struct mod_freesync_config config = {0};
98e6436d
AK
9383 struct amdgpu_dm_connector *aconnector =
9384 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 9385 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 9386 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 9387 bool fs_vid_mode = false;
98e6436d 9388
a057ec46 9389 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
9390 vrefresh >= aconnector->min_vfreq &&
9391 vrefresh <= aconnector->max_vfreq;
bb47de73 9392
a057ec46
IB
9393 if (new_crtc_state->vrr_supported) {
9394 new_crtc_state->stream->ignore_msa_timing_param = true;
a85ba005
NC
9395 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9396
9397 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9398 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 9399 config.vsif_supported = true;
180db303 9400 config.btr = true;
98e6436d 9401
a85ba005
NC
9402 if (fs_vid_mode) {
9403 config.state = VRR_STATE_ACTIVE_FIXED;
9404 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9405 goto out;
9406 } else if (new_crtc_state->base.vrr_enabled) {
9407 config.state = VRR_STATE_ACTIVE_VARIABLE;
9408 } else {
9409 config.state = VRR_STATE_INACTIVE;
9410 }
9411 }
9412out:
bb47de73
NK
9413 new_crtc_state->freesync_config = config;
9414}
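/*
 * Sketch of the support gate above with hypothetical panel limits
 * (say 48-144 Hz parsed from the EDID): VRR is only advertised when
 * the mode's nominal refresh falls inside the panel's range.
 */
#if 0 /* illustrative sketch, not built with the driver */
#include <stdbool.h>

static bool vrr_supported(int vrefresh_hz, int min_vfreq, int max_vfreq)
{
	return vrefresh_hz >= min_vfreq && vrefresh_hz <= max_vfreq;
}
/* vrr_supported(60, 48, 144) -> true; vrr_supported(30, 48, 144) -> false */
#endif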
98e6436d 9415
bb47de73
NK
9416static void reset_freesync_config_for_crtc(
9417 struct dm_crtc_state *new_crtc_state)
9418{
9419 new_crtc_state->vrr_supported = false;
98e6436d 9420
bb47de73
NK
9421 memset(&new_crtc_state->vrr_infopacket, 0,
9422 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
9423}
9424
a85ba005
NC
9425static bool
9426is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9427 struct drm_crtc_state *new_crtc_state)
9428{
9429 struct drm_display_mode old_mode, new_mode;
9430
9431 if (!old_crtc_state || !new_crtc_state)
9432 return false;
9433
9434 old_mode = old_crtc_state->mode;
9435 new_mode = new_crtc_state->mode;
9436
9437 if (old_mode.clock == new_mode.clock &&
9438 old_mode.hdisplay == new_mode.hdisplay &&
9439 old_mode.vdisplay == new_mode.vdisplay &&
9440 old_mode.htotal == new_mode.htotal &&
9441 old_mode.vtotal != new_mode.vtotal &&
9442 old_mode.hsync_start == new_mode.hsync_start &&
9443 old_mode.vsync_start != new_mode.vsync_start &&
9444 old_mode.hsync_end == new_mode.hsync_end &&
9445 old_mode.vsync_end != new_mode.vsync_end &&
9446 old_mode.hskew == new_mode.hskew &&
9447 old_mode.vscan == new_mode.vscan &&
9448 (old_mode.vsync_end - old_mode.vsync_start) ==
9449 (new_mode.vsync_end - new_mode.vsync_start))
9450 return true;
9451
9452 return false;
9453}
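/*
 * In other words: the two modes must be identical except that the
 * vertical front porch moved (vtotal/vsync_start/vsync_end differ
 * while the vsync pulse width stays the same). A sketch with
 * hypothetical numbers based on the standard 1080p vertical timing:
 */
#if 0 /* illustrative sketch, not built with the driver */
struct vtiming { int vdisplay, vsync_start, vsync_end, vtotal; };

static const struct vtiming base      = { 1080, 1084, 1089, 1125 };
static const struct vtiming stretched = { 1080, 1365, 1370, 1406 };
/*
 * vsync pulse width: 1089 - 1084 == 1370 - 1365, so only the front
 * porch (and with it the refresh rate) changed -> no full modeset.
 */
#endif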
9454
9455static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9456 uint64_t num, den, res;
9457 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9458
9459 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9460
9461 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9462 den = (unsigned long long)new_crtc_state->mode.htotal *
9463 (unsigned long long)new_crtc_state->mode.vtotal;
9464
9465 res = div_u64(num, den);
9466 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9467}
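/*
 * Worked example of the fixed-refresh math above, using a common
 * 1080p CEA timing (pixel clock 148500 kHz, htotal 2200, vtotal 1125):
 *
 *   148500 * 1000 * 1000000 / (2200 * 1125) = 60000000 uHz = 60 Hz
 *
 * Stretching vtotal to 1406 lines (front porch only, as checked by
 * is_timing_unchanged_for_freesync()) gives ~48 Hz with no modeset:
 */
#if 0 /* illustrative sketch, not built with the driver */
#include <stdint.h>
#include <stdio.h>

static uint64_t refresh_uhz(uint64_t clock_khz, uint64_t htotal, uint64_t vtotal)
{
	return clock_khz * 1000 * 1000000 / (htotal * vtotal);
}

int main(void)
{
	printf("%llu\n", (unsigned long long)refresh_uhz(148500, 2200, 1125)); /* 60000000 */
	printf("%llu\n", (unsigned long long)refresh_uhz(148500, 2200, 1406)); /* 48008534, ~48 Hz */
	return 0;
}
#endif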
9468
4b9674e5
LL
9469static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9470 struct drm_atomic_state *state,
9471 struct drm_crtc *crtc,
9472 struct drm_crtc_state *old_crtc_state,
9473 struct drm_crtc_state *new_crtc_state,
9474 bool enable,
9475 bool *lock_and_validation_needed)
e7b07cee 9476{
eb3dc897 9477 struct dm_atomic_state *dm_state = NULL;
54d76575 9478 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 9479 struct dc_stream_state *new_stream;
62f55537 9480 int ret = 0;
d4d4a645 9481
1f6010a9
DF
9482 /*
9483 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9484 * update changed items
9485 */
4b9674e5
LL
9486 struct amdgpu_crtc *acrtc = NULL;
9487 struct amdgpu_dm_connector *aconnector = NULL;
9488 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9489 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 9490
4b9674e5 9491 new_stream = NULL;
9635b754 9492
4b9674e5
LL
9493 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9494 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9495 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 9496 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 9497
4b9674e5
LL
9498 /* TODO This hack should go away */
9499 if (aconnector && enable) {
9500 /* Make sure fake sink is created in plug-in scenario */
9501 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9502 &aconnector->base);
9503 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9504 &aconnector->base);
19f89e23 9505
4b9674e5
LL
9506 if (IS_ERR(drm_new_conn_state)) {
9507 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9508 goto fail;
9509 }
19f89e23 9510
4b9674e5
LL
9511 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9512 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 9513
02d35a67
JFZ
9514 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9515 goto skip_modeset;
9516
cbd14ae7
SW
9517 new_stream = create_validate_stream_for_sink(aconnector,
9518 &new_crtc_state->mode,
9519 dm_new_conn_state,
9520 dm_old_crtc_state->stream);
19f89e23 9521
4b9674e5
LL
9522 /*
9523 * we can have no stream on ACTION_SET if a display
9524 * was disconnected during S3, in this case it is not an
9525 * error, the OS will be updated after detection, and
9526 * will do the right thing on next atomic commit
9527 */
19f89e23 9528
4b9674e5
LL
9529 if (!new_stream) {
9530 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9531 __func__, acrtc->base.base.id);
9532 ret = -ENOMEM;
9533 goto fail;
9534 }
e7b07cee 9535
3d4e52d0
VL
9536 /*
9537 * TODO: Check VSDB bits to decide whether this should
9538 * be enabled or not.
9539 */
9540 new_stream->triggered_crtc_reset.enabled =
9541 dm->force_timing_sync;
9542
4b9674e5 9543 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 9544
88694af9
NK
9545 ret = fill_hdr_info_packet(drm_new_conn_state,
9546 &new_stream->hdr_static_metadata);
9547 if (ret)
9548 goto fail;
9549
7e930949
NK
9550 /*
9551 * If we already removed the old stream from the context
9552 * (and set the new stream to NULL) then we can't reuse
9553 * the old stream even if the stream and scaling are unchanged.
 9554 * We would hit the BUG_ON below and get a black screen.
9555 *
9556 * TODO: Refactor this function to allow this check to work
9557 * in all conditions.
9558 */
a85ba005
NC
9559 if (amdgpu_freesync_vid_mode &&
9560 dm_new_crtc_state->stream &&
9561 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9562 goto skip_modeset;
9563
7e930949
NK
9564 if (dm_new_crtc_state->stream &&
9565 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
9566 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9567 new_crtc_state->mode_changed = false;
9568 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9569 new_crtc_state->mode_changed);
62f55537 9570 }
4b9674e5 9571 }
b830ebc9 9572
02d35a67 9573 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
9574 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9575 goto skip_modeset;
e7b07cee 9576
4711c033 9577 DRM_DEBUG_ATOMIC(
4b9674e5
LL
9578 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9579 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9580 "connectors_changed:%d\n",
9581 acrtc->crtc_id,
9582 new_crtc_state->enable,
9583 new_crtc_state->active,
9584 new_crtc_state->planes_changed,
9585 new_crtc_state->mode_changed,
9586 new_crtc_state->active_changed,
9587 new_crtc_state->connectors_changed);
62f55537 9588
4b9674e5
LL
9589 /* Remove stream for any changed/disabled CRTC */
9590 if (!enable) {
62f55537 9591
4b9674e5
LL
9592 if (!dm_old_crtc_state->stream)
9593 goto skip_modeset;
eb3dc897 9594
a85ba005
NC
9595 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9596 is_timing_unchanged_for_freesync(new_crtc_state,
9597 old_crtc_state)) {
9598 new_crtc_state->mode_changed = false;
9599 DRM_DEBUG_DRIVER(
9600 "Mode change not required for front porch change, "
9601 "setting mode_changed to %d",
9602 new_crtc_state->mode_changed);
9603
9604 set_freesync_fixed_config(dm_new_crtc_state);
9605
9606 goto skip_modeset;
9607 } else if (amdgpu_freesync_vid_mode && aconnector &&
9608 is_freesync_video_mode(&new_crtc_state->mode,
9609 aconnector)) {
9610 set_freesync_fixed_config(dm_new_crtc_state);
9611 }
9612
4b9674e5
LL
9613 ret = dm_atomic_get_state(state, &dm_state);
9614 if (ret)
9615 goto fail;
e7b07cee 9616
4b9674e5
LL
9617 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9618 crtc->base.id);
62f55537 9619
4b9674e5
LL
9620 /* i.e. reset mode */
9621 if (dc_remove_stream_from_ctx(
9622 dm->dc,
9623 dm_state->context,
9624 dm_old_crtc_state->stream) != DC_OK) {
9625 ret = -EINVAL;
9626 goto fail;
9627 }
62f55537 9628
4b9674e5
LL
9629 dc_stream_release(dm_old_crtc_state->stream);
9630 dm_new_crtc_state->stream = NULL;
bb47de73 9631
4b9674e5 9632 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 9633
4b9674e5 9634 *lock_and_validation_needed = true;
62f55537 9635
4b9674e5
LL
9636 } else {/* Add stream for any updated/enabled CRTC */
9637 /*
9638 * Quick fix to prevent NULL pointer on new_stream when
9639 * added MST connectors not found in existing crtc_state in the chained mode
9640 * TODO: need to dig out the root cause of that
9641 */
9642 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9643 goto skip_modeset;
62f55537 9644
4b9674e5
LL
9645 if (modereset_required(new_crtc_state))
9646 goto skip_modeset;
62f55537 9647
4b9674e5
LL
9648 if (modeset_required(new_crtc_state, new_stream,
9649 dm_old_crtc_state->stream)) {
62f55537 9650
4b9674e5 9651 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 9652
4b9674e5
LL
9653 ret = dm_atomic_get_state(state, &dm_state);
9654 if (ret)
9655 goto fail;
27b3f4fc 9656
4b9674e5 9657 dm_new_crtc_state->stream = new_stream;
62f55537 9658
4b9674e5 9659 dc_stream_retain(new_stream);
1dc90497 9660
4711c033
LT
9661 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9662 crtc->base.id);
1dc90497 9663
4b9674e5
LL
9664 if (dc_add_stream_to_ctx(
9665 dm->dc,
9666 dm_state->context,
9667 dm_new_crtc_state->stream) != DC_OK) {
9668 ret = -EINVAL;
9669 goto fail;
9b690ef3
BL
9670 }
9671
4b9674e5
LL
9672 *lock_and_validation_needed = true;
9673 }
9674 }
e277adc5 9675
4b9674e5
LL
9676skip_modeset:
9677 /* Release extra reference */
9678 if (new_stream)
9679 dc_stream_release(new_stream);
e277adc5 9680
4b9674e5
LL
9681 /*
9682 * We want to do dc stream updates that do not require a
9683 * full modeset below.
9684 */
2afda735 9685 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
9686 return 0;
9687 /*
9688 * Given above conditions, the dc state cannot be NULL because:
9689 * 1. We're in the process of enabling CRTCs (just been added
9690 * to the dc context, or already is on the context)
9691 * 2. Has a valid connector attached, and
9692 * 3. Is currently active and enabled.
9693 * => The dc stream state currently exists.
9694 */
9695 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 9696
4b9674e5 9697 /* Scaling or underscan settings */
c521fc31
RL
9698 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9699 drm_atomic_crtc_needs_modeset(new_crtc_state))
4b9674e5
LL
9700 update_stream_scaling_settings(
9701 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 9702
b05e2c5e
DF
9703 /* ABM settings */
9704 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9705
4b9674e5
LL
9706 /*
9707 * Color management settings. We also update color properties
9708 * when a modeset is needed, to ensure it gets reprogrammed.
9709 */
9710 if (dm_new_crtc_state->base.color_mgmt_changed ||
9711 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 9712 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
9713 if (ret)
9714 goto fail;
62f55537 9715 }
e7b07cee 9716
4b9674e5
LL
9717 /* Update Freesync settings. */
9718 get_freesync_config_for_crtc(dm_new_crtc_state,
9719 dm_new_conn_state);
9720
62f55537 9721 return ret;
9635b754
DS
9722
9723fail:
9724 if (new_stream)
9725 dc_stream_release(new_stream);
9726 return ret;
62f55537 9727}
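
/*
 * Usage note (comment only): dm_update_crtc_state() is called twice from
 * amdgpu_dm_atomic_check() - first with enable == false to release old
 * dc_stream_state objects and remove them from the DC context, then with
 * enable == true to retain and add the new streams. Either direction sets
 * *lock_and_validation_needed, which steers atomic check into the full
 * dc_validate_global_state() path instead of the fast-update path.
 */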

static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient to
	 * determine when we need to reset all the planes on the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* Src/dst size and scaling updates. */
		if (old_other_state->src_w != new_other_state->src_w ||
		    old_other_state->src_h != new_other_state->src_h ||
		    old_other_state->crtc_w != new_other_state->crtc_w ||
		    old_other_state->crtc_h != new_other_state->crtc_h)
			return true;

		/* Rotation / mirroring updates. */
		if (old_other_state->rotation != new_other_state->rotation)
			return true;

		/* Blending updates. */
		if (old_other_state->pixel_blend_mode !=
		    new_other_state->pixel_blend_mode)
			return true;

		/* Alpha updates. */
		if (old_other_state->alpha != new_other_state->alpha)
			return true;

		/* Colorspace changes. */
		if (old_other_state->color_range != new_other_state->color_range ||
		    old_other_state->color_encoding != new_other_state->color_encoding)
			return true;

		/* Framebuffer checks fall at the end. */
		if (!old_other_state->fb || !new_other_state->fb)
			continue;

		/* Pixel format changes can require bandwidth updates. */
		if (old_other_state->fb->format != new_other_state->fb->format)
			return true;

		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;

		/* Tiling and DCC changes also require bandwidth updates. */
		if (old_afb->tiling_flags != new_afb->tiling_flags ||
		    old_afb->base.modifier != new_afb->base.modifier)
			return true;
	}

	return false;
}
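
/*
 * Walk-through (comment only): assume an overlay plane is newly enabled on
 * a CRTC whose primary plane is otherwise untouched. When should_reset_plane()
 * runs for that primary, the loop above visits the overlay, sees its CRTC
 * change from NULL (old_other_state->crtc != new_other_state->crtc) and
 * returns true, so the primary is also removed from and re-added to the DC
 * context to keep z-order and pipe assignment consistent.
 */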

static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
			      struct drm_plane_state *new_plane_state,
			      struct drm_framebuffer *fb)
{
	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
	unsigned int pitch;
	bool linear;

	if (fb->width > new_acrtc->max_cursor_width ||
	    fb->height > new_acrtc->max_cursor_height) {
		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 new_plane_state->fb->width,
				 new_plane_state->fb->height);
		return -EINVAL;
	}
	if (new_plane_state->src_w != fb->width << 16 ||
	    new_plane_state->src_h != fb->height << 16) {
		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
		return -EINVAL;
	}

	/* Pitch in pixels */
	pitch = fb->pitches[0] / fb->format->cpp[0];

	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
				 fb->width, pitch);
		return -EINVAL;
	}

	switch (pitch) {
	case 64:
	case 128:
	case 256:
		/* FB pitch is supported by cursor plane */
		break;
	default:
		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
		return -EINVAL;
	}

	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
		} else {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
		}
		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear");
			return -EINVAL;
		}
	}

	return 0;
}
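
/*
 * Worked example (comment only): a 64x64 ARGB8888 cursor has
 * fb->pitches[0] == 256 bytes and fb->format->cpp[0] == 4, giving a pitch of
 * 64 pixels. That matches fb->width and the supported 64/128/256 pitches, so
 * the FB is accepted; a 96-pixel pitch would be rejected by the switch above
 * even when width and height are within the hardware maximums.
 */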

static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}
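
/*
 * Usage sketch (comment only, variable names illustrative):
 * amdgpu_dm_atomic_check() drives this helper in two passes per plane -
 *
 *	ret = dm_update_plane_state(dc, state, plane, old_s, new_s,
 *				    false, &lock_needed);  // remove pass
 *	ret = dm_update_plane_state(dc, state, plane, old_s, new_s,
 *				    true, &lock_needed);   // add pass
 *
 * mirroring DC's requirement that a changed plane be removed from the
 * context before it is re-added with its new attributes.
 */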

static int dm_check_crtc_cursor(struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *new_crtc_state)
{
	struct drm_plane_state *new_cursor_state, *new_primary_state;
	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;

	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * blending properties match the primary plane's.
	 */
	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
	if (!new_cursor_state || !new_primary_state ||
	    !new_cursor_state->fb || !new_primary_state->fb) {
		return 0;
	}

	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
			 (new_cursor_state->src_w >> 16);
	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
			 (new_cursor_state->src_h >> 16);

	primary_scale_w = new_primary_state->crtc_w * 1000 /
			  (new_primary_state->src_w >> 16);
	primary_scale_h = new_primary_state->crtc_h * 1000 /
			  (new_primary_state->src_h >> 16);

	if (cursor_scale_w != primary_scale_w ||
	    cursor_scale_h != primary_scale_h) {
		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
		return -EINVAL;
	}

	return 0;
}
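
/*
 * Scale math example (comment only): a 64x64 cursor (src_w == 64 << 16)
 * drawn 64 pixels wide gives cursor_scale_w == 64 * 1000 / 64 == 1000. If
 * the primary plane scans out a 1920-wide surface upscaled to 3840
 * (primary_scale_w == 2000), the scales differ and the check above returns
 * -EINVAL: the shared cursor pipe would scale the cursor 2x even though
 * userspace asked for 1x.
 */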

#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif
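
/*
 * Background (comment only): when one MST stream below a DSC-capable branch
 * device changes, the bandwidth left for its siblings - and therefore their
 * DSC configuration - can change as well. Pulling every CRTC that shares the
 * MST manager into the atomic state here forces them all through validation
 * even though userspace only touched one of them.
 */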

static int validate_overlay(struct drm_atomic_state *state)
{
	int i;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct drm_plane_state *primary_state, *overlay_state = NULL;

	/* Check if primary plane is contained inside overlay */
	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
				return 0;

			overlay_state = new_plane_state;
			continue;
		}
	}

	/* check if we're making changes to the overlay plane */
	if (!overlay_state)
		return 0;

	/* check if overlay plane is enabled */
	if (!overlay_state->crtc)
		return 0;

	/* find the primary plane for the CRTC that the overlay is enabled on */
	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
	if (IS_ERR(primary_state))
		return PTR_ERR(primary_state);

	/* check if primary plane is enabled */
	if (!primary_state->crtc)
		return 0;

	/* Perform the bounds check to ensure the overlay plane covers the primary */
	if (primary_state->crtc_x < overlay_state->crtc_x ||
	    primary_state->crtc_y < overlay_state->crtc_y ||
	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
		return -EINVAL;
	}

	return 0;
}
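
/*
 * Bounds-check example (comment only): with the primary plane at (0, 0)
 * sized 1920x1080 and an overlay at (0, 0) sized 1280x720, the comparison
 * 0 + 1920 > 0 + 1280 fails containment and -EINVAL is returned. A
 * full-screen 1920x1080 overlay over the same primary passes, since the
 * hardware cursor then has an overlay pipe beneath it wherever it moves.
 */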

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable.
 * It's important not to modify the existing DC state; otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any
 * outstanding flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, or a negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;
	struct dm_crtc_state *dm_old_crtc_state;

	trace_amdgpu_dm_atomic_check_begin(state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (dc_resource_is_dsc_encoding_supported(dc)) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    dm_old_crtc_state->dsc_force_changed == false)
			continue;

		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
		if (ret)
			goto fail;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;

		if (dm_old_crtc_state->dsc_force_changed)
			new_crtc_state->mode_changed = true;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	ret = validate_overlay(state);
	if (ret)
		goto fail;

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	/* Check cursor planes scaling */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
		if (ret)
			goto fail;
	}

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-change validation was removed due to the inability to
	 * commit a new stream into the context w/o causing a full reset. Need
	 * to decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scale or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We currently have to stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
			goto fail;

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of the MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
						 UPDATE_TYPE_FULL :
						 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
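
/*
 * DPCD note (comment only): DP_DOWN_STREAM_PORT_COUNT is DPCD address 0x0007
 * and DP_MSA_TIMING_PAR_IGNORED is bit 6 of that byte, so a sink reporting
 * e.g. 0x40 can regenerate its timing without the MSA parameters. That is
 * the prerequisite for driving the panel with a variable refresh range
 * instead of a fixed EDID mode.
 */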

static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
		unsigned int offset,
		unsigned int total_length,
		uint8_t *data,
		unsigned int length,
		struct amdgpu_hdmi_vsdb_info *vsdb)
{
	bool res;
	union dmub_rb_cmd cmd;
	struct dmub_cmd_send_edid_cea *input;
	struct dmub_cmd_edid_cea_output *output;

	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	input = &cmd.edid_cea.data.input;

	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
	cmd.edid_cea.header.sub_type = 0;
	cmd.edid_cea.header.payload_bytes =
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
	input->total_length = total_length;
	memcpy(input->payload, data, length);

	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
	if (!res) {
		DRM_ERROR("EDID CEA parser failed\n");
		return false;
	}

	output = &cmd.edid_cea.data.output;

	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
		if (!output->ack.success) {
			DRM_ERROR("EDID CEA ack failed at offset %d\n",
				  output->ack.offset);
		}
	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
		if (!output->amd_vsdb.vsdb_found)
			return false;

		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
	} else {
		DRM_WARN("Unknown EDID CEA parser results\n");
		return false;
	}

	return true;
}
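
/*
 * Chunking example (comment only): a 128-byte CEA extension block is pushed
 * to DMUB as sixteen 8-byte DMUB_CMD__EDID_CEA commands (offset 0, 8, ...,
 * 120, total_length 128). Intermediate chunks are answered with
 * DMUB_CMD__EDID_CEA_ACK; only once the final chunk completes the block can
 * DMUB reply with DMUB_CMD__EDID_CEA_AMD_VSDB and the parsed refresh range.
 */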

static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* EDID block sent completed, expect result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
			if (res) {
				/* amd vsdb found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* not amd vsdb */
			return false;
		}

		/* check for ack */
		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
		if (!res)
			return false;
	}

	return false;
}

static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
			return false;
	}

	return vsdb_info->freesync_supported;
}

static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);

	if (adev->dm.dmub_srv)
		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
	else
		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
}

static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}
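
/*
 * Return-value note (comment only): on success the index of the CEA
 * extension that yielded the VSDB is returned, so a caller can index
 * edid->detailed_timings[] with it as amdgpu_dm_update_freesync_caps() does;
 * -ENODEV means no EDID, no CEA extension, or no AMD VSDB.
 */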

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;

	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		if (edid_check_required == true && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;

				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information is provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported.
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
		}
	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
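
/*
 * Worked example (comment only): a DP monitor whose EDID range descriptor
 * reports min_vfreq == 48 and max_vfreq == 144 satisfies 144 - 48 > 10, so
 * freesync_capable becomes true and the connector's "vrr_capable" property
 * is set. A fixed 60 Hz panel advertising a 59-61 Hz range stays at or
 * below the 10 Hz threshold and is left incapable.
 */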

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
				struct aux_payload *payload, enum aux_return_code_type *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		return -1;
	}
	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;

	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;

		// For the read case, copy data to the payload
		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
		    (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
			       adev->dm.dmub_notify->aux_reply.length);
	}

	return adev->dm.dmub_notify->aux_reply.length;
}
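
/*
 * Timing note (comment only): wait_for_completion_interruptible_timeout() is
 * given 10 * HZ jiffies, i.e. a 10 second ceiling for DMUB to signal
 * dmub_aux_transfer_done. A return of 0 means the timeout elapsed and the
 * transfer is reported as AUX_RET_ERROR_TIMEOUT; otherwise the DMUB
 * notification carries both the AUX reply code and any read payload copied
 * above.
 */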