/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

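/*
 * Rough layout of the signed DMUB/DMCU ucode image, as implied by the
 * offset math in dm_dmub_sw_init()/dm_dmub_hw_init() below (a sketch; the
 * exact PSP signing format is defined elsewhere):
 *
 *   ucode_array_offset_bytes
 *   |
 *   v
 *   | PSP header (0x100) | instruction constants | PSP footer (0x100) | BSS/data |
 *
 * fw_inst_const skips the PSP header, its size excludes both header and
 * footer, and fw_bss_data starts inst_const_bytes past the ucode offset.
 */
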
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

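/*
 * High-level init flow, as wired up later in this file: dm_sw_init() loads
 * the DMUB/DMCU firmware, amdgpu_dm_init() creates the DC core via
 * dc_create() and brings up the DMUB service in dm_dmub_hw_init(), and
 * amdgpu_dm_initialize_drm_device() then builds the DRM planes, CRTCs,
 * encoders and connectors on top of the links DC discovered.
 */
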
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

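		/*
		 * Pack the results the way the legacy register format expects
		 * them: the low 16 bits carry the vertical value, the high 16
		 * bits the paired value (h_position for *position,
		 * v_blank_end for *vbl).
		 */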
		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc->dm_irq_params.stream,
					&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt event.
 */
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
			do {
				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			} while (notify.pending_notification);

			if (adev->dm.dmub_notify)
				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
				complete(&adev->dm.dmub_aux_transfer_done);
			// TODO : HPD Implementation

		} else {
			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
		}
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif
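
/*
 * Recap of dm_dmub_outbox1_low_irq() above: it first drains pending DMUB
 * notifications (an AUX reply completes dmub_aux_transfer_done; HPD handling
 * is still a TODO), then reads up to DMUB_TRACE_MAX_READ trace-buffer
 * entries; the final ASSERT presumably flags the firmware producing trace
 * entries faster than one interrupt can drain them.
 */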

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
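
/*
 * Sizing note for amdgpu_dm_fbc_init() above: the compressor buffer covers
 * the largest htotal * vtotal mode advertised by the eDP connector,
 * assuming 4 bytes per pixel (the max_size * 4 above), so every smaller
 * mode can reuse the same allocation.
 */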

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}
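
/*
 * Audio wiring recap: amdgpu_dm_audio_init() above registers a DRM audio
 * component whose get_eld() hook lets the HDA driver pull a connector's ELD
 * by pin; amdgpu_dm_audio_eld_notify() below pokes the bound component
 * whenever a pin's ELD changes.
 */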

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
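
/*
 * The fb[] windows handed to dmub_srv_hw_init() above follow the region
 * layout computed in dm_dmub_sw_init(): window 0 holds the instruction
 * constants, window 2 the BSS/data, window 3 a copy of the VBIOS, and
 * windows 4-6 the mailbox, trace buffer and firmware state, which is why
 * only 4-6 are zeroed here rather than copied from the firmware image.
 */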

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (add 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
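
/*
 * A quick worked example for the shift amounts above: the system aperture
 * boundaries are programmed in 256KB units, hence addr >> 18 on the way in
 * and << 18 on the way out; the AGP window uses 16MB units (>> 24 / << 24);
 * and the GART page table addresses are 4KB-aligned (>> 12 / << 12), with
 * bits 44 and up carried in the separate high_part fields.
 */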
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
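
/*
 * The counter updated by event_mall_stutter() gates MALL/stutter idle
 * optimizations: dc_allow_idle_optimizations() only permits them while no
 * CRTC has vblank interrupts enabled (active_vblank_irq_count == 0).
 */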
#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
			goto error;
		}
		amdgpu_dm_outbox_init(adev);
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_workqueue) {
		adev->dm.vblank_workqueue->dm = NULL;
		kfree(adev->dm.vblank_workqueue);
		adev->dm.vblank_workqueue = NULL;
	}
#endif

	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
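
/*
 * PSP-loading recap for load_dmcu_fw() above: the DMCU image is registered
 * as two ucode entries, the ERAM payload (total ucode size minus
 * intv_size_bytes) and the interrupt vector (intv_size_bytes), each
 * page-aligned when accounted into adev->firmware.fw_size.
 */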
1469
743b9786
NK
1470static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1471{
1472 struct amdgpu_device *adev = ctx;
1473
1474 return dm_read_reg(adev->dm.dc->ctx, address);
1475}
1476
1477static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1478 uint32_t value)
1479{
1480 struct amdgpu_device *adev = ctx;
1481
1482 return dm_write_reg(adev->dm.dc->ctx, address, value);
1483}
1484
1485static int dm_dmub_sw_init(struct amdgpu_device *adev)
1486{
1487 struct dmub_srv_create_params create_params;
8c7aea40
NK
1488 struct dmub_srv_region_params region_params;
1489 struct dmub_srv_region_info region_info;
1490 struct dmub_srv_fb_params fb_params;
1491 struct dmub_srv_fb_info *fb_info;
1492 struct dmub_srv *dmub_srv;
743b9786
NK
1493 const struct dmcub_firmware_header_v1_0 *hdr;
1494 const char *fw_name_dmub;
1495 enum dmub_asic dmub_asic;
1496 enum dmub_status status;
1497 int r;
1498
1499 switch (adev->asic_type) {
1500 case CHIP_RENOIR:
1501 dmub_asic = DMUB_ASIC_DCN21;
1502 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
71c0fd92
RL
1503 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1504 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
743b9786 1505 break;
79037324
BL
1506 case CHIP_SIENNA_CICHLID:
1507 dmub_asic = DMUB_ASIC_DCN30;
1508 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1509 break;
5ce868fc
BL
1510 case CHIP_NAVY_FLOUNDER:
1511 dmub_asic = DMUB_ASIC_DCN30;
1512 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
79037324 1513 break;
469989ca
RL
1514 case CHIP_VANGOGH:
1515 dmub_asic = DMUB_ASIC_DCN301;
1516 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1517 break;
2a411205
BL
1518 case CHIP_DIMGREY_CAVEFISH:
1519 dmub_asic = DMUB_ASIC_DCN302;
1520 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1521 break;
656fe9b6
AP
1522 case CHIP_BEIGE_GOBY:
1523 dmub_asic = DMUB_ASIC_DCN303;
1524 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1525 break;
743b9786
NK
1526
1527 default:
1528 /* ASIC doesn't support DMUB. */
1529 return 0;
1530 }
1531
743b9786
NK
1532 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1533 if (r) {
1534 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1535 return 0;
1536 }
1537
1538 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1539 if (r) {
1540 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1541 return 0;
1542 }
1543
743b9786 1544 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
743b9786 1545
9a6ed547
NK
1546 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1547 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1548 AMDGPU_UCODE_ID_DMCUB;
1549 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1550 adev->dm.dmub_fw;
1551 adev->firmware.fw_size +=
1552 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
743b9786 1553
9a6ed547
NK
1554 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1555 adev->dm.dmcub_fw_version);
1556 }
1557
1558 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
743b9786 1559
8c7aea40
NK
1560 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1561 dmub_srv = adev->dm.dmub_srv;
1562
1563 if (!dmub_srv) {
1564 DRM_ERROR("Failed to allocate DMUB service!\n");
1565 return -ENOMEM;
1566 }
1567
1568 memset(&create_params, 0, sizeof(create_params));
1569 create_params.user_ctx = adev;
1570 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1571 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1572 create_params.asic = dmub_asic;
1573
1574 /* Create the DMUB service. */
1575 status = dmub_srv_create(dmub_srv, &create_params);
1576 if (status != DMUB_STATUS_OK) {
1577 DRM_ERROR("Error creating DMUB service: %d\n", status);
1578 return -EINVAL;
1579 }
1580
1581 /* Calculate the size of all the regions for the DMUB service. */
1582 memset(&region_params, 0, sizeof(region_params));
1583
1584 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1585 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1586 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1587 region_params.vbios_size = adev->bios_size;
0922b899 1588 region_params.fw_bss_data = region_params.bss_data_size ?
1f0674fd
NK
1589 adev->dm.dmub_fw->data +
1590 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
0922b899 1591 le32_to_cpu(hdr->inst_const_bytes) : NULL;
a576b345
NK
1592 region_params.fw_inst_const =
1593 adev->dm.dmub_fw->data +
1594 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1595 PSP_HEADER_BYTES;
8c7aea40
NK
1596
1597 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1598 &region_info);
1599
1600 if (status != DMUB_STATUS_OK) {
1601 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1602 return -EINVAL;
1603 }
1604
1605 /*
1606 * Allocate a framebuffer based on the total size of all the regions.
1607 * TODO: Move this into GART.
1608 */
1609 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1610 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1611 &adev->dm.dmub_bo_gpu_addr,
1612 &adev->dm.dmub_bo_cpu_addr);
1613 if (r)
1614 return r;
1615
1616 /* Rebase the regions on the framebuffer address. */
1617 memset(&fb_params, 0, sizeof(fb_params));
1618 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1619 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1620 fb_params.region_info = &region_info;
1621
1622 adev->dm.dmub_fb_info =
1623 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1624 fb_info = adev->dm.dmub_fb_info;
1625
1626 if (!fb_info) {
1627 DRM_ERROR(
1628 "Failed to allocate framebuffer info for DMUB service!\n");
1629 return -ENOMEM;
1630 }
1631
1632 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1633 if (status != DMUB_STATUS_OK) {
1634 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1635 return -EINVAL;
1636 }
1637
1638 return 0;
1639}
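
The function above follows a calc-then-rebase pattern: dmub_srv_calc_region_info() reports how much memory every DMUB window needs, one VRAM buffer is allocated for the total, and dmub_srv_calc_fb_info() rebases each window onto that buffer. Below is a minimal self-contained sketch of the same pattern; the types and names are hypothetical stand-ins, not the real dmub_srv interface.

/*
 * Sketch of the "calc regions, allocate once, rebase" flow used by
 * dm_dmub_sw_init(). Hypothetical types, for illustration only.
 */
#include <stdint.h>

#define NUM_WINDOWS 3

struct window {
	uint64_t offset;	/* offset of this window inside the shared buffer */
	uint64_t size;
};

/* Step 1: lay the windows out back to back and return the total size. */
static uint64_t calc_region_info(struct window *w, const uint64_t *sizes)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < NUM_WINDOWS; i++) {
		w[i].offset = total;
		w[i].size = sizes[i];
		total += sizes[i];
	}
	return total;
}

/*
 * Step 2: once a single buffer of 'total' bytes exists (the VRAM BO in
 * the driver), each window's CPU address is simply base + offset.
 */
static void *window_cpu_addr(void *buf_cpu_base, const struct window *w)
{
	return (uint8_t *)buf_cpu_base + w->offset;
}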
1640
a94d5569
DF
1641static int dm_sw_init(void *handle)
1642{
1643 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
743b9786
NK
1644 int r;
1645
1646 r = dm_dmub_sw_init(adev);
1647 if (r)
1648 return r;
a94d5569
DF
1649
1650 return load_dmcu_fw(adev);
1651}
1652
4562236b
HW
1653static int dm_sw_fini(void *handle)
1654{
a94d5569
DF
1655 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1656
8c7aea40
NK
1657 kfree(adev->dm.dmub_fb_info);
1658 adev->dm.dmub_fb_info = NULL;
1659
743b9786
NK
1660 if (adev->dm.dmub_srv) {
1661 dmub_srv_destroy(adev->dm.dmub_srv);
1662 adev->dm.dmub_srv = NULL;
1663 }
1664
1665 release_firmware(adev->dm.dmub_fw);
1666 adev->dm.dmub_fw = NULL;
743b9786 1667
75e1658e
ND
1668 release_firmware(adev->dm.fw_dmcu);
1669 adev->dm.fw_dmcu = NULL;
a94d5569 1670
4562236b
HW
1671 return 0;
1672}
1673
7abcf6b5 1674static int detect_mst_link_for_all_connectors(struct drm_device *dev)
4562236b 1675{
c84dec2f 1676 struct amdgpu_dm_connector *aconnector;
4562236b 1677 struct drm_connector *connector;
f8d2d39e 1678 struct drm_connector_list_iter iter;
7abcf6b5 1679 int ret = 0;
4562236b 1680
f8d2d39e
LP
1681 drm_connector_list_iter_begin(dev, &iter);
1682 drm_for_each_connector_iter(connector, &iter) {
b349f76e 1683 aconnector = to_amdgpu_dm_connector(connector);
30ec2b97
JFZ
1684 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1685 aconnector->mst_mgr.aux) {
f1ad2f5e 1686 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1687 aconnector,
1688 aconnector->base.base.id);
7abcf6b5
AG
1689
1690 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1691 if (ret < 0) {
1692 DRM_ERROR("DM_MST: Failed to start MST\n");
1693 aconnector->dc_link->type =
1694 dc_connection_single;
1695 break;
7abcf6b5 1696 }
f8d2d39e 1697 }
4562236b 1698 }
f8d2d39e 1699 drm_connector_list_iter_end(&iter);
4562236b 1700
7abcf6b5
AG
1701 return ret;
1702}
1703
1704static int dm_late_init(void *handle)
1705{
42e67c3b 1706 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7abcf6b5 1707
bbf854dc
DF
1708 struct dmcu_iram_parameters params;
1709 unsigned int linear_lut[16];
1710 int i;
17bdb4a8 1711 struct dmcu *dmcu = NULL;
5cb32419 1712 bool ret = true;
bbf854dc 1713
17bdb4a8
JFZ
1714 dmcu = adev->dm.dc->res_pool->dmcu;
1715
bbf854dc
DF
1716 for (i = 0; i < 16; i++)
1717 linear_lut[i] = 0xFFFF * i / 15;
1718
1719 params.set = 0;
1720 params.backlight_ramping_start = 0xCCCC;
1721 params.backlight_ramping_reduction = 0xCCCCCCCC;
1722 params.backlight_lut_array_size = 16;
1723 params.backlight_lut_array = linear_lut;
1724
2ad0cdf9
AK
1725	/* Min backlight level after ABM reduction; don't allow below 1%:
1726 * 0xFFFF x 0.01 = 0x28F
1727 */
1728 params.min_abm_backlight = 0x28F;
1729
5cb32419
RL
1730	/* In the case where ABM is implemented on dmcub,
1731	* the dmcu object will be NULL.
1732 * ABM 2.4 and up are implemented on dmcub.
1733 */
1734 if (dmcu)
1735 ret = dmcu_load_iram(dmcu, params);
1736 else if (adev->dm.dc->ctx->dmub_srv)
ecfe7569 1737 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
bbf854dc 1738
14ed1c90
HW
1739 if (!ret)
1740 return -EINVAL;
bbf854dc 1741
4a580877 1742 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
4562236b
HW
1743}
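
The constants fed into dmcu_iram_parameters above can be sanity-checked with plain integer arithmetic; the block below is an illustrative check (my arithmetic, not driver code).

/* Illustrative check of the dm_late_init() backlight constants. */
#include <assert.h>

static void check_backlight_constants(void)
{
	unsigned int linear_lut[16];
	int i;

	/* A 16-entry linear ramp over the full 16-bit backlight range. */
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	assert(linear_lut[0] == 0x0000);
	assert(linear_lut[15] == 0xFFFF);

	/* 1% of full scale: 0xFFFF * 0.01 = 655.35, i.e. 655 = 0x28F. */
	assert(0xFFFF / 100 == 0x28F);
}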
1744
1745static void s3_handle_mst(struct drm_device *dev, bool suspend)
1746{
c84dec2f 1747 struct amdgpu_dm_connector *aconnector;
4562236b 1748 struct drm_connector *connector;
f8d2d39e 1749 struct drm_connector_list_iter iter;
fe7553be
LP
1750 struct drm_dp_mst_topology_mgr *mgr;
1751 int ret;
1752 bool need_hotplug = false;
4562236b 1753
f8d2d39e
LP
1754 drm_connector_list_iter_begin(dev, &iter);
1755 drm_for_each_connector_iter(connector, &iter) {
fe7553be
LP
1756 aconnector = to_amdgpu_dm_connector(connector);
1757 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1758 aconnector->mst_port)
1759 continue;
1760
1761 mgr = &aconnector->mst_mgr;
1762
1763 if (suspend) {
1764 drm_dp_mst_topology_mgr_suspend(mgr);
1765 } else {
6f85f738 1766 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1767 if (ret < 0) {
1768 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1769 need_hotplug = true;
1770 }
1771 }
4562236b 1772 }
f8d2d39e 1773 drm_connector_list_iter_end(&iter);
fe7553be
LP
1774
1775 if (need_hotplug)
1776 drm_kms_helper_hotplug_event(dev);
4562236b
HW
1777}
1778
9340dfd3
HW
1779static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1780{
1781 struct smu_context *smu = &adev->smu;
1782 int ret = 0;
1783
1784 if (!is_support_sw_smu(adev))
1785 return 0;
1786
1787	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1788	* on the Windows driver dc implementation.
1789	* For Navi1x, clock settings of dcn watermarks are fixed. The settings
1790	* should be passed to smu during boot up and resume from s3.
1791	* boot up: dc calculates dcn watermark clock settings within dc_create,
1792	* dcn20_resource_construct
1793	* then calls the pplib functions below to pass the settings to smu:
1794	* smu_set_watermarks_for_clock_ranges
1795	* smu_set_watermarks_table
1796	* navi10_set_watermarks_table
1797	* smu_write_watermarks_table
1798	*
1799	* For Renoir, clock settings of dcn watermarks are also fixed values.
1800	* dc has implemented a different flow for the Windows driver:
1801	* dc_hardware_init / dc_set_power_state
1802	* dcn10_init_hw
1803	* notify_wm_ranges
1804	* set_wm_ranges
1805	* -- Linux
1806	* smu_set_watermarks_for_clock_ranges
1807	* renoir_set_watermarks_table
1808	* smu_write_watermarks_table
1809	*
1810	* For Linux,
1811	* dc_hardware_init -> amdgpu_dm_init
1812	* dc_set_power_state --> dm_resume
1813	*
1814	* Therefore, this function applies to Navi10/12/14 but not Renoir.
1816	*/
1817	switch (adev->asic_type) {
1818 case CHIP_NAVI10:
1819 case CHIP_NAVI14:
1820 case CHIP_NAVI12:
1821 break;
1822 default:
1823 return 0;
1824 }
1825
e7a95eea
EQ
1826 ret = smu_write_watermarks_table(smu);
1827 if (ret) {
1828 DRM_ERROR("Failed to update WMTABLE!\n");
1829 return ret;
9340dfd3
HW
1830 }
1831
9340dfd3
HW
1832 return 0;
1833}
1834
b8592b48
LL
1835/**
1836 * dm_hw_init() - Initialize DC device
28d687ea 1837 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1838 *
1839 * Initialize the &struct amdgpu_display_manager device. This involves calling
1840 * the initializers of each DM component, then populating the struct with them.
1841 *
1842 * Although the function implies hardware initialization, both hardware and
1843 * software are initialized here. Splitting them out to their relevant init
1844 * hooks is a future TODO item.
1845 *
1846 * Some notable things that are initialized here:
1847 *
1848 * - Display Core, both software and hardware
1849 * - DC modules that we need (freesync and color management)
1850 * - DRM software states
1851 * - Interrupt sources and handlers
1852 * - Vblank support
1853 * - Debug FS entries, if enabled
1854 */
4562236b
HW
1855static int dm_hw_init(void *handle)
1856{
1857 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1858 /* Create DAL display manager */
1859 amdgpu_dm_init(adev);
4562236b
HW
1860 amdgpu_dm_hpd_init(adev);
1861
4562236b
HW
1862 return 0;
1863}
1864
b8592b48
LL
1865/**
1866 * dm_hw_fini() - Teardown DC device
28d687ea 1867 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
1868 *
1869 * Teardown components within &struct amdgpu_display_manager that require
1870 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1871 * were loaded. Also flush IRQ workqueues and disable them.
1872 */
4562236b
HW
1873static int dm_hw_fini(void *handle)
1874{
1875 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1876
1877 amdgpu_dm_hpd_fini(adev);
1878
1879 amdgpu_dm_irq_fini(adev);
21de3396 1880 amdgpu_dm_fini(adev);
4562236b
HW
1881 return 0;
1882}
1883
cdaae837
BL
1884
1885static int dm_enable_vblank(struct drm_crtc *crtc);
1886static void dm_disable_vblank(struct drm_crtc *crtc);
1887
1888static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1889 struct dc_state *state, bool enable)
1890{
1891 enum dc_irq_source irq_source;
1892 struct amdgpu_crtc *acrtc;
1893 int rc = -EBUSY;
1894 int i = 0;
1895
1896 for (i = 0; i < state->stream_count; i++) {
1897 acrtc = get_crtc_by_otg_inst(
1898 adev, state->stream_status[i].primary_otg_inst);
1899
1900 if (acrtc && state->stream_status[i].plane_count != 0) {
1901 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1902 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4711c033
LT
1903	DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1904 acrtc->crtc_id, enable ? "en" : "dis", rc);
cdaae837
BL
1905 if (rc)
1906 DRM_WARN("Failed to %s pflip interrupts\n",
1907 enable ? "enable" : "disable");
1908
1909 if (enable) {
1910 rc = dm_enable_vblank(&acrtc->base);
1911 if (rc)
1912 DRM_WARN("Failed to enable vblank interrupts\n");
1913 } else {
1914 dm_disable_vblank(&acrtc->base);
1915 }
1916
1917 }
1918 }
1919
1920}
1921
dfd84d90 1922static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
1923{
1924 struct dc_state *context = NULL;
1925 enum dc_status res = DC_ERROR_UNEXPECTED;
1926 int i;
1927 struct dc_stream_state *del_streams[MAX_PIPES];
1928 int del_streams_count = 0;
1929
1930 memset(del_streams, 0, sizeof(del_streams));
1931
1932 context = dc_create_state(dc);
1933 if (context == NULL)
1934 goto context_alloc_fail;
1935
1936 dc_resource_state_copy_construct_current(dc, context);
1937
1938 /* First remove from context all streams */
1939 for (i = 0; i < context->stream_count; i++) {
1940 struct dc_stream_state *stream = context->streams[i];
1941
1942 del_streams[del_streams_count++] = stream;
1943 }
1944
1945 /* Remove all planes for removed streams and then remove the streams */
1946 for (i = 0; i < del_streams_count; i++) {
1947 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1948 res = DC_FAIL_DETACH_SURFACES;
1949 goto fail;
1950 }
1951
1952 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1953 if (res != DC_OK)
1954 goto fail;
1955 }
1956
1957
1958 res = dc_validate_global_state(dc, context, false);
1959
1960 if (res != DC_OK) {
1961 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1962 goto fail;
1963 }
1964
1965 res = dc_commit_state(dc, context);
1966
1967fail:
1968 dc_release_state(context);
1969
1970context_alloc_fail:
1971 return res;
1972}
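
In outline, the function above walks the standard dc_state lifecycle; the block below is a comment-only restatement of the calls it makes, not new driver code.

/*
 * dc_state lifecycle as used by amdgpu_dm_commit_zero_streams():
 *
 *   context = dc_create_state(dc);                         // new state object
 *   dc_resource_state_copy_construct_current(dc, context); // seed from current
 *   ... dc_rem_all_planes_for_stream() /
 *       dc_remove_stream_from_ctx() per stream ...         // mutate the copy
 *   dc_validate_global_state(dc, context, false);          // must be DC_OK
 *   dc_commit_state(dc, context);                          // program hardware
 *   dc_release_state(context);                             // drop reference
 */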
1973
4562236b
HW
1974static int dm_suspend(void *handle)
1975{
1976 struct amdgpu_device *adev = handle;
1977 struct amdgpu_display_manager *dm = &adev->dm;
1978 int ret = 0;
4562236b 1979
53b3f8f4 1980 if (amdgpu_in_reset(adev)) {
cdaae837 1981 mutex_lock(&dm->dc_lock);
98ab5f35
BL
1982
1983#if defined(CONFIG_DRM_AMD_DC_DCN)
1984 dc_allow_idle_optimizations(adev->dm.dc, false);
1985#endif
1986
cdaae837
BL
1987 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1988
1989 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1990
1991 amdgpu_dm_commit_zero_streams(dm->dc);
1992
1993 amdgpu_dm_irq_suspend(adev);
1994
1995 return ret;
1996 }
4562236b 1997
d2f0b53b 1998 WARN_ON(adev->dm.cached_state);
4a580877 1999 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 2000
4a580877 2001 s3_handle_mst(adev_to_drm(adev), true);
4562236b 2002
4562236b
HW
2003 amdgpu_dm_irq_suspend(adev);
2004
a3621485 2005
32f5062d 2006 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 2007
1c2075d4 2008 return 0;
4562236b
HW
2009}
2010
1daf8c63
AD
2011static struct amdgpu_dm_connector *
2012amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2013 struct drm_crtc *crtc)
4562236b
HW
2014{
2015 uint32_t i;
c2cea706 2016 struct drm_connector_state *new_con_state;
4562236b
HW
2017 struct drm_connector *connector;
2018 struct drm_crtc *crtc_from_state;
2019
c2cea706
LSL
2020 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2021 crtc_from_state = new_con_state->crtc;
4562236b
HW
2022
2023 if (crtc_from_state == crtc)
c84dec2f 2024 return to_amdgpu_dm_connector(connector);
4562236b
HW
2025 }
2026
2027 return NULL;
2028}
2029
fbbdadf2
BL
2030static void emulated_link_detect(struct dc_link *link)
2031{
2032 struct dc_sink_init_data sink_init_data = { 0 };
2033 struct display_sink_capability sink_caps = { 0 };
2034 enum dc_edid_status edid_status;
2035 struct dc_context *dc_ctx = link->ctx;
2036 struct dc_sink *sink = NULL;
2037 struct dc_sink *prev_sink = NULL;
2038
2039 link->type = dc_connection_none;
2040 prev_sink = link->local_sink;
2041
2042 if (prev_sink)
2043 dc_sink_release(prev_sink);
fbbdadf2
BL
2044
2045 switch (link->connector_signal) {
2046 case SIGNAL_TYPE_HDMI_TYPE_A: {
2047 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2048 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2049 break;
2050 }
2051
2052 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2053 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2054 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2055 break;
2056 }
2057
2058 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2059 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2060 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2061 break;
2062 }
2063
2064 case SIGNAL_TYPE_LVDS: {
2065 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2066 sink_caps.signal = SIGNAL_TYPE_LVDS;
2067 break;
2068 }
2069
2070 case SIGNAL_TYPE_EDP: {
2071 sink_caps.transaction_type =
2072 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2073 sink_caps.signal = SIGNAL_TYPE_EDP;
2074 break;
2075 }
2076
2077 case SIGNAL_TYPE_DISPLAY_PORT: {
2078 sink_caps.transaction_type =
2079 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2080 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2081 break;
2082 }
2083
2084 default:
2085 DC_ERROR("Invalid connector type! signal:%d\n",
2086 link->connector_signal);
2087 return;
2088 }
2089
2090 sink_init_data.link = link;
2091 sink_init_data.sink_signal = sink_caps.signal;
2092
2093 sink = dc_sink_create(&sink_init_data);
2094 if (!sink) {
2095 DC_ERROR("Failed to create sink!\n");
2096 return;
2097 }
2098
dcd5fb82 2099 /* dc_sink_create returns a new reference */
fbbdadf2
BL
2100 link->local_sink = sink;
2101
2102 edid_status = dm_helpers_read_local_edid(
2103 link->ctx,
2104 link,
2105 sink);
2106
2107 if (edid_status != EDID_OK)
2108 DC_ERROR("Failed to read EDID");
2109
2110}
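
emulated_link_detect() is only meaningful for user-forced connectors; the caller pattern, condensed from dm_resume() and the HPD handlers later in this file, looks like this:

/* Condensed caller pattern for emulated_link_detect() (from this file). */
if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
	DRM_ERROR("KMS: Failed to detect connector\n");

if (aconnector->base.force && new_connection_type == dc_connection_none)
	emulated_link_detect(aconnector->dc_link);	/* fake the sink */
else
	dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);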
2111
cdaae837
BL
2112static void dm_gpureset_commit_state(struct dc_state *dc_state,
2113 struct amdgpu_display_manager *dm)
2114{
2115 struct {
2116 struct dc_surface_update surface_updates[MAX_SURFACES];
2117 struct dc_plane_info plane_infos[MAX_SURFACES];
2118 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2119 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2120 struct dc_stream_update stream_update;
2121	} *bundle;
2122 int k, m;
2123
2124 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2125
2126 if (!bundle) {
2127 dm_error("Failed to allocate update bundle\n");
2128 goto cleanup;
2129 }
2130
2131 for (k = 0; k < dc_state->stream_count; k++) {
2132 bundle->stream_update.stream = dc_state->streams[k];
2133
2134 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2135 bundle->surface_updates[m].surface =
2136 dc_state->stream_status->plane_states[m];
2137 bundle->surface_updates[m].surface->force_full_update =
2138 true;
2139 }
2140 dc_commit_updates_for_stream(
2141 dm->dc, bundle->surface_updates,
2142 dc_state->stream_status->plane_count,
efc8278e 2143 dc_state->streams[k], &bundle->stream_update, dc_state);
2144 }
2145
2146cleanup:
2147 kfree(bundle);
2148
2149 return;
2150}
2151
3c4d55c9
AP
2152static void dm_set_dpms_off(struct dc_link *link)
2153{
2154 struct dc_stream_state *stream_state;
2155 struct amdgpu_dm_connector *aconnector = link->priv;
2156 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2157 struct dc_stream_update stream_update;
2158 bool dpms_off = true;
2159
2160 memset(&stream_update, 0, sizeof(stream_update));
2161 stream_update.dpms_off = &dpms_off;
2162
2163 mutex_lock(&adev->dm.dc_lock);
2164 stream_state = dc_stream_find_from_link(link);
2165
2166 if (stream_state == NULL) {
2167 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2168 mutex_unlock(&adev->dm.dc_lock);
2169 return;
2170 }
2171
2172 stream_update.stream = stream_state;
2173 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2174 stream_state, &stream_update,
2175 stream_state->ctx->dc->current_state);
3c4d55c9
AP
2176 mutex_unlock(&adev->dm.dc_lock);
2177}
2178
4562236b
HW
2179static int dm_resume(void *handle)
2180{
2181 struct amdgpu_device *adev = handle;
4a580877 2182 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2183 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2184 struct amdgpu_dm_connector *aconnector;
4562236b 2185 struct drm_connector *connector;
f8d2d39e 2186 struct drm_connector_list_iter iter;
4562236b 2187 struct drm_crtc *crtc;
c2cea706 2188 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
2189 struct dm_crtc_state *dm_new_crtc_state;
2190 struct drm_plane *plane;
2191 struct drm_plane_state *new_plane_state;
2192 struct dm_plane_state *dm_new_plane_state;
113b7a01 2193 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2194 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
2195 struct dc_state *dc_state;
2196 int i, r, j;
4562236b 2197
53b3f8f4 2198 if (amdgpu_in_reset(adev)) {
cdaae837
BL
2199 dc_state = dm->cached_dc_state;
2200
2201 r = dm_dmub_hw_init(adev);
2202 if (r)
2203 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2204
2205 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2206 dc_resume(dm->dc);
2207
2208 amdgpu_dm_irq_resume_early(adev);
2209
2210 for (i = 0; i < dc_state->stream_count; i++) {
2211 dc_state->streams[i]->mode_changed = true;
2212 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2213 dc_state->stream_status->plane_states[j]->update_flags.raw
2214 = 0xffffffff;
2215 }
2216 }
2217
2218 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2219
cdaae837
BL
2220 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2221
2222 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2223
2224 dc_release_state(dm->cached_dc_state);
2225 dm->cached_dc_state = NULL;
2226
2227 amdgpu_dm_irq_resume_late(adev);
2228
2229 mutex_unlock(&dm->dc_lock);
2230
2231 return 0;
2232 }
113b7a01
LL
2233 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2234 dc_release_state(dm_state->context);
2235 dm_state->context = dc_create_state(dm->dc);
2236 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2237 dc_resource_state_construct(dm->dc, dm_state->context);
2238
8c7aea40
NK
2239 /* Before powering on DC we need to re-initialize DMUB. */
2240 r = dm_dmub_hw_init(adev);
2241 if (r)
2242 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2243
a80aa93d
ML
2244 /* power on hardware */
2245 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2246
4562236b
HW
2247 /* program HPD filter */
2248 dc_resume(dm->dc);
2249
4562236b
HW
2250 /*
2251 * early enable HPD Rx IRQ, should be done before set mode as short
2252 * pulse interrupts are used for MST
2253 */
2254 amdgpu_dm_irq_resume_early(adev);
2255
d20ebea8	2256	/* On resume we need to rewrite the MSTM control bits to enable MST */
684cd480
LP
2257 s3_handle_mst(ddev, false);
2258
4562236b	2259	/* Do detection */
f8d2d39e
LP
2260 drm_connector_list_iter_begin(ddev, &iter);
2261 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2262 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2263
2264 /*
2265	* This is the case when traversing through already created
2266	* MST connectors; they should be skipped.
2267 */
2268 if (aconnector->mst_port)
2269 continue;
2270
03ea364c 2271 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2272 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2273 DRM_ERROR("KMS: Failed to detect connector\n");
2274
2275 if (aconnector->base.force && new_connection_type == dc_connection_none)
2276 emulated_link_detect(aconnector->dc_link);
2277 else
2278 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2279
2280 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2281 aconnector->fake_enable = false;
2282
dcd5fb82
MF
2283 if (aconnector->dc_sink)
2284 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2285 aconnector->dc_sink = NULL;
2286 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2287 mutex_unlock(&aconnector->hpd_lock);
4562236b 2288 }
f8d2d39e 2289 drm_connector_list_iter_end(&iter);
4562236b 2290
1f6010a9 2291 /* Force mode set in atomic commit */
a80aa93d 2292 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2293 new_crtc_state->active_changed = true;
4f346e65 2294
fcb4019e
LSL
2295 /*
2296 * atomic_check is expected to create the dc states. We need to release
2297 * them here, since they were duplicated as part of the suspend
2298 * procedure.
2299 */
a80aa93d 2300 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2301 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2302 if (dm_new_crtc_state->stream) {
2303 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2304 dc_stream_release(dm_new_crtc_state->stream);
2305 dm_new_crtc_state->stream = NULL;
2306 }
2307 }
2308
a80aa93d 2309 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2310 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2311 if (dm_new_plane_state->dc_state) {
2312 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2313 dc_plane_state_release(dm_new_plane_state->dc_state);
2314 dm_new_plane_state->dc_state = NULL;
2315 }
2316 }
2317
2d1af6a1 2318 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2319
a80aa93d 2320 dm->cached_state = NULL;
0a214e2f 2321
9faa4237 2322 amdgpu_dm_irq_resume_late(adev);
4562236b 2323
9340dfd3
HW
2324 amdgpu_dm_smu_write_watermarks_table(adev);
2325
2d1af6a1 2326 return 0;
4562236b
HW
2327}
2328
b8592b48
LL
2329/**
2330 * DOC: DM Lifecycle
2331 *
2332 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2333 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2334 * the base driver's device list to be initialized and torn down accordingly.
2335 *
2336 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2337 */
2338
4562236b
HW
2339static const struct amd_ip_funcs amdgpu_dm_funcs = {
2340 .name = "dm",
2341 .early_init = dm_early_init,
7abcf6b5 2342 .late_init = dm_late_init,
4562236b
HW
2343 .sw_init = dm_sw_init,
2344 .sw_fini = dm_sw_fini,
2345 .hw_init = dm_hw_init,
2346 .hw_fini = dm_hw_fini,
2347 .suspend = dm_suspend,
2348 .resume = dm_resume,
2349 .is_idle = dm_is_idle,
2350 .wait_for_idle = dm_wait_for_idle,
2351 .check_soft_reset = dm_check_soft_reset,
2352 .soft_reset = dm_soft_reset,
2353 .set_clockgating_state = dm_set_clockgating_state,
2354 .set_powergating_state = dm_set_powergating_state,
2355};
2356
2357const struct amdgpu_ip_block_version dm_ip_block =
2358{
2359 .type = AMD_IP_BLOCK_TYPE_DCE,
2360 .major = 1,
2361 .minor = 0,
2362 .rev = 0,
2363 .funcs = &amdgpu_dm_funcs,
2364};
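
dm_ip_block is consumed by the base driver's IP block machinery; the sketch below shows how such a block is typically added during per-ASIC setup. The exact call sites live in the SOC files (e.g. soc15.c) and are shown here only for orientation.

/*
 * Sketch: wiring the DM IP block into the base driver during per-ASIC
 * setup (illustrative; the real call sites are in the soc files).
 */
#if defined(CONFIG_DRM_AMD_DC)
	amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif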
2365
ca3268c4 2366
b8592b48
LL
2367/**
2368 * DOC: atomic
2369 *
2370 * *WIP*
2371 */
0a323b84 2372
b3663f70 2373static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2374 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2375 .get_format_info = amd_get_format_info,
366c1baa 2376 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2377 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2378 .atomic_commit = drm_atomic_helper_commit,
54f5499a
AG
2379};
2380
2381static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2382 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2383};
2384
94562810
RS
2385static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2386{
2387 u32 max_cll, min_cll, max, min, q, r;
2388 struct amdgpu_dm_backlight_caps *caps;
2389 struct amdgpu_display_manager *dm;
2390 struct drm_connector *conn_base;
2391 struct amdgpu_device *adev;
ec11fe37 2392 struct dc_link *link = NULL;
94562810
RS
2393 static const u8 pre_computed_values[] = {
2394 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2395 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2396
2397 if (!aconnector || !aconnector->dc_link)
2398 return;
2399
ec11fe37 2400 link = aconnector->dc_link;
2401 if (link->connector_signal != SIGNAL_TYPE_EDP)
2402 return;
2403
94562810 2404 conn_base = &aconnector->base;
1348969a 2405 adev = drm_to_adev(conn_base->dev);
94562810
RS
2406 dm = &adev->dm;
2407 caps = &dm->backlight_caps;
2408 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2409 caps->aux_support = false;
2410 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2411 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2412
2413 if (caps->ext_caps->bits.oled == 1 ||
2414 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2415 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2416 caps->aux_support = true;
2417
7a46f05e
TI
2418 if (amdgpu_backlight == 0)
2419 caps->aux_support = false;
2420 else if (amdgpu_backlight == 1)
2421 caps->aux_support = true;
2422
94562810
RS
2423 /* From the specification (CTA-861-G), for calculating the maximum
2424 * luminance we need to use:
2425 * Luminance = 50*2**(CV/32)
2426 * Where CV is a one-byte value.
2427 * For calculating this expression we may need float point precision;
2428 * to avoid this complexity level, we take advantage that CV is divided
2429	* by a constant. From Euclid's division algorithm, we know that CV
2430 * can be written as: CV = 32*q + r. Next, we replace CV in the
2431 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2432 * need to pre-compute the value of r/32. For pre-computing the values
2433 * We just used the following Ruby line:
2434 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2435 * The results of the above expressions can be verified at
2436 * pre_computed_values.
2437 */
2438 q = max_cll >> 5;
2439 r = max_cll % 32;
2440 max = (1 << q) * pre_computed_values[r];
2441
2442 // min luminance: maxLum * (CV/255)^2 / 100
2443 q = DIV_ROUND_CLOSEST(min_cll, 255);
2444 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2445
2446 caps->aux_max_input_signal = max;
2447 caps->aux_min_input_signal = min;
2448}
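
A worked example of the CTA-861-G luminance math above (my arithmetic): for max_cll = 65 we get q = 2 and r = 1, so max = (1 << 2) * 51 = 204 nits, which matches 50 * 2^(65/32) ≈ 204.4.

/* Illustrative check of the max-luminance computation above. */
#include <assert.h>

static void check_max_cll_math(void)
{
	static const unsigned char pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	unsigned int max_cll = 65;	/* example CV value */
	unsigned int q = max_cll >> 5;	/* 65 / 32 = 2 */
	unsigned int r = max_cll % 32;	/* 65 % 32 = 1 */

	/* 50 * 2**(65/32) = 50 * 4 * 2**(1/32) ~= 204.4 nits */
	assert((1 << q) * pre_computed_values[r] == 204);
}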
2449
97e51c16
HW
2450void amdgpu_dm_update_connector_after_detect(
2451 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2452{
2453 struct drm_connector *connector = &aconnector->base;
2454 struct drm_device *dev = connector->dev;
b73a22d3 2455 struct dc_sink *sink;
4562236b
HW
2456
2457 /* MST handled by drm_mst framework */
2458 if (aconnector->mst_mgr.mst_state == true)
2459 return;
2460
4562236b 2461 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2462 if (sink)
2463 dc_sink_retain(sink);
4562236b 2464
1f6010a9
DF
2465 /*
2466	* Edid mgmt connector gets first update only in mode_valid hook and then
2467	* the connector sink is set to either fake or physical sink, depending on link status.
1f6010a9 2468 * Skip if already done during boot.
4562236b
HW
2469 */
2470 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2471 && aconnector->dc_em_sink) {
2472
1f6010a9
DF
2473 /*
2474	* For S3 resume with headless, use the em_sink to fake the stream
2475	* because on resume connector->sink is set to NULL.
4562236b
HW
2476 */
2477 mutex_lock(&dev->mode_config.mutex);
2478
2479 if (sink) {
922aa1e1 2480 if (aconnector->dc_sink) {
98e6436d 2481 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2482 /*
2483 * retain and release below are used to
2484 * bump up refcount for sink because the link doesn't point
2485 * to it anymore after disconnect, so on next crtc to connector
2486 * reshuffle by UMD we will get into unwanted dc_sink release
2487 */
dcd5fb82 2488 dc_sink_release(aconnector->dc_sink);
922aa1e1 2489 }
4562236b 2490 aconnector->dc_sink = sink;
dcd5fb82 2491 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2492 amdgpu_dm_update_freesync_caps(connector,
2493 aconnector->edid);
4562236b 2494 } else {
98e6436d 2495 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2496 if (!aconnector->dc_sink) {
4562236b 2497 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2498 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2499 }
4562236b
HW
2500 }
2501
2502 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2503
2504 if (sink)
2505 dc_sink_release(sink);
4562236b
HW
2506 return;
2507 }
2508
2509 /*
2510 * TODO: temporary guard to look for proper fix
2511 * if this sink is MST sink, we should not do anything
2512 */
dcd5fb82
MF
2513 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2514 dc_sink_release(sink);
4562236b 2515 return;
dcd5fb82 2516 }
4562236b
HW
2517
2518 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2519 /*
2520 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2521 * Do nothing!!
2522 */
f1ad2f5e 2523 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2524 aconnector->connector_id);
dcd5fb82
MF
2525 if (sink)
2526 dc_sink_release(sink);
4562236b
HW
2527 return;
2528 }
2529
f1ad2f5e 2530 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2531 aconnector->connector_id, aconnector->dc_sink, sink);
2532
2533 mutex_lock(&dev->mode_config.mutex);
2534
1f6010a9
DF
2535 /*
2536 * 1. Update status of the drm connector
2537 * 2. Send an event and let userspace tell us what to do
2538 */
4562236b 2539 if (sink) {
1f6010a9
DF
2540 /*
2541 * TODO: check if we still need the S3 mode update workaround.
2542 * If yes, put it here.
2543 */
c64b0d6b 2544 if (aconnector->dc_sink) {
98e6436d 2545 amdgpu_dm_update_freesync_caps(connector, NULL);
c64b0d6b
VL
2546 dc_sink_release(aconnector->dc_sink);
2547 }
4562236b
HW
2548
2549 aconnector->dc_sink = sink;
dcd5fb82 2550 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2551 if (sink->dc_edid.length == 0) {
4562236b 2552 aconnector->edid = NULL;
e6142dd5
AP
2553 if (aconnector->dc_link->aux_mode) {
2554 drm_dp_cec_unset_edid(
2555 &aconnector->dm_dp_aux.aux);
2556 }
900b3cb1 2557 } else {
4562236b 2558 aconnector->edid =
e6142dd5 2559 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2560
c555f023 2561 drm_connector_update_edid_property(connector,
e6142dd5 2562 aconnector->edid);
e6142dd5
AP
2563 if (aconnector->dc_link->aux_mode)
2564 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2565 aconnector->edid);
4562236b 2566 }
e6142dd5 2567
98e6436d 2568 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2569 update_connector_ext_caps(aconnector);
4562236b 2570 } else {
e86e8947 2571 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2572 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2573 drm_connector_update_edid_property(connector, NULL);
4562236b 2574 aconnector->num_modes = 0;
dcd5fb82 2575 dc_sink_release(aconnector->dc_sink);
4562236b 2576 aconnector->dc_sink = NULL;
5326c452 2577 aconnector->edid = NULL;
0c8620d6
BL
2578#ifdef CONFIG_DRM_AMD_DC_HDCP
2579 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2580 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2581 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2582#endif
4562236b
HW
2583 }
2584
2585 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 2586
0f877894
OV
2587 update_subconnector_property(aconnector);
2588
dcd5fb82
MF
2589 if (sink)
2590 dc_sink_release(sink);
4562236b
HW
2591}
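
The retain/release calls above follow one rule: every pointer that outlives the function owns a reference, and every local borrow taken at the top is dropped at the bottom. The block below restates calls already present in the function, in outline; it is not new code.

/*
 * dc_sink refcount discipline in amdgpu_dm_update_connector_after_detect():
 *
 *   sink = link->local_sink;
 *   if (sink)
 *           dc_sink_retain(sink);              // pin the local borrow
 *   ...
 *   dc_sink_release(aconnector->dc_sink);      // drop ref on the old sink
 *   aconnector->dc_sink = sink;
 *   dc_sink_retain(aconnector->dc_sink);       // stored pointer owns a ref
 *   ...
 *   if (sink)
 *           dc_sink_release(sink);             // unpin the local borrow
 */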
2592
2593static void handle_hpd_irq(void *param)
2594{
c84dec2f 2595 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2596 struct drm_connector *connector = &aconnector->base;
2597 struct drm_device *dev = connector->dev;
fbbdadf2 2598 enum dc_connection_type new_connection_type = dc_connection_none;
1348969a 2599 struct amdgpu_device *adev = drm_to_adev(dev);
b972b4f9 2600#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 2601 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 2602#endif
4562236b 2603
b972b4f9
HW
2604 if (adev->dm.disable_hpd_irq)
2605 return;
2606
1f6010a9
DF
2607 /*
2608	* In case of failure or MST there is no need to update the connector status or notify the OS,
2609	* since (in the MST case) MST does this in its own context.
4562236b
HW
2610 */
2611 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2612
0c8620d6 2613#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 2614 if (adev->dm.hdcp_workqueue) {
96a3b32e 2615 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
97f6c917
BL
2616 dm_con_state->update_hdcp = true;
2617 }
0c8620d6 2618#endif
2e0ac3d6
HW
2619 if (aconnector->fake_enable)
2620 aconnector->fake_enable = false;
2621
fbbdadf2
BL
2622 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2623 DRM_ERROR("KMS: Failed to detect connector\n");
2624
2625 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2626 emulated_link_detect(aconnector->dc_link);
2627
2628
2629 drm_modeset_lock_all(dev);
2630 dm_restore_drm_connector_state(dev, connector);
2631 drm_modeset_unlock_all(dev);
2632
2633 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2634 drm_kms_helper_hotplug_event(dev);
2635
2636 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9
AP
2637 if (new_connection_type == dc_connection_none &&
2638 aconnector->dc_link->type == dc_connection_none)
2639 dm_set_dpms_off(aconnector->dc_link);
4562236b 2640
3c4d55c9 2641 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b
HW
2642
2643 drm_modeset_lock_all(dev);
2644 dm_restore_drm_connector_state(dev, connector);
2645 drm_modeset_unlock_all(dev);
2646
2647 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2648 drm_kms_helper_hotplug_event(dev);
2649 }
2650 mutex_unlock(&aconnector->hpd_lock);
2651
2652}
2653
c84dec2f 2654static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
4562236b
HW
2655{
2656 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2657 uint8_t dret;
2658 bool new_irq_handled = false;
2659 int dpcd_addr;
2660 int dpcd_bytes_to_read;
2661
2662 const int max_process_count = 30;
2663 int process_count = 0;
2664
2665 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2666
2667 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2668 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2669 /* DPCD 0x200 - 0x201 for downstream IRQ */
2670 dpcd_addr = DP_SINK_COUNT;
2671 } else {
2672 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2673 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2674 dpcd_addr = DP_SINK_COUNT_ESI;
2675 }
2676
2677 dret = drm_dp_dpcd_read(
2678 &aconnector->dm_dp_aux.aux,
2679 dpcd_addr,
2680 esi,
2681 dpcd_bytes_to_read);
2682
2683 while (dret == dpcd_bytes_to_read &&
2684 process_count < max_process_count) {
2685 uint8_t retry;
2686 dret = 0;
2687
2688 process_count++;
2689
f1ad2f5e 2690 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
2691 /* handle HPD short pulse irq */
2692 if (aconnector->mst_mgr.mst_state)
2693 drm_dp_mst_hpd_irq(
2694 &aconnector->mst_mgr,
2695 esi,
2696 &new_irq_handled);
4562236b
HW
2697
2698 if (new_irq_handled) {
2699 /* ACK at DPCD to notify down stream */
2700 const int ack_dpcd_bytes_to_write =
2701 dpcd_bytes_to_read - 1;
2702
2703 for (retry = 0; retry < 3; retry++) {
2704 uint8_t wret;
2705
2706 wret = drm_dp_dpcd_write(
2707 &aconnector->dm_dp_aux.aux,
2708 dpcd_addr + 1,
2709 &esi[1],
2710 ack_dpcd_bytes_to_write);
2711 if (wret == ack_dpcd_bytes_to_write)
2712 break;
2713 }
2714
1f6010a9 2715 /* check if there is new irq to be handled */
4562236b
HW
2716 dret = drm_dp_dpcd_read(
2717 &aconnector->dm_dp_aux.aux,
2718 dpcd_addr,
2719 esi,
2720 dpcd_bytes_to_read);
2721
2722 new_irq_handled = false;
d4a6e8a9 2723 } else {
4562236b 2724 break;
d4a6e8a9 2725 }
4562236b
HW
2726 }
2727
2728 if (process_count == max_process_count)
f1ad2f5e 2729 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
2730}
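
The read sizes in the loop above fall out of the standard DPCD map: pre-1.2 sinks expose the IRQ vector at 0x200, while DPCD 1.2+ sinks use the ESI block at 0x2002. A small check, assuming the usual drm_dp_helper.h values:

/*
 * Byte counts implied by the DPCD addresses used above (values as in
 * drm_dp_helper.h; the asserts are illustrative).
 */
#define DP_SINK_COUNT		0x200
#define DP_LANE0_1_STATUS	0x202
#define DP_SINK_COUNT_ESI	0x2002
#define DP_PSR_ERROR_STATUS	0x2006

/* DPCD rev < 1.2: read 0x200..0x201, i.e. 2 bytes. */
_Static_assert(DP_LANE0_1_STATUS - DP_SINK_COUNT == 2, "pre-1.2 IRQ window");
/* DPCD rev >= 1.2: read 0x2002..0x2005, i.e. 4 bytes of ESI. */
_Static_assert(DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI == 4, "ESI IRQ window");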
2731
2732static void handle_hpd_rx_irq(void *param)
2733{
c84dec2f 2734 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
2735 struct drm_connector *connector = &aconnector->base;
2736 struct drm_device *dev = connector->dev;
53cbf65c 2737 struct dc_link *dc_link = aconnector->dc_link;
4562236b 2738 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 2739 bool result = false;
fbbdadf2 2740 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 2741 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 2742 union hpd_irq_data hpd_irq_data;
d2aa1356 2743 bool lock_flag = 0;
2a0f9270
BL
2744
2745 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 2746
b972b4f9
HW
2747 if (adev->dm.disable_hpd_irq)
2748 return;
2749
2750
1f6010a9
DF
2751 /*
2752	* TODO: Temporarily add a mutex to protect the hpd interrupt from gpio
2753	* conflicts; once an i2c helper is implemented, this mutex should be
2754	* retired.
2755 */
b86e7eef 2756 mutex_lock(&aconnector->hpd_lock);
4562236b 2757
3083a984
QZ
2758 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2759
2760 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2761 (dc_link->type == dc_connection_mst_branch)) {
2762 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2763 result = true;
2764 dm_handle_hpd_rx_irq(aconnector);
2765 goto out;
2766 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2767 result = false;
2768 dm_handle_hpd_rx_irq(aconnector);
2769 goto out;
2770 }
2771 }
2772
d2aa1356
AP
2773 /*
2774 * TODO: We need the lock to avoid touching DC state while it's being
2775 * modified during automated compliance testing, or when link loss
2776 * happens. While this should be split into subhandlers and proper
2777 * interfaces to avoid having to conditionally lock like this in the
2778 * outer layer, we need this workaround temporarily to allow MST
2779 * lightup in some scenarios to avoid timeout.
2780 */
2781 if (!amdgpu_in_reset(adev) &&
2782 (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2783 hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
cf8b92a7 2784 mutex_lock(&adev->dm.dc_lock);
2785 lock_flag = 1;
2786 }
2787
2a0f9270 2788#ifdef CONFIG_DRM_AMD_DC_HDCP
c8ea79a8 2789 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2a0f9270 2790#else
c8ea79a8 2791 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2a0f9270 2792#endif
d2aa1356 2793 if (!amdgpu_in_reset(adev) && lock_flag)
cf8b92a7 2794 mutex_unlock(&adev->dm.dc_lock);
c8ea79a8 2795
3083a984 2796out:
c8ea79a8 2797 if (result && !is_mst_root_connector) {
4562236b 2798 /* Downstream Port status changed. */
fbbdadf2
BL
2799 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2800 DRM_ERROR("KMS: Failed to detect connector\n");
2801
2802 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2803 emulated_link_detect(dc_link);
2804
2805 if (aconnector->fake_enable)
2806 aconnector->fake_enable = false;
2807
2808 amdgpu_dm_update_connector_after_detect(aconnector);
2809
2810
2811 drm_modeset_lock_all(dev);
2812 dm_restore_drm_connector_state(dev, connector);
2813 drm_modeset_unlock_all(dev);
2814
2815 drm_kms_helper_hotplug_event(dev);
2816 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
2817
2818 if (aconnector->fake_enable)
2819 aconnector->fake_enable = false;
2820
4562236b
HW
2821 amdgpu_dm_update_connector_after_detect(aconnector);
2822
2823
2824 drm_modeset_lock_all(dev);
2825 dm_restore_drm_connector_state(dev, connector);
2826 drm_modeset_unlock_all(dev);
2827
2828 drm_kms_helper_hotplug_event(dev);
2829 }
2830 }
2a0f9270 2831#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
2832 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2833 if (adev->dm.hdcp_workqueue)
2834 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2835 }
2a0f9270 2836#endif
4562236b 2837
b86e7eef 2838 if (dc_link->type != dc_connection_mst_branch)
e86e8947 2839 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
b86e7eef
NC
2840
2841 mutex_unlock(&aconnector->hpd_lock);
4562236b
HW
2842}
2843
2844static void register_hpd_handlers(struct amdgpu_device *adev)
2845{
4a580877 2846 struct drm_device *dev = adev_to_drm(adev);
4562236b 2847 struct drm_connector *connector;
c84dec2f 2848 struct amdgpu_dm_connector *aconnector;
4562236b
HW
2849 const struct dc_link *dc_link;
2850 struct dc_interrupt_params int_params = {0};
2851
2852 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2853 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2854
2855 list_for_each_entry(connector,
2856 &dev->mode_config.connector_list, head) {
2857
c84dec2f 2858 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2859 dc_link = aconnector->dc_link;
2860
2861 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2862 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2863 int_params.irq_source = dc_link->irq_source_hpd;
2864
2865 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2866 handle_hpd_irq,
2867 (void *) aconnector);
2868 }
2869
2870 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2871
2872 /* Also register for DP short pulse (hpd_rx). */
2873 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2874 int_params.irq_source = dc_link->irq_source_hpd_rx;
2875
2876 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2877 handle_hpd_rx_irq,
2878 (void *) aconnector);
2879 }
2880 }
2881}
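
Every registration in this file follows the same three steps: pick an interrupt context, resolve the dc irq source, attach a handler. Condensed from the function above (HPD runs in the low, sleepable context; the vblank/pflip handlers below use INTERRUPT_HIGH_IRQ_CONTEXT):

/* Condensed registration pattern (from register_hpd_handlers()). */
struct dc_interrupt_params int_params = {0};

int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;	/* sleepable work */
int_params.irq_source = dc_link->irq_source_hpd;

amdgpu_dm_irq_register_interrupt(adev, &int_params,
				 handle_hpd_irq, (void *)aconnector);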
2882
55e56389
MR
2883#if defined(CONFIG_DRM_AMD_DC_SI)
2884/* Register IRQ sources and initialize IRQ callbacks */
2885static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2886{
2887 struct dc *dc = adev->dm.dc;
2888 struct common_irq_params *c_irq_params;
2889 struct dc_interrupt_params int_params = {0};
2890 int r;
2891 int i;
2892 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2893
2894 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2895 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2896
2897 /*
2898 * Actions of amdgpu_irq_add_id():
2899 * 1. Register a set() function with base driver.
2900 * Base driver will call set() function to enable/disable an
2901 * interrupt in DC hardware.
2902 * 2. Register amdgpu_dm_irq_handler().
2903 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2904 * coming from DC hardware.
2905 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2906 * for acknowledging and handling. */
2907
2908 /* Use VBLANK interrupt */
2909 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2910 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2911 if (r) {
2912 DRM_ERROR("Failed to add crtc irq id!\n");
2913 return r;
2914 }
2915
2916 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2917 int_params.irq_source =
2918 dc_interrupt_to_irq_source(dc, i+1 , 0);
2919
2920 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2921
2922 c_irq_params->adev = adev;
2923 c_irq_params->irq_src = int_params.irq_source;
2924
2925 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2926 dm_crtc_high_irq, c_irq_params);
2927 }
2928
2929 /* Use GRPH_PFLIP interrupt */
2930 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2931 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2932 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2933 if (r) {
2934 DRM_ERROR("Failed to add page flip irq id!\n");
2935 return r;
2936 }
2937
2938 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2939 int_params.irq_source =
2940 dc_interrupt_to_irq_source(dc, i, 0);
2941
2942 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2943
2944 c_irq_params->adev = adev;
2945 c_irq_params->irq_src = int_params.irq_source;
2946
2947 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2948 dm_pflip_high_irq, c_irq_params);
2949
2950 }
2951
2952 /* HPD */
2953 r = amdgpu_irq_add_id(adev, client_id,
2954 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2955 if (r) {
2956 DRM_ERROR("Failed to add hpd irq id!\n");
2957 return r;
2958 }
2959
2960 register_hpd_handlers(adev);
2961
2962 return 0;
2963}
2964#endif
2965
4562236b
HW
2966/* Register IRQ sources and initialize IRQ callbacks */
2967static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2968{
2969 struct dc *dc = adev->dm.dc;
2970 struct common_irq_params *c_irq_params;
2971 struct dc_interrupt_params int_params = {0};
2972 int r;
2973 int i;
1ffdeca6 2974 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 2975
84374725 2976 if (adev->asic_type >= CHIP_VEGA10)
3760f76c 2977 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
2978
2979 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2980 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2981
1f6010a9
DF
2982 /*
2983 * Actions of amdgpu_irq_add_id():
4562236b
HW
2984 * 1. Register a set() function with base driver.
2985 * Base driver will call set() function to enable/disable an
2986 * interrupt in DC hardware.
2987 * 2. Register amdgpu_dm_irq_handler().
2988 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2989 * coming from DC hardware.
2990 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2991 * for acknowledging and handling. */
2992
b57de80a 2993 /* Use VBLANK interrupt */
e9029155 2994 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 2995 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
2996 if (r) {
2997 DRM_ERROR("Failed to add crtc irq id!\n");
2998 return r;
2999 }
3000
3001 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3002 int_params.irq_source =
3d761e79 3003 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 3004
b57de80a 3005 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
3006
3007 c_irq_params->adev = adev;
3008 c_irq_params->irq_src = int_params.irq_source;
3009
3010 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3011 dm_crtc_high_irq, c_irq_params);
3012 }
3013
d2574c33
MK
3014 /* Use VUPDATE interrupt */
3015 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3016 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3017 if (r) {
3018 DRM_ERROR("Failed to add vupdate irq id!\n");
3019 return r;
3020 }
3021
3022 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3023 int_params.irq_source =
3024 dc_interrupt_to_irq_source(dc, i, 0);
3025
3026 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3027
3028 c_irq_params->adev = adev;
3029 c_irq_params->irq_src = int_params.irq_source;
3030
3031 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3032 dm_vupdate_high_irq, c_irq_params);
3033 }
3034
3d761e79 3035 /* Use GRPH_PFLIP interrupt */
4562236b
HW
3036 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3037 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 3038 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
3039 if (r) {
3040 DRM_ERROR("Failed to add page flip irq id!\n");
3041 return r;
3042 }
3043
3044 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3045 int_params.irq_source =
3046 dc_interrupt_to_irq_source(dc, i, 0);
3047
3048 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3049
3050 c_irq_params->adev = adev;
3051 c_irq_params->irq_src = int_params.irq_source;
3052
3053 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3054 dm_pflip_high_irq, c_irq_params);
3055
3056 }
3057
3058 /* HPD */
2c8ad2d5
AD
3059 r = amdgpu_irq_add_id(adev, client_id,
3060 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
3061 if (r) {
3062 DRM_ERROR("Failed to add hpd irq id!\n");
3063 return r;
3064 }
3065
3066 register_hpd_handlers(adev);
3067
3068 return 0;
3069}
3070
b86a1aa3 3071#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3072/* Register IRQ sources and initialize IRQ callbacks */
3073static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3074{
3075 struct dc *dc = adev->dm.dc;
3076 struct common_irq_params *c_irq_params;
3077 struct dc_interrupt_params int_params = {0};
3078 int r;
3079 int i;
660d5406
WL
3080#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3081 static const unsigned int vrtl_int_srcid[] = {
3082 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3083 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3084 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3085 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3086 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3087 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3088 };
3089#endif
ff5ef992
AD
3090
3091 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3092 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3093
1f6010a9
DF
3094 /*
3095 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
3096 * 1. Register a set() function with base driver.
3097 * Base driver will call set() function to enable/disable an
3098 * interrupt in DC hardware.
3099 * 2. Register amdgpu_dm_irq_handler().
3100 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3101 * coming from DC hardware.
3102 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3103 * for acknowledging and handling.
1f6010a9 3104 */
ff5ef992
AD
3105
3106 /* Use VSTARTUP interrupt */
3107 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3108 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3109 i++) {
3760f76c 3110 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
3111
3112 if (r) {
3113 DRM_ERROR("Failed to add crtc irq id!\n");
3114 return r;
3115 }
3116
3117 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3118 int_params.irq_source =
3119 dc_interrupt_to_irq_source(dc, i, 0);
3120
3121 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3122
3123 c_irq_params->adev = adev;
3124 c_irq_params->irq_src = int_params.irq_source;
3125
2346ef47
NK
3126 amdgpu_dm_irq_register_interrupt(
3127 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3128 }
3129
86bc2219
WL
3130 /* Use otg vertical line interrupt */
3131#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
660d5406
WL
3132 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3133 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3134 vrtl_int_srcid[i], &adev->vline0_irq);
86bc2219
WL
3135
3136 if (r) {
3137 DRM_ERROR("Failed to add vline0 irq id!\n");
3138 return r;
3139 }
3140
3141 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3142 int_params.irq_source =
3143 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3144
3145 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3146 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3147 break;
3148 }
86bc2219
WL
3149
3150 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3151 - DC_IRQ_SOURCE_DC1_VLINE0];
3152
3153 c_irq_params->adev = adev;
3154 c_irq_params->irq_src = int_params.irq_source;
3155
3156 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3157 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3158 }
3159#endif
3160
2346ef47
NK
3161 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3162 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3163 * to trigger at end of each vblank, regardless of state of the lock,
3164 * matching DCE behaviour.
3165 */
3166 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3167 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3168 i++) {
3169 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3170
3171 if (r) {
3172 DRM_ERROR("Failed to add vupdate irq id!\n");
3173 return r;
3174 }
3175
3176 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3177 int_params.irq_source =
3178 dc_interrupt_to_irq_source(dc, i, 0);
3179
3180 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3181
3182 c_irq_params->adev = adev;
3183 c_irq_params->irq_src = int_params.irq_source;
3184
ff5ef992 3185 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 3186 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
3187 }
3188
ff5ef992
AD
3189 /* Use GRPH_PFLIP interrupt */
3190 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3191 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3192 i++) {
3760f76c 3193 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
3194 if (r) {
3195 DRM_ERROR("Failed to add page flip irq id!\n");
3196 return r;
3197 }
3198
3199 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3200 int_params.irq_source =
3201 dc_interrupt_to_irq_source(dc, i, 0);
3202
3203 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3204
3205 c_irq_params->adev = adev;
3206 c_irq_params->irq_src = int_params.irq_source;
3207
3208 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3209 dm_pflip_high_irq, c_irq_params);
3210
3211 }
3212
81927e28
JS
3213 /* HPD */
3214 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3215 &adev->hpd_irq);
3216 if (r) {
3217 DRM_ERROR("Failed to add hpd irq id!\n");
3218 return r;
3219 }
a08f16cf 3220
81927e28 3221 register_hpd_handlers(adev);
a08f16cf 3222
81927e28
JS
3223 return 0;
3224}
3225/* Register Outbox IRQ sources and initialize IRQ callbacks */
3226static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3227{
3228 struct dc *dc = adev->dm.dc;
3229 struct common_irq_params *c_irq_params;
3230 struct dc_interrupt_params int_params = {0};
3231 int r, i;
3232
3233 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3234 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3235
3236 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3237 &adev->dmub_outbox_irq);
3238 if (r) {
3239 DRM_ERROR("Failed to add outbox irq id!\n");
3240 return r;
3241 }
3242
3243 if (dc->ctx->dmub_srv) {
3244 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3245 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
a08f16cf 3246 int_params.irq_source =
81927e28 3247 dc_interrupt_to_irq_source(dc, i, 0);
a08f16cf 3248
81927e28 3249 c_irq_params = &adev->dm.dmub_outbox_params[0];
a08f16cf
LHM
3250
3251 c_irq_params->adev = adev;
3252 c_irq_params->irq_src = int_params.irq_source;
3253
3254 amdgpu_dm_irq_register_interrupt(adev, &int_params,
81927e28 3255 dm_dmub_outbox1_low_irq, c_irq_params);
ff5ef992
AD
3256 }
3257
ff5ef992
AD
3258 return 0;
3259}
3260#endif
3261
eb3dc897
NK
3262/*
3263 * Acquires the lock for the atomic state object and returns
3264 * the new atomic state.
3265 *
3266 * This should only be called during atomic check.
3267 */
3268static int dm_atomic_get_state(struct drm_atomic_state *state,
3269 struct dm_atomic_state **dm_state)
3270{
3271 struct drm_device *dev = state->dev;
1348969a 3272 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3273 struct amdgpu_display_manager *dm = &adev->dm;
3274 struct drm_private_state *priv_state;
eb3dc897
NK
3275
3276 if (*dm_state)
3277 return 0;
3278
eb3dc897
NK
3279 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3280 if (IS_ERR(priv_state))
3281 return PTR_ERR(priv_state);
3282
3283 *dm_state = to_dm_atomic_state(priv_state);
3284
3285 return 0;
3286}
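
A typical caller during atomic check keeps the pointer NULL until the private state is actually needed, so the private-object lock is taken lazily; a sketch under that assumption:

/* Sketch: lazy use of dm_atomic_get_state() inside atomic check. */
struct dm_atomic_state *dm_state = NULL;
int ret;

ret = dm_atomic_get_state(state, &dm_state);
if (ret)
	return ret;

/* dm_state->context may now be inspected/modified for this commit. */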

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps.min_input_signal =
			AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
			AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps.aux_support)
		return;

	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned *min, unsigned *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}
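
/*
 * Worked example (illustrative values, assuming PWM control): with firmware
 * limits min_input_signal = 12 and max_input_signal = 255, the 16-bit PWM
 * range becomes min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535.
 * convert_brightness_from_user() then maps user brightness 128 to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432, and
 * convert_brightness_to_user(34432) rounds back to 128, so the two
 * conversions are inverses of each other up to integer rounding.
 */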

static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
					 u32 user_brightness)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
	bool rc;
	int i;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	for (i = 0; i < dm->num_of_edps; i++) {
		dm->brightness[i] = user_brightness;
		brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
		link[i] = (struct dc_link *)dm->backlight_link[i];
	}

	/* Change brightness based on AUX property */
	if (caps.aux_support) {
		for (i = 0; i < dm->num_of_edps; i++) {
			rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
					AUX_BL_DEFAULT_TRANSITION_TIME_MS);
			if (!rc) {
				DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
				break;
			}
		}
	} else {
		for (i = 0; i < dm->num_of_edps; i++) {
			rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
			if (!rc) {
				DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
				break;
			}
		}
	}

	return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	amdgpu_dm_backlight_set_level(dm, bd->props.brightness);

	return 0;
}

static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
{
	struct amdgpu_dm_backlight_caps caps;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;

	if (caps.aux_support) {
		struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
		u32 avg, peak;
		bool rc;

		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
		if (!rc)
			return dm->brightness[0];
		return convert_brightness_to_user(&caps, avg);
	} else {
		int ret = dc_link_get_backlight_level(dm->backlight_link[0]);

		if (ret == DC_ERROR_UNEXPECTED)
			return dm->brightness[0];
		return convert_brightness_to_user(&caps, ret);
	}
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	return amdgpu_dm_backlight_get_level(dm);
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };
	int i;

	amdgpu_dm_update_backlight_caps(dm);
	for (i = 0; i < dm->num_of_edps; i++)
		dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
						      adev_to_drm(dm->adev)->dev,
						      dm,
						      &amdgpu_dm_backlight_ops,
						      &props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif
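
/*
 * Note on standard backlight-class behavior (not specific to this file):
 * the device registered above appears to userspace as
 * /sys/class/backlight/amdgpu_bl<N>/, and writes to its "brightness"
 * attribute land in bd->props.brightness, which is what
 * amdgpu_dm_backlight_update_status() above forwards to the DC link.
 */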

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}
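
/*
 * Example of the possible_crtcs encoding above (illustrative): with
 * dc->caps.max_streams == 4, the primary plane with plane_id == 2 gets
 * possible_crtcs = 1 << 2 = 0b0100, i.e. it is bound to CRTC 2 only,
 * while an overlay plane (plane_id >= max_streams) gets 0xff and may be
 * assigned to any of up to eight CRTCs.
 */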


static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		if (!dm->backlight_dev)
			amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev) {
			dm->backlight_link[dm->num_of_edps] = link;
			dm->num_of_edps++;
		}
	}
#endif
}

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* Use Outbox interrupt */
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_RENOIR:
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
	}
#endif

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_VANGOGH:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
	case CHIP_DIMGREY_CAVEFISH:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_BEIGE_GOBY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};


static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
	 * scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}
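
/*
 * Example of the 1000-units convention above (illustrative): a returned
 * *min_downscale of 250 allows shrinking a plane to 0.25x of its source
 * size, and a *max_upscale of 16000 allows a 16x enlargement; a raw cap
 * value of 1 is normalized to 1000, which permits only 1:1 scaling.
 */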


static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang. To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (state->fb &&
	    state->fb->format->format == DRM_FORMAT_NV12 &&
	    (scaling_info->src_rect.x != 0 ||
	     scaling_info->src_rect.y != 0))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
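
/*
 * Worked example (illustrative): a 1920x1080 source rect arrives as 16.16
 * fixed point (src_w = 1920 << 16), so the shift above recovers 1920. If
 * that source is scanned out to a 960x540 destination, scale_w and scale_h
 * are both 960 * 1000 / 1920 = 500, which is accepted as long as the
 * format's min_downscale is <= 500 (the fallback used above is 250).
 */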

static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
			DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
			DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
		AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
	    adev->asic_type == CHIP_BEIGE_GOBY ||
	    adev->asic_type == CHIP_VANGOGH)
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

static int
validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

static bool
modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned
modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}
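
/*
 * Example (illustrative): a GFX9 modifier built as
 *	AMD_FMT_MOD | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 *	AMD_FMT_MOD_SET(DCC, 1) | ...
 * makes modifier_has_dcc() return true, and modifier_gfx9_swizzle_mode()
 * extracts the TILE field as the hardware swizzle mode, while plain
 * DRM_FORMAT_MOD_LINEAR decodes to swizzle mode 0.
 */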

static void
fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
				    union dc_tiling_info *tiling_info,
				    uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);

	fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}

enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};

static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);
	int i;

	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow these modifiers:
	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR ||
	    modifier == DRM_FORMAT_MOD_INVALID) {
		return true;
	}

	/* Check that the modifier is on the list of the plane's supported modifiers. */
	for (i = 0; i < plane->modifier_count; i++) {
		if (modifier == plane->modifiers[i])
			break;
	}
	if (i == plane->modifier_count)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
		/* We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes. */
		if (info->num_planes > 1)
			return false;
	}

	return true;
}

static void
add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}
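
/*
 * Growth note: the modifier list starts at a capacity of 128 entries (see
 * get_plane_modifiers() below) and doubles whenever it fills up. On
 * allocation failure the whole list is freed and *mods is set to NULL,
 * which later add_modifier() calls treat as a no-op and the caller then
 * reports as -ENOMEM.
 */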

static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);


	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				    AMD_FMT_MOD_SET(RB, rb) |
				    AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			    AMD_FMT_MOD_SET(RB, rb) |
			    AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}

static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));


	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static int
get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;
	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);

	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	}

	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	if (!*mods)
		return -ENOMEM;

	return 0;
}
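
/*
 * Usage note (an assumption about the wider driver, not shown in this
 * excerpt): the array built here, terminated by DRM_FORMAT_MOD_INVALID,
 * is expected to be handed to drm_universal_plane_init() during plane
 * creation, and dm_plane_format_mod_supported() above is what validates
 * userspace framebuffers against it per format.
 */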

static int
fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
					  const struct amdgpu_framebuffer *afb,
					  const enum surface_pixel_format format,
					  const enum dc_rotation_angle rotation,
					  const struct plane_size *plane_size,
					  union dc_tiling_info *tiling_info,
					  struct dc_plane_dcc_param *dcc,
					  struct dc_plane_address *address,
					  const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret;

	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		return ret;

	return 0;
}

static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}
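
/*
 * Example of the video-surface split above (illustrative): for a 1920x1080
 * NV12 framebuffer, the luma plane is 1920x1080 with a pitch of
 * fb->pitches[0] / cpp[0], while the chroma plane is sized 960x540
 * (width / 2, height / 2) with its own pitch and a base address taken
 * from fb->offsets[1].
 */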

static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}
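
/*
 * Example (illustrative): an overlay plane with an ARGB8888 framebuffer and
 * pixel_blend_mode == DRM_MODE_BLEND_PREMULTI gets *per_pixel_alpha = true;
 * if its DRM plane alpha property is 0x8080 (out of 0xffff), *global_alpha
 * becomes true with *global_alpha_value = 0x8080 >> 8 = 0x80.
 */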

static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
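
/*
 * Example (illustrative): an NV12 plane with color_encoding ==
 * DRM_COLOR_YCBCR_BT709 and a limited color_range resolves to
 * COLOR_SPACE_YCBCR709_LIMITED, while RGB formats always return
 * COLOR_SPACE_SRGB because the DRM color properties are ignored for them.
 */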

static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %p4cc\n",
			&fb->format->format);
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}
5014
5015static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5016 struct dc_plane_state *dc_plane_state,
5017 struct drm_plane_state *plane_state,
5018 struct drm_crtc_state *crtc_state)
e7b07cee 5019{
cf020d49 5020 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 5021 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
5022 struct dc_scaling_info scaling_info;
5023 struct dc_plane_info plane_info;
695af5f9 5024 int ret;
87b7ebc2 5025 bool force_disable_dcc = false;
e7b07cee 5026
695af5f9
NK
5027 ret = fill_dc_scaling_info(plane_state, &scaling_info);
5028 if (ret)
5029 return ret;
e7b07cee 5030
695af5f9
NK
5031 dc_plane_state->src_rect = scaling_info.src_rect;
5032 dc_plane_state->dst_rect = scaling_info.dst_rect;
5033 dc_plane_state->clip_rect = scaling_info.clip_rect;
5034 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 5035
87b7ebc2 5036 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 5037 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 5038 afb->tiling_flags,
695af5f9 5039 &plane_info,
87b7ebc2 5040 &dc_plane_state->address,
6eed95b0 5041 afb->tmz_surface,
87b7ebc2 5042 force_disable_dcc);
004fefa3
NK
5043 if (ret)
5044 return ret;
5045
695af5f9
NK
5046 dc_plane_state->format = plane_info.format;
5047 dc_plane_state->color_space = plane_info.color_space;
5049 dc_plane_state->plane_size = plane_info.plane_size;
5050 dc_plane_state->rotation = plane_info.rotation;
5051 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5052 dc_plane_state->stereo_format = plane_info.stereo_format;
5053 dc_plane_state->tiling_info = plane_info.tiling_info;
5054 dc_plane_state->visible = plane_info.visible;
5055 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5056 dc_plane_state->global_alpha = plane_info.global_alpha;
5057 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5058 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 5059 dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
7afa0033 5060 dc_plane_state->flip_int_enabled = true;
695af5f9 5061
e277adc5
LSL
5062 /*
5063 * Always set input transfer function, since plane state is refreshed
5064 * every time.
5065 */
cf020d49
NK
5066 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5067 if (ret)
5068 return ret;
e7b07cee 5069
cf020d49 5070 return 0;
e7b07cee
HW
5071}
5072
3ee6b26b
AD
5073static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5074 const struct dm_connector_state *dm_state,
5075 struct dc_stream_state *stream)
e7b07cee
HW
5076{
5077 enum amdgpu_rmx_type rmx_type;
5078
5079 struct rect src = { 0 }; /* viewport in composition space */
5080 struct rect dst = { 0 }; /* stream addressable area */
5081
5082 /* no mode. nothing to be done */
5083 if (!mode)
5084 return;
5085
5086 /* Full screen scaling by default */
5087 src.width = mode->hdisplay;
5088 src.height = mode->vdisplay;
5089 dst.width = stream->timing.h_addressable;
5090 dst.height = stream->timing.v_addressable;
5091
f4791779
HW
5092 if (dm_state) {
5093 rmx_type = dm_state->scaling;
5094 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5095 if (src.width * dst.height <
5096 src.height * dst.width) {
5097 /* height needs less upscaling/more downscaling */
5098 dst.width = src.width *
5099 dst.height / src.height;
5100 } else {
5101 /* width needs less upscaling/more downscaling */
5102 dst.height = src.height *
5103 dst.width / src.width;
5104 }
5105 } else if (rmx_type == RMX_CENTER) {
5106 dst = src;
e7b07cee 5107 }
e7b07cee 5108
f4791779
HW
5109 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5110 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 5111
f4791779
HW
5112 if (dm_state->underscan_enable) {
5113 dst.x += dm_state->underscan_hborder / 2;
5114 dst.y += dm_state->underscan_vborder / 2;
5115 dst.width -= dm_state->underscan_hborder;
5116 dst.height -= dm_state->underscan_vborder;
5117 }
e7b07cee
HW
5118 }
5119
5120 stream->src = src;
5121 stream->dst = dst;
5122
4711c033
LT
5123 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5124 dst.x, dst.y, dst.width, dst.height);
e7b07cee
HW
5125
5126}
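
For RMX_ASPECT the code shrinks whichever destination axis needs less upscaling, preserving the source aspect ratio, then centers the result. A standalone sketch of the same integer math, with hypothetical viewport and stream sizes:

#include <stdio.h>

struct rect { int x, y, width, height; };

/* Aspect-preserving fit of src into dst, centered, as in the
 * RMX_ASPECT branch above. */
static struct rect fit_aspect(struct rect src, struct rect dst)
{
	struct rect out = dst;

	if (src.width * dst.height < src.height * dst.width)
		out.width = src.width * dst.height / src.height; /* pillarbox */
	else
		out.height = src.height * dst.width / src.width; /* letterbox */

	out.x = (dst.width - out.width) / 2;
	out.y = (dst.height - out.height) / 2;
	return out;
}

int main(void)
{
	struct rect src = { 0, 0, 1280, 720 };	/* viewport */
	struct rect dst = { 0, 0, 1920, 1200 };	/* stream addressable area */
	struct rect r = fit_aspect(src, dst);

	/* 1280x720 into 1920x1200 -> 1920x1080 at (0, 60) */
	printf("%dx%d at (%d,%d)\n", r.width, r.height, r.x, r.y);
	return 0;
}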
5127
3ee6b26b 5128static enum dc_color_depth
42ba01fc 5129convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 5130 bool is_y420, int requested_bpc)
e7b07cee 5131{
1bc22f20 5132 uint8_t bpc;
01c22997 5133
1bc22f20
SW
5134 if (is_y420) {
5135 bpc = 8;
5136
5137 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5138 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5139 bpc = 16;
5140 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5141 bpc = 12;
5142 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5143 bpc = 10;
5144 } else {
5145 bpc = (uint8_t)connector->display_info.bpc;
5146 /* Assume 8 bpc by default if no bpc is specified. */
5147 bpc = bpc ? bpc : 8;
5148 }
e7b07cee 5149
cbd14ae7 5150 if (requested_bpc > 0) {
01c22997
NK
5151 /*
5152 * Cap display bpc based on the user requested value.
5153 *
5154 * The value for state->max_bpc may not be correctly updated
5155 * depending on when the connector gets added to the state
5156 * or if this was called outside of atomic check, so it
5157 * can't be used directly.
5158 */
cbd14ae7 5159 bpc = min_t(u8, bpc, requested_bpc);
01c22997 5160
1825fd34
NK
5161 /* Round down to the nearest even number. */
5162 bpc = bpc - (bpc & 1);
5163 }
07e3a1cf 5164
e7b07cee
HW
5165 switch (bpc) {
5166 case 0:
1f6010a9
DF
5167 /*
5168 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
5169 * EDID revisions before 1.4.
5170 * TODO: Fix EDID parsing.
5171 */
5172 return COLOR_DEPTH_888;
5173 case 6:
5174 return COLOR_DEPTH_666;
5175 case 8:
5176 return COLOR_DEPTH_888;
5177 case 10:
5178 return COLOR_DEPTH_101010;
5179 case 12:
5180 return COLOR_DEPTH_121212;
5181 case 14:
5182 return COLOR_DEPTH_141414;
5183 case 16:
5184 return COLOR_DEPTH_161616;
5185 default:
5186 return COLOR_DEPTH_UNDEFINED;
5187 }
5188}
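
The requested_bpc clamp above caps the sink's bpc at the user's max and rounds down to the nearest even value, since odd bpc values are not valid color depths. A small sketch of just that arithmetic (sample values hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the bpc capping above: clamp to the requested max,
 * then round down to the nearest even value. */
static uint8_t cap_bpc(uint8_t sink_bpc, int requested_bpc)
{
	uint8_t bpc = sink_bpc ? sink_bpc : 8;	/* assume 8 bpc if unset */

	if (requested_bpc > 0) {
		if (bpc > requested_bpc)
			bpc = requested_bpc;
		bpc = bpc - (bpc & 1);		/* 11 -> 10, 9 -> 8 */
	}
	return bpc;
}

int main(void)
{
	printf("%d\n", cap_bpc(12, 10));	/* sink 12, user max 10 -> 10 */
	printf("%d\n", cap_bpc(10, 11));	/* sink 10, user max 11 -> 10 */
	printf("%d\n", cap_bpc(0, 8));		/* unspecified sink bpc -> 8 */
	return 0;
}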
5189
3ee6b26b
AD
5190static enum dc_aspect_ratio
5191get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 5192{
e11d4147
LSL
5193 /* 1-1 mapping, since both enums follow the HDMI spec. */
5194 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
5195}
5196
3ee6b26b
AD
5197static enum dc_color_space
5198get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
5199{
5200 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5201
5202 switch (dc_crtc_timing->pixel_encoding) {
5203 case PIXEL_ENCODING_YCBCR422:
5204 case PIXEL_ENCODING_YCBCR444:
5205 case PIXEL_ENCODING_YCBCR420:
5206 {
5207 /*
5208 * 27.03 MHz (270300 in 100 Hz units) is the separation point
5209 * between HDTV and SDTV according to the HDMI spec; use YCbCr709
5210 * and YCbCr601 respectively.
5211 */
380604e2 5212 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
5213 if (dc_crtc_timing->flags.Y_ONLY)
5214 color_space =
5215 COLOR_SPACE_YCBCR709_LIMITED;
5216 else
5217 color_space = COLOR_SPACE_YCBCR709;
5218 } else {
5219 if (dc_crtc_timing->flags.Y_ONLY)
5220 color_space =
5221 COLOR_SPACE_YCBCR601_LIMITED;
5222 else
5223 color_space = COLOR_SPACE_YCBCR601;
5224 }
5225
5226 }
5227 break;
5228 case PIXEL_ENCODING_RGB:
5229 color_space = COLOR_SPACE_SRGB;
5230 break;
5231
5232 default:
5233 WARN_ON(1);
5234 break;
5235 }
5236
5237 return color_space;
5238}
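
The YCbCr branch classifies purely by pixel clock: anything above 27.03 MHz is treated as HDTV (BT.709 colorimetry), the rest as SDTV (BT.601). A tiny sketch of that threshold in DC's 100 Hz units (the sample clocks are illustrative):

#include <stdio.h>

/* HDTV vs SDTV colourimetry split at 27.03 MHz, as above.
 * pix_clk_100hz is in 100 Hz units, so the threshold is 270300. */
static const char *ycbcr_space(int pix_clk_100hz)
{
	return pix_clk_100hz > 270300 ? "YCbCr709" : "YCbCr601";
}

int main(void)
{
	printf("480p (27 MHz): %s\n", ycbcr_space(270000));	/* YCbCr601 */
	printf("720p (74.25 MHz): %s\n", ycbcr_space(742500));	/* YCbCr709 */
	return 0;
}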
5239
ea117312
TA
5240static bool adjust_colour_depth_from_display_info(
5241 struct dc_crtc_timing *timing_out,
5242 const struct drm_display_info *info)
400443e8 5243{
ea117312 5244 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 5245 int normalized_clk;
400443e8 5246 do {
380604e2 5247 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
5248 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5249 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5250 normalized_clk /= 2;
5251 /* Adjusting pix clock following on HDMI spec based on colour depth */
ea117312
TA
5252 switch (depth) {
5253 case COLOR_DEPTH_888:
5254 break;
400443e8
ML
5255 case COLOR_DEPTH_101010:
5256 normalized_clk = (normalized_clk * 30) / 24;
5257 break;
5258 case COLOR_DEPTH_121212:
5259 normalized_clk = (normalized_clk * 36) / 24;
5260 break;
5261 case COLOR_DEPTH_161616:
5262 normalized_clk = (normalized_clk * 48) / 24;
5263 break;
5264 default:
ea117312
TA
5265 /* The above depths are the only ones valid for HDMI. */
5266 return false;
400443e8 5267 }
ea117312
TA
5268 if (normalized_clk <= info->max_tmds_clock) {
5269 timing_out->display_color_depth = depth;
5270 return true;
5271 }
5272 } while (--depth > COLOR_DEPTH_666);
5273 return false;
400443e8 5274}
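
The loop above derives the effective TMDS clock from the pixel clock: scale by bits-per-component over 24 bits, halve for YCbCr 4:2:0, then step the depth down until the result fits the sink's max_tmds_clock (kHz). A worked sketch for a hypothetical 4K@60 mode against the HDMI 2.0 600 MHz limit:

#include <stdio.h>

/* Effective TMDS clock in kHz for a given colour depth, mirroring the
 * normalisation above. pix_clk_100hz is in 100 Hz units as in DC. */
static int normalized_tmds_khz(int pix_clk_100hz, int bpc, int is_ycbcr420)
{
	int clk = pix_clk_100hz / 10;	/* 100 Hz units -> kHz */

	if (is_ycbcr420)
		clk /= 2;		/* 4:2:0 halves the rate */
	return clk * bpc / 8;		/* 10 bpc -> *30/24, etc. */
}

int main(void)
{
	static const int depths[] = { 16, 12, 10, 8 };	/* valid HDMI depths */
	int pix_clk_100hz = 5940000;	/* 594 MHz, 4K@60 4:4:4 */
	int max_tmds_khz = 600000;	/* HDMI 2.0 limit, 600 MHz */
	unsigned i;

	for (i = 0; i < sizeof(depths) / sizeof(depths[0]); i++) {
		int clk = normalized_tmds_khz(pix_clk_100hz, depths[i], 0);

		printf("%2d bpc -> %d kHz %s\n", depths[i], clk,
		       clk <= max_tmds_khz ? "(fits)" : "(too fast)");
	}
	/* Only 8 bpc fits here; forcing YCbCr 4:2:0 halves the clock and
	 * would let the loop settle on a deeper colour depth instead. */
	return 0;
}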
e7b07cee 5275
42ba01fc
NK
5276static void fill_stream_properties_from_drm_display_mode(
5277 struct dc_stream_state *stream,
5278 const struct drm_display_mode *mode_in,
5279 const struct drm_connector *connector,
5280 const struct drm_connector_state *connector_state,
cbd14ae7
SW
5281 const struct dc_stream_state *old_stream,
5282 int requested_bpc)
e7b07cee
HW
5283{
5284 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 5285 const struct drm_display_info *info = &connector->display_info;
d4252eee 5286 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
5287 struct hdmi_vendor_infoframe hv_frame;
5288 struct hdmi_avi_infoframe avi_frame;
e7b07cee 5289
acf83f86
WL
5290 memset(&hv_frame, 0, sizeof(hv_frame));
5291 memset(&avi_frame, 0, sizeof(avi_frame));
5292
e7b07cee
HW
5293 timing_out->h_border_left = 0;
5294 timing_out->h_border_right = 0;
5295 timing_out->v_border_top = 0;
5296 timing_out->v_border_bottom = 0;
5297 /* TODO: un-hardcode */
fe61a2f1 5298 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 5299 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 5300 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
5301 else if (drm_mode_is_420_also(info, mode_in)
5302 && aconnector->force_yuv420_output)
5303 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 5304 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 5305 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
5306 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5307 else
5308 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5309
5310 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5311 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
5312 connector,
5313 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5314 requested_bpc);
e7b07cee
HW
5315 timing_out->scan_type = SCANNING_TYPE_NODATA;
5316 timing_out->hdmi_vic = 0;
b333730d
BL
5317
5318 if (old_stream) {
5319 timing_out->vic = old_stream->timing.vic;
5320 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5321 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5322 } else {
5323 timing_out->vic = drm_match_cea_mode(mode_in);
5324 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5325 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5326 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5327 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5328 }
e7b07cee 5329
1cb1d477
WL
5330 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5331 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5332 timing_out->vic = avi_frame.video_code;
5333 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5334 timing_out->hdmi_vic = hv_frame.vic;
5335 }
5336
fe8858bb
NC
5337 if (is_freesync_video_mode(mode_in, aconnector)) {
5338 timing_out->h_addressable = mode_in->hdisplay;
5339 timing_out->h_total = mode_in->htotal;
5340 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5341 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5342 timing_out->v_total = mode_in->vtotal;
5343 timing_out->v_addressable = mode_in->vdisplay;
5344 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5345 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5346 timing_out->pix_clk_100hz = mode_in->clock * 10;
5347 } else {
5348 timing_out->h_addressable = mode_in->crtc_hdisplay;
5349 timing_out->h_total = mode_in->crtc_htotal;
5350 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5351 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5352 timing_out->v_total = mode_in->crtc_vtotal;
5353 timing_out->v_addressable = mode_in->crtc_vdisplay;
5354 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5355 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5356 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5357 }
a85ba005 5358
e7b07cee 5359 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
5360
5361 stream->output_color_space = get_output_color_space(timing_out);
5362
e43a432c
AK
5363 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5364 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
5365 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5366 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5367 drm_mode_is_420_also(info, mode_in) &&
5368 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5369 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5370 adjust_colour_depth_from_display_info(timing_out, info);
5371 }
5372 }
e7b07cee
HW
5373}
5374
3ee6b26b
AD
5375static void fill_audio_info(struct audio_info *audio_info,
5376 const struct drm_connector *drm_connector,
5377 const struct dc_sink *dc_sink)
e7b07cee
HW
5378{
5379 int i = 0;
5380 int cea_revision = 0;
5381 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5382
5383 audio_info->manufacture_id = edid_caps->manufacturer_id;
5384 audio_info->product_id = edid_caps->product_id;
5385
5386 cea_revision = drm_connector->display_info.cea_rev;
5387
090afc1e 5388 strscpy(audio_info->display_name,
d2b2562c 5389 edid_caps->display_name,
090afc1e 5390 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 5391
b830ebc9 5392 if (cea_revision >= 3) {
e7b07cee
HW
5393 audio_info->mode_count = edid_caps->audio_mode_count;
5394
5395 for (i = 0; i < audio_info->mode_count; ++i) {
5396 audio_info->modes[i].format_code =
5397 (enum audio_format_code)
5398 (edid_caps->audio_modes[i].format_code);
5399 audio_info->modes[i].channel_count =
5400 edid_caps->audio_modes[i].channel_count;
5401 audio_info->modes[i].sample_rates.all =
5402 edid_caps->audio_modes[i].sample_rate;
5403 audio_info->modes[i].sample_size =
5404 edid_caps->audio_modes[i].sample_size;
5405 }
5406 }
5407
5408 audio_info->flags.all = edid_caps->speaker_flags;
5409
5410 /* TODO: We only check for progressive mode; check for interlaced mode too */
b830ebc9 5411 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5412 audio_info->video_latency = drm_connector->video_latency[0];
5413 audio_info->audio_latency = drm_connector->audio_latency[0];
5414 }
5415
5416 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5417
5418}
5419
3ee6b26b
AD
5420static void
5421copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5422 struct drm_display_mode *dst_mode)
e7b07cee
HW
5423{
5424 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5425 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5426 dst_mode->crtc_clock = src_mode->crtc_clock;
5427 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5428 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5429 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
5430 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5431 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5432 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5433 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5434 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5435 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5436 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5437 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5438}
5439
3ee6b26b
AD
5440static void
5441decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5442 const struct drm_display_mode *native_mode,
5443 bool scale_enabled)
e7b07cee
HW
5444{
5445 if (scale_enabled) {
5446 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5447 } else if (native_mode->clock == drm_mode->clock &&
5448 native_mode->htotal == drm_mode->htotal &&
5449 native_mode->vtotal == drm_mode->vtotal) {
5450 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5451 } else {
5452 /* no scaling nor amdgpu inserted, no need to patch */
5453 }
5454}
5455
aed15309
ML
5456static struct dc_sink *
5457create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5458{
2e0ac3d6 5459 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5460 struct dc_sink *sink = NULL;
2e0ac3d6
HW
5461 sink_init_data.link = aconnector->dc_link;
5462 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5463
5464 sink = dc_sink_create(&sink_init_data);
423788c7 5465 if (!sink) {
2e0ac3d6 5466 DRM_ERROR("Failed to create sink!\n");
aed15309 5467 return NULL;
423788c7 5468 }
2e0ac3d6 5469 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5470
aed15309 5471 return sink;
2e0ac3d6
HW
5472}
5473
fa2123db
ML
5474static void set_multisync_trigger_params(
5475 struct dc_stream_state *stream)
5476{
ec372186
ML
5477 struct dc_stream_state *master = NULL;
5478
fa2123db 5479 if (stream->triggered_crtc_reset.enabled) {
ec372186
ML
5480 master = stream->triggered_crtc_reset.event_source;
5481 stream->triggered_crtc_reset.event =
5482 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5483 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5484 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
fa2123db
ML
5485 }
5486}
5487
5488static void set_master_stream(struct dc_stream_state *stream_set[],
5489 int stream_count)
5490{
5491 int j, highest_rfr = 0, master_stream = 0;
5492
5493 for (j = 0; j < stream_count; j++) {
5494 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5495 int refresh_rate = 0;
5496
380604e2 5497 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
5498 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5499 if (refresh_rate > highest_rfr) {
5500 highest_rfr = refresh_rate;
5501 master_stream = j;
5502 }
5503 }
5504 }
5505 for (j = 0; j < stream_count; j++) {
03736f4c 5506 if (stream_set[j])
fa2123db
ML
5507 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5508 }
5509}
5510
5511static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5512{
5513 int i = 0;
ec372186 5514 struct dc_stream_state *stream;
fa2123db
ML
5515
5516 if (context->stream_count < 2)
5517 return;
5518 for (i = 0; i < context->stream_count ; i++) {
5519 if (!context->streams[i])
5520 continue;
1f6010a9
DF
5521 /*
5522 * TODO: add a function to read AMD VSDB bits and set
fa2123db 5523 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 5524 * For now it's set to false
fa2123db 5525 */
fa2123db 5526 }
ec372186 5527
fa2123db 5528 set_master_stream(context->streams, context->stream_count);
ec372186
ML
5529
5530 for (i = 0; i < context->stream_count ; i++) {
5531 stream = context->streams[i];
5532
5533 if (!stream)
5534 continue;
5535
5536 set_multisync_trigger_params(stream);
5537 }
fa2123db
ML
5538}
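
set_master_stream() elects the stream with the highest refresh rate as the CRTC-reset master; refresh is recovered from the pixel clock and total timing exactly as in the expression above. A small sketch of that computation (the second timing is hypothetical):

#include <stdio.h>

/* Refresh rate in Hz from DC-style timing, as in set_master_stream():
 * (pix_clk_100hz * 100) / (h_total * v_total). */
static int refresh_hz(unsigned pix_clk_100hz, unsigned h_total,
		      unsigned v_total)
{
	return (int)((unsigned long long)pix_clk_100hz * 100 /
		     (h_total * v_total));
}

int main(void)
{
	/* 1080p CEA timing: 148.5 MHz, 2200 x 1125 -> 60 Hz */
	printf("%d Hz\n", refresh_hz(1485000, 2200, 1125));
	/* hypothetical high-refresh panel: 586 MHz, 2720 x 1481 -> ~145 Hz */
	printf("%d Hz\n", refresh_hz(5860000, 2720, 1481));
	return 0;
}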
5539
a85ba005
NC
5540static struct drm_display_mode *
5541get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5542 bool use_probed_modes)
5543{
5544 struct drm_display_mode *m, *m_pref = NULL;
5545 u16 current_refresh, highest_refresh;
5546 struct list_head *list_head = use_probed_modes ?
5547 &aconnector->base.probed_modes :
5548 &aconnector->base.modes;
5549
5550 if (aconnector->freesync_vid_base.clock != 0)
5551 return &aconnector->freesync_vid_base;
5552
5553 /* Find the preferred mode */
5554 list_for_each_entry (m, list_head, head) {
5555 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5556 m_pref = m;
5557 break;
5558 }
5559 }
5560
5561 if (!m_pref) {
5562 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5563 m_pref = list_first_entry_or_null(
5564 &aconnector->base.modes, struct drm_display_mode, head);
5565 if (!m_pref) {
5566 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5567 return NULL;
5568 }
5569 }
5570
5571 highest_refresh = drm_mode_vrefresh(m_pref);
5572
5573 /*
5574 * Find the mode with highest refresh rate with same resolution.
5575 * For some monitors, preferred mode is not the mode with highest
5576 * supported refresh rate.
5577 */
5578 list_for_each_entry (m, list_head, head) {
5579 current_refresh = drm_mode_vrefresh(m);
5580
5581 if (m->hdisplay == m_pref->hdisplay &&
5582 m->vdisplay == m_pref->vdisplay &&
5583 highest_refresh < current_refresh) {
5584 highest_refresh = current_refresh;
5585 m_pref = m;
5586 }
5587 }
5588
5589 aconnector->freesync_vid_base = *m_pref;
5590 return m_pref;
5591}
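
The scan above keeps the preferred mode's resolution but upgrades to its highest-refresh variant, since some monitors don't report the fastest mode as preferred. A minimal standalone version of that selection over a plain array (the struct and sample modes are hypothetical):

#include <stddef.h>
#include <stdio.h>

struct mode { int hdisplay, vdisplay, vrefresh; int preferred; };

/* Pick the preferred mode, then upgrade to the highest-refresh mode
 * with the same resolution, mirroring the list walk above. */
static const struct mode *pick_base_mode(const struct mode *m, size_t n)
{
	const struct mode *pref = NULL;
	size_t i;

	for (i = 0; i < n; i++)
		if (m[i].preferred) { pref = &m[i]; break; }
	if (!pref && n)
		pref = &m[0];	/* EDID with no preferred mode */
	if (!pref)
		return NULL;

	for (i = 0; i < n; i++)
		if (m[i].hdisplay == pref->hdisplay &&
		    m[i].vdisplay == pref->vdisplay &&
		    m[i].vrefresh > pref->vrefresh)
			pref = &m[i];
	return pref;
}

int main(void)
{
	struct mode modes[] = {
		{ 2560, 1440, 60, 1 },	/* preferred, but not fastest */
		{ 2560, 1440, 144, 0 },
		{ 1920, 1080, 120, 0 },
	};
	const struct mode *m = pick_base_mode(modes, 3);

	printf("%dx%d@%d\n", m->hdisplay, m->vdisplay, m->vrefresh); /* 2560x1440@144 */
	return 0;
}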
5592
fe8858bb 5593static bool is_freesync_video_mode(const struct drm_display_mode *mode,
a85ba005
NC
5594 struct amdgpu_dm_connector *aconnector)
5595{
5596 struct drm_display_mode *high_mode;
5597 int timing_diff;
5598
5599 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5600 if (!high_mode || !mode)
5601 return false;
5602
5603 timing_diff = high_mode->vtotal - mode->vtotal;
5604
5605 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5606 high_mode->hdisplay != mode->hdisplay ||
5607 high_mode->vdisplay != mode->vdisplay ||
5608 high_mode->hsync_start != mode->hsync_start ||
5609 high_mode->hsync_end != mode->hsync_end ||
5610 high_mode->htotal != mode->htotal ||
5611 high_mode->hskew != mode->hskew ||
5612 high_mode->vscan != mode->vscan ||
5613 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5614 high_mode->vsync_end - mode->vsync_end != timing_diff)
5615 return false;
5616 else
5617 return true;
5618}
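
A freesync video mode must differ from the base mode only in vertical blanking: same clock and horizontal timing, with vsync_start/vsync_end shifted by exactly the vtotal delta (a stretched front porch). A simplified sketch of that comparison on plain timing structs (the hskew/vscan fields are omitted; values are hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct timing {
	int clock, hdisplay, vdisplay;
	int hsync_start, hsync_end, htotal;
	int vsync_start, vsync_end, vtotal;
};

/* True when `mode` is `base` with only the vertical front porch
 * stretched, mirroring the comparison above. */
static bool is_freesync_variant(const struct timing *base,
				const struct timing *mode)
{
	int diff = base->vtotal - mode->vtotal;

	return base->clock == mode->clock &&
	       base->hdisplay == mode->hdisplay &&
	       base->vdisplay == mode->vdisplay &&
	       base->hsync_start == mode->hsync_start &&
	       base->hsync_end == mode->hsync_end &&
	       base->htotal == mode->htotal &&
	       base->vsync_start - mode->vsync_start == diff &&
	       base->vsync_end - mode->vsync_end == diff;
}

int main(void)
{
	/* 1080p@60 CEA base vs a ~48 Hz variant with a longer front porch */
	struct timing base = { 148500, 1920, 1080, 2008, 2052, 2200,
			       1084, 1089, 1125 };
	struct timing slow = base;

	slow.vtotal = 1406;		/* ~48 Hz at the same pixel clock */
	slow.vsync_start += 1406 - 1125;	/* sync pulse shifts with */
	slow.vsync_end += 1406 - 1125;		/* the stretched porch */
	printf("%d\n", is_freesync_variant(&base, &slow));	/* prints 1 */
	return 0;
}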
5619
3ee6b26b
AD
5620static struct dc_stream_state *
5621create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5622 const struct drm_display_mode *drm_mode,
b333730d 5623 const struct dm_connector_state *dm_state,
cbd14ae7
SW
5624 const struct dc_stream_state *old_stream,
5625 int requested_bpc)
e7b07cee
HW
5626{
5627 struct drm_display_mode *preferred_mode = NULL;
391ef035 5628 struct drm_connector *drm_connector;
42ba01fc
NK
5629 const struct drm_connector_state *con_state =
5630 dm_state ? &dm_state->base : NULL;
0971c40e 5631 struct dc_stream_state *stream = NULL;
e7b07cee 5632 struct drm_display_mode mode = *drm_mode;
a85ba005
NC
5633 struct drm_display_mode saved_mode;
5634 struct drm_display_mode *freesync_mode = NULL;
e7b07cee 5635 bool native_mode_found = false;
a85ba005 5636 bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
b333730d 5637 int mode_refresh;
58124bf8 5638 int preferred_refresh = 0;
defeb878 5639#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015
DF
5640 struct dsc_dec_dpcd_caps dsc_caps;
5641 uint32_t link_bandwidth_kbps;
7c431455 5642#endif
aed15309 5643 struct dc_sink *sink = NULL;
a85ba005
NC
5644
5645 memset(&saved_mode, 0, sizeof(saved_mode));
5646
b830ebc9 5647 if (aconnector == NULL) {
e7b07cee 5648 DRM_ERROR("aconnector is NULL!\n");
64245fa7 5649 return stream;
e7b07cee
HW
5650 }
5651
e7b07cee 5652 drm_connector = &aconnector->base;
2e0ac3d6 5653
f4ac176e 5654 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
5655 sink = create_fake_sink(aconnector);
5656 if (!sink)
5657 return stream;
aed15309
ML
5658 } else {
5659 sink = aconnector->dc_sink;
dcd5fb82 5660 dc_sink_retain(sink);
f4ac176e 5661 }
2e0ac3d6 5662
aed15309 5663 stream = dc_create_stream_for_sink(sink);
4562236b 5664
b830ebc9 5665 if (stream == NULL) {
e7b07cee 5666 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 5667 goto finish;
e7b07cee
HW
5668 }
5669
ceb3dbb4
JL
5670 stream->dm_stream_context = aconnector;
5671
4a36fcba
WL
5672 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5673 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5674
e7b07cee
HW
5675 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5676 /* Search for preferred mode */
5677 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5678 native_mode_found = true;
5679 break;
5680 }
5681 }
5682 if (!native_mode_found)
5683 preferred_mode = list_first_entry_or_null(
5684 &aconnector->base.modes,
5685 struct drm_display_mode,
5686 head);
5687
b333730d
BL
5688 mode_refresh = drm_mode_vrefresh(&mode);
5689
b830ebc9 5690 if (preferred_mode == NULL) {
1f6010a9
DF
5691 /*
5692 * This may not be an error; the use case is when we have no
e7b07cee
HW
5693 * usermode calls to reset and set mode upon hotplug. In this
5694 * case, we call set mode ourselves to restore the previous mode
5695 * and the mode list may not be filled in yet.
5696 */
f1ad2f5e 5697 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee 5698 } else {
a85ba005
NC
5699 recalculate_timing |= amdgpu_freesync_vid_mode &&
5700 is_freesync_video_mode(&mode, aconnector);
5701 if (recalculate_timing) {
5702 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5703 saved_mode = mode;
5704 mode = *freesync_mode;
5705 } else {
5706 decide_crtc_timing_for_drm_display_mode(
e7b07cee 5707 &mode, preferred_mode,
f4791779 5708 dm_state ? (dm_state->scaling != RMX_OFF) : false);
a85ba005
NC
5709 }
5710
58124bf8 5711 preferred_refresh = drm_mode_vrefresh(preferred_mode);
e7b07cee
HW
5712 }
5713
a85ba005
NC
5714 if (recalculate_timing)
5715 drm_mode_set_crtcinfo(&saved_mode, 0);
fe8858bb 5716 else if (!dm_state)
f783577c
JFZ
5717 drm_mode_set_crtcinfo(&mode, 0);
5718
a85ba005 5719 /*
b333730d
BL
5720 * If scaling is enabled and the refresh rate didn't change,
5721 * we copy the VIC and polarities of the old timings.
5722 */
a85ba005
NC
5723 if (!recalculate_timing || mode_refresh != preferred_refresh)
5724 fill_stream_properties_from_drm_display_mode(
5725 stream, &mode, &aconnector->base, con_state, NULL,
5726 requested_bpc);
b333730d 5727 else
a85ba005
NC
5728 fill_stream_properties_from_drm_display_mode(
5729 stream, &mode, &aconnector->base, con_state, old_stream,
5730 requested_bpc);
b333730d 5731
df2f1015
DF
5732 stream->timing.flags.DSC = 0;
5733
5734 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
defeb878 5735#if defined(CONFIG_DRM_AMD_DC_DCN)
2af0f378
NC
5736 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5737 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6d824ed5 5738 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
df2f1015
DF
5739 &dsc_caps);
5740 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5741 dc_link_get_link_cap(aconnector->dc_link));
5742
0749ddeb 5743 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
bcc6aa61 5744 /* Set DSC policy according to dsc_clock_en */
0749ddeb
EB
5745 dc_dsc_policy_set_enable_dsc_when_not_needed(
5746 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
bcc6aa61 5747
0417df16 5748 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
df2f1015 5749 &dsc_caps,
0417df16 5750 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
9abdf392 5751 0,
df2f1015
DF
5752 link_bandwidth_kbps,
5753 &stream->timing,
5754 &stream->timing.dsc_cfg))
5755 stream->timing.flags.DSC = 1;
27e84dd7 5756 /* Overwrite the stream flag if DSC is enabled through debugfs */
0749ddeb 5757 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
097e6d98 5758 stream->timing.flags.DSC = 1;
734e4c97 5759
28b2f656
EB
5760 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5761 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
734e4c97 5762
28b2f656
EB
5763 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5764 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5268bf13
EB
5765
5766 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5767 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
097e6d98 5768 }
39a4eb85 5769#endif
df2f1015 5770 }
39a4eb85 5771
e7b07cee
HW
5772 update_stream_scaling_settings(&mode, dm_state, stream);
5773
5774 fill_audio_info(
5775 &stream->audio_info,
5776 drm_connector,
aed15309 5777 sink);
e7b07cee 5778
ceb3dbb4 5779 update_stream_signal(stream, sink);
9182b4cb 5780
d832fc3b 5781 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
5782 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5783
8a488f5d
RL
5784 if (stream->link->psr_settings.psr_feature_enabled) {
5785 /*
5786 * Should decide whether the stream supports VSC SDP colorimetry
5787 * before building the VSC info packet.
5788 */
5789 stream->use_vsc_sdp_for_colorimetry = false;
5790 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5791 stream->use_vsc_sdp_for_colorimetry =
5792 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5793 } else {
5794 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5795 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 5796 }
8a488f5d 5797 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
8c322309 5798 }
aed15309 5799finish:
dcd5fb82 5800 dc_sink_release(sink);
9e3efe3e 5801
e7b07cee
HW
5802 return stream;
5803}
5804
7578ecda 5805static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
5806{
5807 drm_crtc_cleanup(crtc);
5808 kfree(crtc);
5809}
5810
5811static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 5812 struct drm_crtc_state *state)
e7b07cee
HW
5813{
5814 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5815
5816 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5817 if (cur->stream)
5818 dc_stream_release(cur->stream);
5819
5820
5821 __drm_atomic_helper_crtc_destroy_state(state);
5822
5823
5824 kfree(state);
5825}
5826
5827static void dm_crtc_reset_state(struct drm_crtc *crtc)
5828{
5829 struct dm_crtc_state *state;
5830
5831 if (crtc->state)
5832 dm_crtc_destroy_state(crtc, crtc->state);
5833
5834 state = kzalloc(sizeof(*state), GFP_KERNEL);
5835 if (WARN_ON(!state))
5836 return;
5837
1f8a52ec 5838 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
5839}
5840
5841static struct drm_crtc_state *
5842dm_crtc_duplicate_state(struct drm_crtc *crtc)
5843{
5844 struct dm_crtc_state *state, *cur;
5845
5846 cur = to_dm_crtc_state(crtc->state);
5847
5848 if (WARN_ON(!crtc->state))
5849 return NULL;
5850
2004f45e 5851 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
5852 if (!state)
5853 return NULL;
e7b07cee
HW
5854
5855 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5856
5857 if (cur->stream) {
5858 state->stream = cur->stream;
5859 dc_stream_retain(state->stream);
5860 }
5861
d6ef9b41 5862 state->active_planes = cur->active_planes;
98e6436d 5863 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 5864 state->abm_level = cur->abm_level;
bb47de73
NK
5865 state->vrr_supported = cur->vrr_supported;
5866 state->freesync_config = cur->freesync_config;
cf020d49
NK
5867 state->cm_has_degamma = cur->cm_has_degamma;
5868 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
e7b07cee
HW
5869 /* TODO: Duplicate dc_stream once the stream object is flattened */
5870
5871 return &state->base;
5872}
5873
86bc2219 5874#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
e69231c4 5875static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
86bc2219
WL
5876{
5877 crtc_debugfs_init(crtc);
5878
5879 return 0;
5880}
5881#endif
5882
d2574c33
MK
5883static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5884{
5885 enum dc_irq_source irq_source;
5886 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5887 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
5888 int rc;
5889
5890 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5891
5892 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5893
4711c033
LT
5894 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5895 acrtc->crtc_id, enable ? "en" : "dis", rc);
d2574c33
MK
5896 return rc;
5897}
589d2739
HW
5898
5899static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5900{
5901 enum dc_irq_source irq_source;
5902 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 5903 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 5904 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
ea3b4242 5905#if defined(CONFIG_DRM_AMD_DC_DCN)
71338cb4 5906 struct amdgpu_display_manager *dm = &adev->dm;
ea3b4242
QZ
5907 unsigned long flags;
5908#endif
d2574c33
MK
5909 int rc = 0;
5910
5911 if (enable) {
5912 /* vblank irq on -> Only need vupdate irq in vrr mode */
5913 if (amdgpu_dm_vrr_active(acrtc_state))
5914 rc = dm_set_vupdate_irq(crtc, true);
5915 } else {
5916 /* vblank irq off -> vupdate irq off */
5917 rc = dm_set_vupdate_irq(crtc, false);
5918 }
5919
5920 if (rc)
5921 return rc;
589d2739
HW
5922
5923 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
5924
5925 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5926 return -EBUSY;
5927
98ab5f35
BL
5928 if (amdgpu_in_reset(adev))
5929 return 0;
5930
4928b480 5931#if defined(CONFIG_DRM_AMD_DC_DCN)
ea3b4242
QZ
5932 spin_lock_irqsave(&dm->vblank_lock, flags);
5933 dm->vblank_workqueue->dm = dm;
5934 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5935 dm->vblank_workqueue->enable = enable;
5936 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5937 schedule_work(&dm->vblank_workqueue->mall_work);
4928b480 5938#endif
71338cb4 5939
71338cb4 5940 return 0;
589d2739
HW
5941}
5942
5943static int dm_enable_vblank(struct drm_crtc *crtc)
5944{
5945 return dm_set_vblank(crtc, true);
5946}
5947
5948static void dm_disable_vblank(struct drm_crtc *crtc)
5949{
5950 dm_set_vblank(crtc, false);
5951}
5952
e7b07cee
HW
5953/* Implemented only the options currently available for the driver */
5954static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5955 .reset = dm_crtc_reset_state,
5956 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
5957 .set_config = drm_atomic_helper_set_config,
5958 .page_flip = drm_atomic_helper_page_flip,
5959 .atomic_duplicate_state = dm_crtc_duplicate_state,
5960 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 5961 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 5962 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 5963 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 5964 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
5965 .enable_vblank = dm_enable_vblank,
5966 .disable_vblank = dm_disable_vblank,
e3eff4b5 5967 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
86bc2219
WL
5968#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5969 .late_register = amdgpu_dm_crtc_late_register,
5970#endif
e7b07cee
HW
5971};
5972
5973static enum drm_connector_status
5974amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5975{
5976 bool connected;
c84dec2f 5977 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 5978
1f6010a9
DF
5979 /*
5980 * Notes:
e7b07cee
HW
5981 * 1. This interface is NOT called in context of HPD irq.
5982 * 2. This interface *is called* in the context of a user-mode ioctl,
1f6010a9
DF
5983 * which makes it a bad place for *any* MST-related activity.
5984 */
e7b07cee 5985
8580d60b
HW
5986 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5987 !aconnector->fake_enable)
e7b07cee
HW
5988 connected = (aconnector->dc_sink != NULL);
5989 else
5990 connected = (aconnector->base.force == DRM_FORCE_ON);
5991
0f877894
OV
5992 update_subconnector_property(aconnector);
5993
e7b07cee
HW
5994 return (connected ? connector_status_connected :
5995 connector_status_disconnected);
5996}
5997
3ee6b26b
AD
5998int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5999 struct drm_connector_state *connector_state,
6000 struct drm_property *property,
6001 uint64_t val)
e7b07cee
HW
6002{
6003 struct drm_device *dev = connector->dev;
1348969a 6004 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6005 struct dm_connector_state *dm_old_state =
6006 to_dm_connector_state(connector->state);
6007 struct dm_connector_state *dm_new_state =
6008 to_dm_connector_state(connector_state);
6009
6010 int ret = -EINVAL;
6011
6012 if (property == dev->mode_config.scaling_mode_property) {
6013 enum amdgpu_rmx_type rmx_type;
6014
6015 switch (val) {
6016 case DRM_MODE_SCALE_CENTER:
6017 rmx_type = RMX_CENTER;
6018 break;
6019 case DRM_MODE_SCALE_ASPECT:
6020 rmx_type = RMX_ASPECT;
6021 break;
6022 case DRM_MODE_SCALE_FULLSCREEN:
6023 rmx_type = RMX_FULL;
6024 break;
6025 case DRM_MODE_SCALE_NONE:
6026 default:
6027 rmx_type = RMX_OFF;
6028 break;
6029 }
6030
6031 if (dm_old_state->scaling == rmx_type)
6032 return 0;
6033
6034 dm_new_state->scaling = rmx_type;
6035 ret = 0;
6036 } else if (property == adev->mode_info.underscan_hborder_property) {
6037 dm_new_state->underscan_hborder = val;
6038 ret = 0;
6039 } else if (property == adev->mode_info.underscan_vborder_property) {
6040 dm_new_state->underscan_vborder = val;
6041 ret = 0;
6042 } else if (property == adev->mode_info.underscan_property) {
6043 dm_new_state->underscan_enable = val;
6044 ret = 0;
c1ee92f9
DF
6045 } else if (property == adev->mode_info.abm_level_property) {
6046 dm_new_state->abm_level = val;
6047 ret = 0;
e7b07cee
HW
6048 }
6049
6050 return ret;
6051}
6052
3ee6b26b
AD
6053int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6054 const struct drm_connector_state *state,
6055 struct drm_property *property,
6056 uint64_t *val)
e7b07cee
HW
6057{
6058 struct drm_device *dev = connector->dev;
1348969a 6059 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6060 struct dm_connector_state *dm_state =
6061 to_dm_connector_state(state);
6062 int ret = -EINVAL;
6063
6064 if (property == dev->mode_config.scaling_mode_property) {
6065 switch (dm_state->scaling) {
6066 case RMX_CENTER:
6067 *val = DRM_MODE_SCALE_CENTER;
6068 break;
6069 case RMX_ASPECT:
6070 *val = DRM_MODE_SCALE_ASPECT;
6071 break;
6072 case RMX_FULL:
6073 *val = DRM_MODE_SCALE_FULLSCREEN;
6074 break;
6075 case RMX_OFF:
6076 default:
6077 *val = DRM_MODE_SCALE_NONE;
6078 break;
6079 }
6080 ret = 0;
6081 } else if (property == adev->mode_info.underscan_hborder_property) {
6082 *val = dm_state->underscan_hborder;
6083 ret = 0;
6084 } else if (property == adev->mode_info.underscan_vborder_property) {
6085 *val = dm_state->underscan_vborder;
6086 ret = 0;
6087 } else if (property == adev->mode_info.underscan_property) {
6088 *val = dm_state->underscan_enable;
6089 ret = 0;
c1ee92f9
DF
6090 } else if (property == adev->mode_info.abm_level_property) {
6091 *val = dm_state->abm_level;
6092 ret = 0;
e7b07cee 6093 }
c1ee92f9 6094
e7b07cee
HW
6095 return ret;
6096}
6097
526c654a
ED
6098static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6099{
6100 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6101
6102 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6103}
6104
7578ecda 6105static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 6106{
c84dec2f 6107 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6108 const struct dc_link *link = aconnector->dc_link;
1348969a 6109 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 6110 struct amdgpu_display_manager *dm = &adev->dm;
ada8ce15 6111
5dff80bd
AG
6112 /*
6113 * Call only if mst_mgr was initialized before, since it's not done
6114 * for all connector types.
6115 */
6116 if (aconnector->mst_mgr.dev)
6117 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6118
e7b07cee
HW
6119#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6120 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6121
89fc8d4e 6122 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5cd29ed0
HW
6123 link->type != dc_connection_none &&
6124 dm->backlight_dev) {
6125 backlight_device_unregister(dm->backlight_dev);
6126 dm->backlight_dev = NULL;
e7b07cee
HW
6127 }
6128#endif
dcd5fb82
MF
6129
6130 if (aconnector->dc_em_sink)
6131 dc_sink_release(aconnector->dc_em_sink);
6132 aconnector->dc_em_sink = NULL;
6133 if (aconnector->dc_sink)
6134 dc_sink_release(aconnector->dc_sink);
6135 aconnector->dc_sink = NULL;
6136
e86e8947 6137 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
6138 drm_connector_unregister(connector);
6139 drm_connector_cleanup(connector);
526c654a
ED
6140 if (aconnector->i2c) {
6141 i2c_del_adapter(&aconnector->i2c->base);
6142 kfree(aconnector->i2c);
6143 }
7daec99f 6144 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 6145
e7b07cee
HW
6146 kfree(connector);
6147}
6148
6149void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6150{
6151 struct dm_connector_state *state =
6152 to_dm_connector_state(connector->state);
6153
df099b9b
LSL
6154 if (connector->state)
6155 __drm_atomic_helper_connector_destroy_state(connector->state);
6156
e7b07cee
HW
6157 kfree(state);
6158
6159 state = kzalloc(sizeof(*state), GFP_KERNEL);
6160
6161 if (state) {
6162 state->scaling = RMX_OFF;
6163 state->underscan_enable = false;
6164 state->underscan_hborder = 0;
6165 state->underscan_vborder = 0;
01933ba4 6166 state->base.max_requested_bpc = 8;
3261e013
ML
6167 state->vcpi_slots = 0;
6168 state->pbn = 0;
c3e50f89
NK
6169 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6170 state->abm_level = amdgpu_dm_abm_level;
6171
df099b9b 6172 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
6173 }
6174}
6175
3ee6b26b
AD
6176struct drm_connector_state *
6177amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
6178{
6179 struct dm_connector_state *state =
6180 to_dm_connector_state(connector->state);
6181
6182 struct dm_connector_state *new_state =
6183 kmemdup(state, sizeof(*state), GFP_KERNEL);
6184
98e6436d
AK
6185 if (!new_state)
6186 return NULL;
e7b07cee 6187
98e6436d
AK
6188 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6189
6190 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6191 new_state->abm_level = state->abm_level;
922454c2
NK
6192 new_state->scaling = state->scaling;
6193 new_state->underscan_enable = state->underscan_enable;
6194 new_state->underscan_hborder = state->underscan_hborder;
6195 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
6196 new_state->vcpi_slots = state->vcpi_slots;
6197 new_state->pbn = state->pbn;
98e6436d 6198 return &new_state->base;
e7b07cee
HW
6199}
6200
14f04fa4
AD
6201static int
6202amdgpu_dm_connector_late_register(struct drm_connector *connector)
6203{
6204 struct amdgpu_dm_connector *amdgpu_dm_connector =
6205 to_amdgpu_dm_connector(connector);
00a8037e 6206 int r;
14f04fa4 6207
00a8037e
AD
6208 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6209 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6210 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6211 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6212 if (r)
6213 return r;
6214 }
6215
6216#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
6217 connector_debugfs_init(amdgpu_dm_connector);
6218#endif
6219
6220 return 0;
6221}
6222
e7b07cee
HW
6223static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6224 .reset = amdgpu_dm_connector_funcs_reset,
6225 .detect = amdgpu_dm_connector_detect,
6226 .fill_modes = drm_helper_probe_single_connector_modes,
6227 .destroy = amdgpu_dm_connector_destroy,
6228 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6229 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6230 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6231 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6232 .late_register = amdgpu_dm_connector_late_register,
526c654a 6233 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
6234};
6235
e7b07cee
HW
6236static int get_modes(struct drm_connector *connector)
6237{
6238 return amdgpu_dm_connector_get_modes(connector);
6239}
6240
c84dec2f 6241static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6242{
6243 struct dc_sink_init_data init_params = {
6244 .link = aconnector->dc_link,
6245 .sink_signal = SIGNAL_TYPE_VIRTUAL
6246 };
70e8ffc5 6247 struct edid *edid;
e7b07cee 6248
a89ff457 6249 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
6250 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6251 aconnector->base.name);
6252
6253 aconnector->base.force = DRM_FORCE_OFF;
6254 aconnector->base.override_edid = false;
6255 return;
6256 }
6257
70e8ffc5
HW
6258 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6259
e7b07cee
HW
6260 aconnector->edid = edid;
6261
6262 aconnector->dc_em_sink = dc_link_add_remote_sink(
6263 aconnector->dc_link,
6264 (uint8_t *)edid,
6265 (edid->extensions + 1) * EDID_LENGTH,
6266 &init_params);
6267
dcd5fb82 6268 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
6269 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6270 aconnector->dc_link->local_sink :
6271 aconnector->dc_em_sink;
dcd5fb82
MF
6272 dc_sink_retain(aconnector->dc_sink);
6273 }
e7b07cee
HW
6274}
6275
c84dec2f 6276static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6277{
6278 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6279
1f6010a9
DF
6280 /*
6281 * In case of headless boot with force-on for a DP managed connector,
e7b07cee
HW
6282 * these settings have to be != 0 to get an initial modeset.
6283 */
6284 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6285 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6286 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6287 }
6288
6289
6290 aconnector->base.override_edid = true;
6291 create_eml_sink(aconnector);
6292}
6293
cbd14ae7
SW
6294static struct dc_stream_state *
6295create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6296 const struct drm_display_mode *drm_mode,
6297 const struct dm_connector_state *dm_state,
6298 const struct dc_stream_state *old_stream)
6299{
6300 struct drm_connector *connector = &aconnector->base;
1348969a 6301 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 6302 struct dc_stream_state *stream;
4b7da34b
SW
6303 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6304 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
6305 enum dc_status dc_result = DC_OK;
6306
6307 do {
6308 stream = create_stream_for_sink(aconnector, drm_mode,
6309 dm_state, old_stream,
6310 requested_bpc);
6311 if (stream == NULL) {
6312 DRM_ERROR("Failed to create stream for sink!\n");
6313 break;
6314 }
6315
6316 dc_result = dc_validate_stream(adev->dm.dc, stream);
6317
6318 if (dc_result != DC_OK) {
74a16675 6319 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
6320 drm_mode->hdisplay,
6321 drm_mode->vdisplay,
6322 drm_mode->clock,
74a16675
RS
6323 dc_result,
6324 dc_status_to_str(dc_result));
cbd14ae7
SW
6325
6326 dc_stream_release(stream);
6327 stream = NULL;
6328 requested_bpc -= 2; /* lower bpc to retry validation */
6329 }
6330
6331 } while (stream == NULL && requested_bpc >= 6);
6332
68eb3ae3
WS
6333 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6334 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6335
6336 aconnector->force_yuv420_output = true;
6337 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6338 dm_state, old_stream);
6339 aconnector->force_yuv420_output = false;
6340 }
6341
cbd14ae7
SW
6342 return stream;
6343}
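
The loop above retries stream validation with progressively lower bpc (requested max, then -2 each pass, down to 6), and the caller falls back to forced YCbCr420 if even that fails. A standalone sketch of the fallback order; validate() here is a hypothetical stand-in for dc_validate_stream():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for dc_validate_stream(): in this sketch,
 * only 8 bpc and below fit the link. */
static bool validate(int bpc)
{
	return bpc <= 8;
}

/* Mirror of the bpc fallback loop above: try max_bpc, then step down
 * by 2 until validation passes or we run out at 6 bpc. */
static int negotiate_bpc(int max_bpc)
{
	int bpc;

	for (bpc = max_bpc; bpc >= 6; bpc -= 2) {
		if (validate(bpc))
			return bpc;
		printf("%d bpc failed validation, retrying\n", bpc);
	}
	return -1;	/* caller would retry with YCbCr420 forced */
}

int main(void)
{
	printf("negotiated %d bpc\n", negotiate_bpc(12));	/* 12 -> 10 -> 8 */
	return 0;
}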
6344
ba9ca088 6345enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 6346 struct drm_display_mode *mode)
e7b07cee
HW
6347{
6348 int result = MODE_ERROR;
6349 struct dc_sink *dc_sink;
e7b07cee 6350 /* TODO: Unhardcode stream count */
0971c40e 6351 struct dc_stream_state *stream;
c84dec2f 6352 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
6353
6354 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6355 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6356 return result;
6357
1f6010a9
DF
6358 /*
6359 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
6360 * EDID mgmt
6361 */
6362 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6363 !aconnector->dc_em_sink)
6364 handle_edid_mgmt(aconnector);
6365
c84dec2f 6366 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 6367
ad975f44
VL
6368 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6369 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
6370 DRM_ERROR("dc_sink is NULL!\n");
6371 goto fail;
6372 }
6373
cbd14ae7
SW
6374 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6375 if (stream) {
6376 dc_stream_release(stream);
e7b07cee 6377 result = MODE_OK;
cbd14ae7 6378 }
e7b07cee
HW
6379
6380fail:
6381 /* TODO: error handling */
6382 return result;
6383}
6384
88694af9
NK
6385static int fill_hdr_info_packet(const struct drm_connector_state *state,
6386 struct dc_info_packet *out)
6387{
6388 struct hdmi_drm_infoframe frame;
6389 unsigned char buf[30]; /* 26 + 4 */
6390 ssize_t len;
6391 int ret, i;
6392
6393 memset(out, 0, sizeof(*out));
6394
6395 if (!state->hdr_output_metadata)
6396 return 0;
6397
6398 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6399 if (ret)
6400 return ret;
6401
6402 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6403 if (len < 0)
6404 return (int)len;
6405
6406 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6407 if (len != 30)
6408 return -EINVAL;
6409
6410 /* Prepare the infopacket for DC. */
6411 switch (state->connector->connector_type) {
6412 case DRM_MODE_CONNECTOR_HDMIA:
6413 out->hb0 = 0x87; /* type */
6414 out->hb1 = 0x01; /* version */
6415 out->hb2 = 0x1A; /* length */
6416 out->sb[0] = buf[3]; /* checksum */
6417 i = 1;
6418 break;
6419
6420 case DRM_MODE_CONNECTOR_DisplayPort:
6421 case DRM_MODE_CONNECTOR_eDP:
6422 out->hb0 = 0x00; /* sdp id, zero */
6423 out->hb1 = 0x87; /* type */
6424 out->hb2 = 0x1D; /* payload len - 1 */
6425 out->hb3 = (0x13 << 2); /* sdp version */
6426 out->sb[0] = 0x01; /* version */
6427 out->sb[1] = 0x1A; /* length */
6428 i = 2;
6429 break;
6430
6431 default:
6432 return -EINVAL;
6433 }
6434
6435 memcpy(&out->sb[i], &buf[4], 26);
6436 out->valid = true;
6437
6438 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6439 sizeof(out->sb), false);
6440
6441 return 0;
6442}
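
HDMI carries the static HDR metadata as a DRM infoframe (type 0x87, version 1, 26-byte payload plus checksum), while DP wraps the same 26 bytes in an SDP whose hb2 holds payload length minus one and whose hb3 puts SDP version 0x13 in bits 7:2. A sketch that just prints the two header layouts used above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* HDMI Dynamic Range and Mastering infoframe header, as above. */
	uint8_t hdmi_hb[3] = { 0x87, 0x01, 0x1A };	/* type, version, length */

	/* DP SDP header for the same payload: 0x1D = payload len - 1,
	 * version 0x13 shifted into bits 7:2 of hb3. */
	uint8_t dp_hb[4] = { 0x00, 0x87, 0x1D, 0x13 << 2 };

	printf("HDMI: %02x %02x %02x\n", hdmi_hb[0], hdmi_hb[1], hdmi_hb[2]);
	printf("DP:   %02x %02x %02x %02x\n",
	       dp_hb[0], dp_hb[1], dp_hb[2], dp_hb[3]);	/* 00 87 1d 4c */
	return 0;
}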
6443
88694af9
NK
6444static int
6445amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 6446 struct drm_atomic_state *state)
88694af9 6447{
51e857af
SP
6448 struct drm_connector_state *new_con_state =
6449 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
6450 struct drm_connector_state *old_con_state =
6451 drm_atomic_get_old_connector_state(state, conn);
6452 struct drm_crtc *crtc = new_con_state->crtc;
6453 struct drm_crtc_state *new_crtc_state;
6454 int ret;
6455
e8a98235
RS
6456 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6457
88694af9
NK
6458 if (!crtc)
6459 return 0;
6460
72921cdf 6461 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
6462 struct dc_info_packet hdr_infopacket;
6463
6464 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6465 if (ret)
6466 return ret;
6467
6468 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6469 if (IS_ERR(new_crtc_state))
6470 return PTR_ERR(new_crtc_state);
6471
6472 /*
6473 * DC considers the stream backends changed if the
6474 * static metadata changes. Forcing the modeset also
6475 * gives a simple way for userspace to switch from
b232d4ed
NK
6476 * 8bpc to 10bpc when setting the metadata to enter
6477 * or exit HDR.
6478 *
6479 * Changing the static metadata after it's been
6480 * set is permissible, however. So only force a
6481 * modeset if we're entering or exiting HDR.
88694af9 6482 */
b232d4ed
NK
6483 new_crtc_state->mode_changed =
6484 !old_con_state->hdr_output_metadata ||
6485 !new_con_state->hdr_output_metadata;
88694af9
NK
6486 }
6487
6488 return 0;
6489}
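
Note that the guard above only runs when the HDR metadata blobs differ, and it forces mode_changed only when metadata appears or disappears; a pure metadata update keeps the fast path. A one-function sketch of that decision:

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the modeset decision above: force a full modeset only when
 * HDR metadata appears or disappears, not when it merely changes. */
static bool hdr_needs_modeset(bool old_has_metadata, bool new_has_metadata)
{
	return !old_has_metadata || !new_has_metadata;
}

int main(void)
{
	printf("%d\n", hdr_needs_modeset(false, true));	/* entering HDR: 1 */
	printf("%d\n", hdr_needs_modeset(true, true));	/* metadata update: 0 */
	printf("%d\n", hdr_needs_modeset(true, false));	/* exiting HDR: 1 */
	return 0;
}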
6490
e7b07cee
HW
6491static const struct drm_connector_helper_funcs
6492amdgpu_dm_connector_helper_funcs = {
6493 /*
1f6010a9 6494 * If hotplugging a second, bigger display in FB console mode, bigger
b830ebc9 6495 * resolution modes will be filtered out by drm_mode_validate_size() and
1f6010a9 6496 * go missing after the user starts lightdm. So we need to renew the
b830ebc9
HW
6497 * modes list in the get_modes callback, not just return the modes count.
6498 */
e7b07cee
HW
6499 .get_modes = get_modes,
6500 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 6501 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
6502};
6503
6504static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6505{
6506}
6507
d6ef9b41 6508static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
6509{
6510 struct drm_atomic_state *state = new_crtc_state->state;
6511 struct drm_plane *plane;
6512 int num_active = 0;
6513
6514 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6515 struct drm_plane_state *new_plane_state;
6516
6517 /* Cursor planes are "fake". */
6518 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6519 continue;
6520
6521 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6522
6523 if (!new_plane_state) {
6524 /*
 6525 * The plane is enabled on the CRTC and hasn't changed
6526 * state. This means that it previously passed
6527 * validation and is therefore enabled.
6528 */
6529 num_active += 1;
6530 continue;
6531 }
6532
6533 /* We need a framebuffer to be considered enabled. */
6534 num_active += (new_plane_state->fb != NULL);
6535 }
6536
d6ef9b41
NK
6537 return num_active;
6538}
6539
8fe684e9
NK
6540static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6541 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
6542{
6543 struct dm_crtc_state *dm_new_crtc_state =
6544 to_dm_crtc_state(new_crtc_state);
6545
6546 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
6547
6548 if (!dm_new_crtc_state->stream)
6549 return;
6550
6551 dm_new_crtc_state->active_planes =
6552 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
6553}
6554
3ee6b26b 6555static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 6556 struct drm_atomic_state *state)
e7b07cee 6557{
29b77ad7
MR
6558 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6559 crtc);
1348969a 6560 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 6561 struct dc *dc = adev->dm.dc;
29b77ad7 6562 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
6563 int ret = -EINVAL;
6564
5b8c5969 6565 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 6566
29b77ad7 6567 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 6568
9b690ef3 6569 if (unlikely(!dm_crtc_state->stream &&
29b77ad7 6570 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
6571 WARN_ON(1);
6572 return ret;
6573 }
6574
bc92c065 6575 /*
b836a274
MD
6576 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6577 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6578 * planes are disabled, which is not supported by the hardware. And there is legacy
6579 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 6580 */
29b77ad7 6581 if (crtc_state->enable &&
ea9522f5
SS
6582 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6583 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 6584 return -EINVAL;
ea9522f5 6585 }
c14a005c 6586
b836a274
MD
6587 /* In some use cases, like reset, no stream is attached */
6588 if (!dm_crtc_state->stream)
6589 return 0;
6590
62c933f9 6591 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
6592 return 0;
6593
ea9522f5 6594 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
6595 return ret;
6596}
6597
3ee6b26b
AD
6598static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6599 const struct drm_display_mode *mode,
6600 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
6601{
6602 return true;
6603}
6604
6605static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6606 .disable = dm_crtc_helper_disable,
6607 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
6608 .mode_fixup = dm_crtc_helper_mode_fixup,
6609 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
6610};
6611
6612static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6613{
6614
6615}
6616
3261e013
ML
 6617static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6618{
6619 switch (display_color_depth) {
6620 case COLOR_DEPTH_666:
6621 return 6;
6622 case COLOR_DEPTH_888:
6623 return 8;
6624 case COLOR_DEPTH_101010:
6625 return 10;
6626 case COLOR_DEPTH_121212:
6627 return 12;
6628 case COLOR_DEPTH_141414:
6629 return 14;
6630 case COLOR_DEPTH_161616:
6631 return 16;
6632 default:
6633 break;
6634 }
6635 return 0;
6636}
6637
3ee6b26b
AD
6638static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6639 struct drm_crtc_state *crtc_state,
6640 struct drm_connector_state *conn_state)
e7b07cee 6641{
3261e013
ML
6642 struct drm_atomic_state *state = crtc_state->state;
6643 struct drm_connector *connector = conn_state->connector;
6644 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6645 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6646 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6647 struct drm_dp_mst_topology_mgr *mst_mgr;
6648 struct drm_dp_mst_port *mst_port;
6649 enum dc_color_depth color_depth;
6650 int clock, bpp = 0;
1bc22f20 6651 bool is_y420 = false;
3261e013
ML
6652
6653 if (!aconnector->port || !aconnector->dc_sink)
6654 return 0;
6655
6656 mst_port = aconnector->port;
6657 mst_mgr = &aconnector->mst_port->mst_mgr;
6658
6659 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6660 return 0;
6661
6662 if (!state->duplicated) {
cbd14ae7 6663 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
6664 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6665 aconnector->force_yuv420_output;
cbd14ae7
SW
6666 color_depth = convert_color_depth_from_display_info(connector,
6667 is_y420,
6668 max_bpc);
3261e013
ML
6669 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6670 clock = adjusted_mode->clock;
dc48529f 6671 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
6672 }
6673 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6674 mst_mgr,
6675 mst_port,
1c6c1cb5 6676 dm_new_connector_state->pbn,
03ca9600 6677 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
6678 if (dm_new_connector_state->vcpi_slots < 0) {
6679 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6680 return dm_new_connector_state->vcpi_slots;
6681 }
e7b07cee
HW
6682 return 0;
6683}
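/*
 * Rough PBN sanity check for the calculation above (numbers are
 * illustrative): a 1080p60 mode has adjusted_mode->clock = 148500 kHz;
 * at 8 bpc RGB, bpp = 8 * 3 = 24. drm_dp_calc_pbn_mode() converts the
 * resulting 148500 * 24 kbit/s stream into MST payload bandwidth units
 * of 54/64 MBytes/s with a ~0.6% margin, which works out to roughly
 * 530 PBN - the value the VCPI slot allocation is then derived from.
 */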
6684
6685const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6686 .disable = dm_encoder_helper_disable,
6687 .atomic_check = dm_encoder_helper_atomic_check
6688};
6689
d9fe1a4c 6690#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
6691static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6692 struct dc_state *dc_state)
6693{
6694 struct dc_stream_state *stream = NULL;
6695 struct drm_connector *connector;
5760dcb9 6696 struct drm_connector_state *new_con_state;
29b9ba74
ML
6697 struct amdgpu_dm_connector *aconnector;
6698 struct dm_connector_state *dm_conn_state;
6699 int i, j, clock, bpp;
6700 int vcpi, pbn_div, pbn = 0;
6701
5760dcb9 6702 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
6703
6704 aconnector = to_amdgpu_dm_connector(connector);
6705
6706 if (!aconnector->port)
6707 continue;
6708
6709 if (!new_con_state || !new_con_state->crtc)
6710 continue;
6711
6712 dm_conn_state = to_dm_connector_state(new_con_state);
6713
6714 for (j = 0; j < dc_state->stream_count; j++) {
6715 stream = dc_state->streams[j];
6716 if (!stream)
6717 continue;
6718
6719 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6720 break;
6721
6722 stream = NULL;
6723 }
6724
6725 if (!stream)
6726 continue;
6727
6728 if (stream->timing.flags.DSC != 1) {
6729 drm_dp_mst_atomic_enable_dsc(state,
6730 aconnector->port,
6731 dm_conn_state->pbn,
6732 0,
6733 false);
6734 continue;
6735 }
6736
6737 pbn_div = dm_mst_get_pbn_divider(stream->link);
6738 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6739 clock = stream->timing.pix_clk_100hz / 10;
6740 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6741 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6742 aconnector->port,
6743 pbn, pbn_div,
6744 true);
6745 if (vcpi < 0)
6746 return vcpi;
6747
6748 dm_conn_state->pbn = pbn;
6749 dm_conn_state->vcpi_slots = vcpi;
6750 }
6751 return 0;
6752}
d9fe1a4c 6753#endif
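/*
 * Note on the DSC branch above: stream->timing.dsc_cfg.bits_per_pixel
 * is a fractional value in units of 1/16 bpp (e.g. 8.0 bpp is stored
 * as 128), which is why drm_dp_calc_pbn_mode() is called with
 * dsc = true - to the best of this writer's understanding it then
 * scales the bpp argument down by 16 when computing the PBN.
 */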
29b9ba74 6754
e7b07cee
HW
6755static void dm_drm_plane_reset(struct drm_plane *plane)
6756{
6757 struct dm_plane_state *amdgpu_state = NULL;
6758
6759 if (plane->state)
6760 plane->funcs->atomic_destroy_state(plane, plane->state);
6761
6762 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 6763 WARN_ON(amdgpu_state == NULL);
1f6010a9 6764
7ddaef96
NK
6765 if (amdgpu_state)
6766 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
6767}
6768
6769static struct drm_plane_state *
6770dm_drm_plane_duplicate_state(struct drm_plane *plane)
6771{
6772 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6773
6774 old_dm_plane_state = to_dm_plane_state(plane->state);
6775 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6776 if (!dm_plane_state)
6777 return NULL;
6778
6779 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6780
3be5262e
HW
6781 if (old_dm_plane_state->dc_state) {
6782 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6783 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
6784 }
6785
6786 return &dm_plane_state->base;
6787}
6788
dfd84d90 6789static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 6790 struct drm_plane_state *state)
e7b07cee
HW
6791{
6792 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6793
3be5262e
HW
6794 if (dm_plane_state->dc_state)
6795 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 6796
0627bbd3 6797 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
6798}
6799
6800static const struct drm_plane_funcs dm_plane_funcs = {
6801 .update_plane = drm_atomic_helper_update_plane,
6802 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 6803 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
6804 .reset = dm_drm_plane_reset,
6805 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6806 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 6807 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
6808};
6809
3ee6b26b
AD
6810static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6811 struct drm_plane_state *new_state)
e7b07cee
HW
6812{
6813 struct amdgpu_framebuffer *afb;
6814 struct drm_gem_object *obj;
5d43be0c 6815 struct amdgpu_device *adev;
e7b07cee 6816 struct amdgpu_bo *rbo;
e7b07cee 6817 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
6818 struct list_head list;
6819 struct ttm_validate_buffer tv;
6820 struct ww_acquire_ctx ticket;
5d43be0c
CK
6821 uint32_t domain;
6822 int r;
e7b07cee
HW
6823
6824 if (!new_state->fb) {
4711c033 6825 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
6826 return 0;
6827 }
6828
6829 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 6830 obj = new_state->fb->obj[0];
e7b07cee 6831 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 6832 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
6833 INIT_LIST_HEAD(&list);
6834
6835 tv.bo = &rbo->tbo;
6836 tv.num_shared = 1;
6837 list_add(&tv.head, &list);
6838
9165fb87 6839 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
6840 if (r) {
6841 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 6842 return r;
0f257b09 6843 }
e7b07cee 6844
5d43be0c 6845 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 6846 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
6847 else
6848 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 6849
7b7c6c81 6850 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 6851 if (unlikely(r != 0)) {
30b7c614
HW
6852 if (r != -ERESTARTSYS)
6853 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 6854 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
6855 return r;
6856 }
6857
bb812f1e
JZ
6858 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6859 if (unlikely(r != 0)) {
6860 amdgpu_bo_unpin(rbo);
0f257b09 6861 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6862 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
6863 return r;
6864 }
7df7e505 6865
0f257b09 6866 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6867
7b7c6c81 6868 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
6869
6870 amdgpu_bo_ref(rbo);
6871
cf322b49
NK
 6872 /*
6873 * We don't do surface updates on planes that have been newly created,
6874 * but we also don't have the afb->address during atomic check.
6875 *
6876 * Fill in buffer attributes depending on the address here, but only on
6877 * newly created planes since they're not being used by DC yet and this
6878 * won't modify global state.
6879 */
6880 dm_plane_state_old = to_dm_plane_state(plane->state);
6881 dm_plane_state_new = to_dm_plane_state(new_state);
6882
3be5262e 6883 if (dm_plane_state_new->dc_state &&
cf322b49
NK
6884 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6885 struct dc_plane_state *plane_state =
6886 dm_plane_state_new->dc_state;
6887 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 6888
320932bf 6889 fill_plane_buffer_attributes(
695af5f9 6890 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 6891 afb->tiling_flags,
cf322b49
NK
6892 &plane_state->tiling_info, &plane_state->plane_size,
6893 &plane_state->dcc, &plane_state->address,
6eed95b0 6894 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
6895 }
6896
e7b07cee
HW
6897 return 0;
6898}
6899
3ee6b26b
AD
6900static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6901 struct drm_plane_state *old_state)
e7b07cee
HW
6902{
6903 struct amdgpu_bo *rbo;
e7b07cee
HW
6904 int r;
6905
6906 if (!old_state->fb)
6907 return;
6908
e68d14dd 6909 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
6910 r = amdgpu_bo_reserve(rbo, false);
6911 if (unlikely(r)) {
6912 DRM_ERROR("failed to reserve rbo before unpin\n");
6913 return;
b830ebc9
HW
6914 }
6915
6916 amdgpu_bo_unpin(rbo);
6917 amdgpu_bo_unreserve(rbo);
6918 amdgpu_bo_unref(&rbo);
e7b07cee
HW
6919}
6920
8c44515b
AP
6921static int dm_plane_helper_check_state(struct drm_plane_state *state,
6922 struct drm_crtc_state *new_crtc_state)
6923{
6300b3bd
MK
6924 struct drm_framebuffer *fb = state->fb;
6925 int min_downscale, max_upscale;
6926 int min_scale = 0;
6927 int max_scale = INT_MAX;
6928
40d916a2 6929 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 6930 if (fb && state->crtc) {
40d916a2
NC
6931 /* Validate viewport to cover the case when only the position changes */
6932 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6933 int viewport_width = state->crtc_w;
6934 int viewport_height = state->crtc_h;
6935
6936 if (state->crtc_x < 0)
6937 viewport_width += state->crtc_x;
6938 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6939 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6940
6941 if (state->crtc_y < 0)
6942 viewport_height += state->crtc_y;
6943 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6944 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6945
4abdb72b
NC
6946 if (viewport_width < 0 || viewport_height < 0) {
6947 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6948 return -EINVAL;
6949 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6950 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 6951 return -EINVAL;
4abdb72b
NC
6952 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6953 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 6954 return -EINVAL;
4abdb72b
NC
6955 }
6956
40d916a2
NC
6957 }
6958
6959 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
6960 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6961 &min_downscale, &max_upscale);
6962 /*
6963 * Convert to drm convention: 16.16 fixed point, instead of dc's
6964 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6965 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6966 */
6967 min_scale = (1000 << 16) / max_upscale;
6968 max_scale = (1000 << 16) / min_downscale;
6969 }
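/*
 * Worked example for the conversion above, with assumed DC caps of
 * max_upscale = 16000 (16.0x) and min_downscale = 250 (0.25x):
 *   min_scale = (1000 << 16) / 16000 = 4096   (1/16 in 16.16 fixed point)
 *   max_scale = (1000 << 16) / 250   = 262144 (4.0  in 16.16 fixed point)
 * i.e. the plane may be upscaled by up to 16x (min_scale) and
 * downscaled to 1/4 of its source size (max_scale).
 */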
8c44515b 6970
8c44515b 6971 return drm_atomic_helper_check_plane_state(
6300b3bd 6972 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
6973}
6974
7578ecda 6975static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 6976 struct drm_atomic_state *state)
cbd19488 6977{
7c11b99a
MR
6978 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6979 plane);
1348969a 6980 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 6981 struct dc *dc = adev->dm.dc;
78171832 6982 struct dm_plane_state *dm_plane_state;
695af5f9 6983 struct dc_scaling_info scaling_info;
8c44515b 6984 struct drm_crtc_state *new_crtc_state;
695af5f9 6985 int ret;
78171832 6986
ba5c1649 6987 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 6988
ba5c1649 6989 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 6990
3be5262e 6991 if (!dm_plane_state->dc_state)
9a3329b1 6992 return 0;
cbd19488 6993
8c44515b 6994 new_crtc_state =
dec92020 6995 drm_atomic_get_new_crtc_state(state,
ba5c1649 6996 new_plane_state->crtc);
8c44515b
AP
6997 if (!new_crtc_state)
6998 return -EINVAL;
6999
ba5c1649 7000 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
7001 if (ret)
7002 return ret;
7003
ba5c1649 7004 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
695af5f9
NK
7005 if (ret)
7006 return ret;
a05bcff1 7007
62c933f9 7008 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
7009 return 0;
7010
7011 return -EINVAL;
7012}
7013
674e78ac 7014static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7015 struct drm_atomic_state *state)
674e78ac
NK
7016{
7017 /* Only support async updates on cursor planes. */
7018 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7019 return -EINVAL;
7020
7021 return 0;
7022}
7023
7024static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7025 struct drm_atomic_state *state)
674e78ac 7026{
5ddb0bd4
MR
7027 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7028 plane);
674e78ac 7029 struct drm_plane_state *old_state =
5ddb0bd4 7030 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7031
e8a98235
RS
7032 trace_amdgpu_dm_atomic_update_cursor(new_state);
7033
332af874 7034 swap(plane->state->fb, new_state->fb);
674e78ac
NK
7035
7036 plane->state->src_x = new_state->src_x;
7037 plane->state->src_y = new_state->src_y;
7038 plane->state->src_w = new_state->src_w;
7039 plane->state->src_h = new_state->src_h;
7040 plane->state->crtc_x = new_state->crtc_x;
7041 plane->state->crtc_y = new_state->crtc_y;
7042 plane->state->crtc_w = new_state->crtc_w;
7043 plane->state->crtc_h = new_state->crtc_h;
7044
7045 handle_cursor_update(plane, old_state);
7046}
7047
e7b07cee
HW
7048static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7049 .prepare_fb = dm_plane_helper_prepare_fb,
7050 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7051 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
7052 .atomic_async_check = dm_plane_atomic_async_check,
7053 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
7054};
7055
7056/*
7057 * TODO: these are currently initialized to rgb formats only.
7058 * For future use cases we should either initialize them dynamically based on
7059 * plane capabilities, or initialize this array to all formats, so internal drm
1f6010a9 7060 * check will succeed, and let DC implement proper check
e7b07cee 7061 */
d90371b0 7062static const uint32_t rgb_formats[] = {
e7b07cee
HW
7063 DRM_FORMAT_XRGB8888,
7064 DRM_FORMAT_ARGB8888,
7065 DRM_FORMAT_RGBA8888,
7066 DRM_FORMAT_XRGB2101010,
7067 DRM_FORMAT_XBGR2101010,
7068 DRM_FORMAT_ARGB2101010,
7069 DRM_FORMAT_ABGR2101010,
bcd47f60
MR
7070 DRM_FORMAT_XBGR8888,
7071 DRM_FORMAT_ABGR8888,
46dd9ff7 7072 DRM_FORMAT_RGB565,
e7b07cee
HW
7073};
7074
0d579c7e
NK
7075static const uint32_t overlay_formats[] = {
7076 DRM_FORMAT_XRGB8888,
7077 DRM_FORMAT_ARGB8888,
7078 DRM_FORMAT_RGBA8888,
7079 DRM_FORMAT_XBGR8888,
7080 DRM_FORMAT_ABGR8888,
7267a1a9 7081 DRM_FORMAT_RGB565
e7b07cee
HW
7082};
7083
7084static const u32 cursor_formats[] = {
7085 DRM_FORMAT_ARGB8888
7086};
7087
37c6a93b
NK
7088static int get_plane_formats(const struct drm_plane *plane,
7089 const struct dc_plane_cap *plane_cap,
7090 uint32_t *formats, int max_formats)
e7b07cee 7091{
37c6a93b
NK
7092 int i, num_formats = 0;
7093
7094 /*
7095 * TODO: Query support for each group of formats directly from
7096 * DC plane caps. This will require adding more formats to the
7097 * caps list.
7098 */
e7b07cee 7099
f180b4bc 7100 switch (plane->type) {
e7b07cee 7101 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
7102 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7103 if (num_formats >= max_formats)
7104 break;
7105
7106 formats[num_formats++] = rgb_formats[i];
7107 }
7108
ea36ad34 7109 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 7110 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
7111 if (plane_cap && plane_cap->pixel_format_support.p010)
7112 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
7113 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7114 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7115 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
7116 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7117 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 7118 }
e7b07cee 7119 break;
37c6a93b 7120
e7b07cee 7121 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
7122 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7123 if (num_formats >= max_formats)
7124 break;
7125
7126 formats[num_formats++] = overlay_formats[i];
7127 }
e7b07cee 7128 break;
37c6a93b 7129
e7b07cee 7130 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
7131 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7132 if (num_formats >= max_formats)
7133 break;
7134
7135 formats[num_formats++] = cursor_formats[i];
7136 }
e7b07cee
HW
7137 break;
7138 }
7139
37c6a93b
NK
7140 return num_formats;
7141}
7142
7143static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7144 struct drm_plane *plane,
7145 unsigned long possible_crtcs,
7146 const struct dc_plane_cap *plane_cap)
7147{
7148 uint32_t formats[32];
7149 int num_formats;
7150 int res = -EPERM;
ecc874a6 7151 unsigned int supported_rotations;
faa37f54 7152 uint64_t *modifiers = NULL;
37c6a93b
NK
7153
7154 num_formats = get_plane_formats(plane, plane_cap, formats,
7155 ARRAY_SIZE(formats));
7156
faa37f54
BN
7157 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7158 if (res)
7159 return res;
7160
4a580877 7161 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7162 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
7163 modifiers, plane->type, NULL);
7164 kfree(modifiers);
37c6a93b
NK
7165 if (res)
7166 return res;
7167
cc1fec57
NK
7168 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7169 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
7170 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7171 BIT(DRM_MODE_BLEND_PREMULTI);
7172
7173 drm_plane_create_alpha_property(plane);
7174 drm_plane_create_blend_mode_property(plane, blend_caps);
7175 }
7176
fc8e5230 7177 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
7178 plane_cap &&
7179 (plane_cap->pixel_format_support.nv12 ||
7180 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
7181 /* This only affects YUV formats. */
7182 drm_plane_create_color_properties(
7183 plane,
7184 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
7185 BIT(DRM_COLOR_YCBCR_BT709) |
7186 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
7187 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7188 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7189 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7190 }
7191
ecc874a6
PLG
7192 supported_rotations =
7193 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7194 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7195
1347385f
SS
7196 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7197 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
7198 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7199 supported_rotations);
ecc874a6 7200
f180b4bc 7201 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7202
96719c54 7203 /* Create (reset) the plane state */
f180b4bc
HW
7204 if (plane->funcs->reset)
7205 plane->funcs->reset(plane);
96719c54 7206
37c6a93b 7207 return 0;
e7b07cee
HW
7208}
7209
7578ecda
AD
7210static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7211 struct drm_plane *plane,
7212 uint32_t crtc_index)
e7b07cee
HW
7213{
7214 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7215 struct drm_plane *cursor_plane;
e7b07cee
HW
7216
7217 int res = -ENOMEM;
7218
7219 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7220 if (!cursor_plane)
7221 goto fail;
7222
f180b4bc 7223 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7224 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
7225
7226 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7227 if (!acrtc)
7228 goto fail;
7229
7230 res = drm_crtc_init_with_planes(
7231 dm->ddev,
7232 &acrtc->base,
7233 plane,
f180b4bc 7234 cursor_plane,
e7b07cee
HW
7235 &amdgpu_dm_crtc_funcs, NULL);
7236
7237 if (res)
7238 goto fail;
7239
7240 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7241
96719c54
HW
7242 /* Create (reset) the plane state */
7243 if (acrtc->base.funcs->reset)
7244 acrtc->base.funcs->reset(&acrtc->base);
7245
e7b07cee
HW
7246 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7247 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7248
7249 acrtc->crtc_id = crtc_index;
7250 acrtc->base.enabled = false;
c37e2d29 7251 acrtc->otg_inst = -1;
e7b07cee
HW
7252
7253 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
7254 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7255 true, MAX_COLOR_LUT_ENTRIES);
086247a4 7256 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 7257
e7b07cee
HW
7258 return 0;
7259
7260fail:
b830ebc9
HW
7261 kfree(acrtc);
7262 kfree(cursor_plane);
e7b07cee
HW
7263 return res;
7264}
7265
7266
7267static int to_drm_connector_type(enum signal_type st)
7268{
7269 switch (st) {
7270 case SIGNAL_TYPE_HDMI_TYPE_A:
7271 return DRM_MODE_CONNECTOR_HDMIA;
7272 case SIGNAL_TYPE_EDP:
7273 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
7274 case SIGNAL_TYPE_LVDS:
7275 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
7276 case SIGNAL_TYPE_RGB:
7277 return DRM_MODE_CONNECTOR_VGA;
7278 case SIGNAL_TYPE_DISPLAY_PORT:
7279 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7280 return DRM_MODE_CONNECTOR_DisplayPort;
7281 case SIGNAL_TYPE_DVI_DUAL_LINK:
7282 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7283 return DRM_MODE_CONNECTOR_DVID;
7284 case SIGNAL_TYPE_VIRTUAL:
7285 return DRM_MODE_CONNECTOR_VIRTUAL;
7286
7287 default:
7288 return DRM_MODE_CONNECTOR_Unknown;
7289 }
7290}
7291
2b4c1c05
DV
7292static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7293{
62afb4ad
JRS
7294 struct drm_encoder *encoder;
7295
7296 /* There is only one encoder per connector */
7297 drm_connector_for_each_possible_encoder(connector, encoder)
7298 return encoder;
7299
7300 return NULL;
2b4c1c05
DV
7301}
7302
e7b07cee
HW
7303static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7304{
e7b07cee
HW
7305 struct drm_encoder *encoder;
7306 struct amdgpu_encoder *amdgpu_encoder;
7307
2b4c1c05 7308 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
7309
7310 if (encoder == NULL)
7311 return;
7312
7313 amdgpu_encoder = to_amdgpu_encoder(encoder);
7314
7315 amdgpu_encoder->native_mode.clock = 0;
7316
7317 if (!list_empty(&connector->probed_modes)) {
7318 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 7319
e7b07cee 7320 list_for_each_entry(preferred_mode,
b830ebc9
HW
7321 &connector->probed_modes,
7322 head) {
7323 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7324 amdgpu_encoder->native_mode = *preferred_mode;
7325
e7b07cee
HW
7326 break;
7327 }
7328
7329 }
7330}
7331
3ee6b26b
AD
7332static struct drm_display_mode *
7333amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7334 char *name,
7335 int hdisplay, int vdisplay)
e7b07cee
HW
7336{
7337 struct drm_device *dev = encoder->dev;
7338 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7339 struct drm_display_mode *mode = NULL;
7340 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7341
7342 mode = drm_mode_duplicate(dev, native_mode);
7343
b830ebc9 7344 if (mode == NULL)
e7b07cee
HW
7345 return NULL;
7346
7347 mode->hdisplay = hdisplay;
7348 mode->vdisplay = vdisplay;
7349 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 7350 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
7351
7352 return mode;
7353
7354}
7355
7356static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 7357 struct drm_connector *connector)
e7b07cee
HW
7358{
7359 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7360 struct drm_display_mode *mode = NULL;
7361 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
7362 struct amdgpu_dm_connector *amdgpu_dm_connector =
7363 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7364 int i;
7365 int n;
7366 struct mode_size {
7367 char name[DRM_DISPLAY_MODE_LEN];
7368 int w;
7369 int h;
b830ebc9 7370 } common_modes[] = {
e7b07cee
HW
7371 { "640x480", 640, 480},
7372 { "800x600", 800, 600},
7373 { "1024x768", 1024, 768},
7374 { "1280x720", 1280, 720},
7375 { "1280x800", 1280, 800},
7376 {"1280x1024", 1280, 1024},
7377 { "1440x900", 1440, 900},
7378 {"1680x1050", 1680, 1050},
7379 {"1600x1200", 1600, 1200},
7380 {"1920x1080", 1920, 1080},
7381 {"1920x1200", 1920, 1200}
7382 };
7383
b830ebc9 7384 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
7385
7386 for (i = 0; i < n; i++) {
7387 struct drm_display_mode *curmode = NULL;
7388 bool mode_existed = false;
7389
7390 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
7391 common_modes[i].h > native_mode->vdisplay ||
7392 (common_modes[i].w == native_mode->hdisplay &&
7393 common_modes[i].h == native_mode->vdisplay))
7394 continue;
e7b07cee
HW
7395
7396 list_for_each_entry(curmode, &connector->probed_modes, head) {
7397 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 7398 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
7399 mode_existed = true;
7400 break;
7401 }
7402 }
7403
7404 if (mode_existed)
7405 continue;
7406
7407 mode = amdgpu_dm_create_common_mode(encoder,
7408 common_modes[i].name, common_modes[i].w,
7409 common_modes[i].h);
7410 drm_mode_probed_add(connector, mode);
c84dec2f 7411 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
7412 }
7413}
7414
3ee6b26b
AD
7415static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7416 struct edid *edid)
e7b07cee 7417{
c84dec2f
HW
7418 struct amdgpu_dm_connector *amdgpu_dm_connector =
7419 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7420
7421 if (edid) {
7422 /* empty probed_modes */
7423 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 7424 amdgpu_dm_connector->num_modes =
e7b07cee
HW
7425 drm_add_edid_modes(connector, edid);
7426
f1e5e913
YMM
7427 /* sorting the probed modes before calling function
7428 * amdgpu_dm_get_native_mode() since EDID can have
7429 * more than one preferred mode. The modes that are
7430 * later in the probed mode list could be of higher
7431 * and preferred resolution. For example, 3840x2160
7432 * resolution in base EDID preferred timing and 4096x2160
7433 * preferred resolution in DID extension block later.
7434 */
7435 drm_mode_sort(&connector->probed_modes);
e7b07cee 7436 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
7437
7438 /* Freesync capabilities are reset by calling
7439 * drm_add_edid_modes() and need to be
7440 * restored here.
7441 */
7442 amdgpu_dm_update_freesync_caps(connector, edid);
a8d8d3dc 7443 } else {
c84dec2f 7444 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 7445 }
e7b07cee
HW
7446}
7447
a85ba005
NC
7448static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7449 struct drm_display_mode *mode)
7450{
7451 struct drm_display_mode *m;
7452
 7453 list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7454 if (drm_mode_equal(m, mode))
7455 return true;
7456 }
7457
7458 return false;
7459}
7460
7461static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7462{
7463 const struct drm_display_mode *m;
7464 struct drm_display_mode *new_mode;
7465 uint i;
7466 uint32_t new_modes_count = 0;
7467
7468 /* Standard FPS values
7469 *
7470 * 23.976 - TV/NTSC
7471 * 24 - Cinema
7472 * 25 - TV/PAL
7473 * 29.97 - TV/NTSC
7474 * 30 - TV/NTSC
7475 * 48 - Cinema HFR
7476 * 50 - TV/PAL
7477 * 60 - Commonly used
7478 * 48,72,96 - Multiples of 24
7479 */
7480 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7481 48000, 50000, 60000, 72000, 96000 };
7482
7483 /*
7484 * Find mode with highest refresh rate with the same resolution
7485 * as the preferred mode. Some monitors report a preferred mode
7486 * with lower resolution than the highest refresh rate supported.
7487 */
7488
7489 m = get_highest_refresh_rate_mode(aconnector, true);
7490 if (!m)
7491 return 0;
7492
7493 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7494 uint64_t target_vtotal, target_vtotal_diff;
7495 uint64_t num, den;
7496
7497 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7498 continue;
7499
7500 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7501 common_rates[i] > aconnector->max_vfreq * 1000)
7502 continue;
7503
7504 num = (unsigned long long)m->clock * 1000 * 1000;
7505 den = common_rates[i] * (unsigned long long)m->htotal;
7506 target_vtotal = div_u64(num, den);
7507 target_vtotal_diff = target_vtotal - m->vtotal;
7508
7509 /* Check for illegal modes */
7510 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7511 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7512 m->vtotal + target_vtotal_diff < m->vsync_end)
7513 continue;
7514
7515 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7516 if (!new_mode)
7517 goto out;
7518
7519 new_mode->vtotal += (u16)target_vtotal_diff;
7520 new_mode->vsync_start += (u16)target_vtotal_diff;
7521 new_mode->vsync_end += (u16)target_vtotal_diff;
7522 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7523 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7524
7525 if (!is_duplicate_mode(aconnector, new_mode)) {
7526 drm_mode_probed_add(&aconnector->base, new_mode);
7527 new_modes_count += 1;
7528 } else
7529 drm_mode_destroy(aconnector->base.dev, new_mode);
7530 }
7531 out:
7532 return new_modes_count;
7533}
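/*
 * Worked example for the vtotal stretch above (illustrative numbers):
 * for a 1080p60 base mode with clock = 148500 kHz, htotal = 2200 and
 * vtotal = 1125, a 48 Hz target (common_rates[] entry 48000, in mHz)
 * gives target_vtotal = 148500 * 10^6 / (48000 * 2200) = 1406. The
 * duplicated mode keeps the same pixel clock but stretches vtotal by
 * 281 lines (shifting the vsync pulse down with it), and
 * 148500000 / (2200 * 1406) is approximately 48.0 Hz.
 */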
7534
7535static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7536 struct edid *edid)
7537{
7538 struct amdgpu_dm_connector *amdgpu_dm_connector =
7539 to_amdgpu_dm_connector(connector);
7540
7541 if (!(amdgpu_freesync_vid_mode && edid))
7542 return;
fe8858bb 7543
a85ba005
NC
7544 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7545 amdgpu_dm_connector->num_modes +=
7546 add_fs_modes(amdgpu_dm_connector);
7547}
7548
7578ecda 7549static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 7550{
c84dec2f
HW
7551 struct amdgpu_dm_connector *amdgpu_dm_connector =
7552 to_amdgpu_dm_connector(connector);
e7b07cee 7553 struct drm_encoder *encoder;
c84dec2f 7554 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 7555
2b4c1c05 7556 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 7557
5c0e6840 7558 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
7559 amdgpu_dm_connector->num_modes =
7560 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
7561 } else {
7562 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7563 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 7564 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 7565 }
3e332d3a 7566 amdgpu_dm_fbc_init(connector);
5099114b 7567
c84dec2f 7568 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
7569}
7570
3ee6b26b
AD
7571void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7572 struct amdgpu_dm_connector *aconnector,
7573 int connector_type,
7574 struct dc_link *link,
7575 int link_index)
e7b07cee 7576{
1348969a 7577 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 7578
f04bee34
NK
7579 /*
7580 * Some of the properties below require access to state, like bpc.
7581 * Allocate some default initial connector state with our reset helper.
7582 */
7583 if (aconnector->base.funcs->reset)
7584 aconnector->base.funcs->reset(&aconnector->base);
7585
e7b07cee
HW
7586 aconnector->connector_id = link_index;
7587 aconnector->dc_link = link;
7588 aconnector->base.interlace_allowed = false;
7589 aconnector->base.doublescan_allowed = false;
7590 aconnector->base.stereo_allowed = false;
7591 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7592 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 7593 aconnector->audio_inst = -1;
e7b07cee
HW
7594 mutex_init(&aconnector->hpd_lock);
7595
1f6010a9
DF
7596 /*
 7596 * Configure HPD hot plug support. The connector->polled default value
b830ebc9
HW
 7598 * of 0 means HPD hot plug is not supported.
7599 */
e7b07cee
HW
7600 switch (connector_type) {
7601 case DRM_MODE_CONNECTOR_HDMIA:
7602 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7603 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7604 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
7605 break;
7606 case DRM_MODE_CONNECTOR_DisplayPort:
7607 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7608 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7609 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
7610 break;
7611 case DRM_MODE_CONNECTOR_DVID:
7612 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7613 break;
7614 default:
7615 break;
7616 }
7617
7618 drm_object_attach_property(&aconnector->base.base,
7619 dm->ddev->mode_config.scaling_mode_property,
7620 DRM_MODE_SCALE_NONE);
7621
7622 drm_object_attach_property(&aconnector->base.base,
7623 adev->mode_info.underscan_property,
7624 UNDERSCAN_OFF);
7625 drm_object_attach_property(&aconnector->base.base,
7626 adev->mode_info.underscan_hborder_property,
7627 0);
7628 drm_object_attach_property(&aconnector->base.base,
7629 adev->mode_info.underscan_vborder_property,
7630 0);
1825fd34 7631
8c61b31e
JFZ
7632 if (!aconnector->mst_port)
7633 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 7634
4a8ca46b
RL
7635 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7636 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7637 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 7638
c1ee92f9 7639 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 7640 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
7641 drm_object_attach_property(&aconnector->base.base,
7642 adev->mode_info.abm_level_property, 0);
7643 }
bb47de73
NK
7644
7645 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
7646 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7647 connector_type == DRM_MODE_CONNECTOR_eDP) {
e057b52c 7648 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
88694af9 7649
8c61b31e
JFZ
7650 if (!aconnector->mst_port)
7651 drm_connector_attach_vrr_capable_property(&aconnector->base);
7652
0c8620d6 7653#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 7654 if (adev->dm.hdcp_workqueue)
53e108aa 7655 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 7656#endif
bb47de73 7657 }
e7b07cee
HW
7658}
7659
7578ecda
AD
7660static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7661 struct i2c_msg *msgs, int num)
e7b07cee
HW
7662{
7663 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7664 struct ddc_service *ddc_service = i2c->ddc_service;
7665 struct i2c_command cmd;
7666 int i;
7667 int result = -EIO;
7668
b830ebc9 7669 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
7670
7671 if (!cmd.payloads)
7672 return result;
7673
7674 cmd.number_of_payloads = num;
7675 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7676 cmd.speed = 100;
7677
7678 for (i = 0; i < num; i++) {
7679 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7680 cmd.payloads[i].address = msgs[i].addr;
7681 cmd.payloads[i].length = msgs[i].len;
7682 cmd.payloads[i].data = msgs[i].buf;
7683 }
7684
c85e6e54
DF
7685 if (dc_submit_i2c(
7686 ddc_service->ctx->dc,
7687 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
7688 &cmd))
7689 result = num;
7690
7691 kfree(cmd.payloads);
7692 return result;
7693}
7694
7578ecda 7695static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
7696{
7697 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7698}
7699
7700static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7701 .master_xfer = amdgpu_dm_i2c_xfer,
7702 .functionality = amdgpu_dm_i2c_func,
7703};
7704
3ee6b26b
AD
7705static struct amdgpu_i2c_adapter *
7706create_i2c(struct ddc_service *ddc_service,
7707 int link_index,
7708 int *res)
e7b07cee
HW
7709{
7710 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7711 struct amdgpu_i2c_adapter *i2c;
7712
b830ebc9 7713 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
7714 if (!i2c)
7715 return NULL;
e7b07cee
HW
7716 i2c->base.owner = THIS_MODULE;
7717 i2c->base.class = I2C_CLASS_DDC;
7718 i2c->base.dev.parent = &adev->pdev->dev;
7719 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 7720 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
7721 i2c_set_adapdata(&i2c->base, i2c);
7722 i2c->ddc_service = ddc_service;
c85e6e54 7723 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
7724
7725 return i2c;
7726}
7727
89fc8d4e 7728
1f6010a9
DF
7729/*
7730 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
7731 * dc_link which will be represented by this aconnector.
7732 */
7578ecda
AD
7733static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7734 struct amdgpu_dm_connector *aconnector,
7735 uint32_t link_index,
7736 struct amdgpu_encoder *aencoder)
e7b07cee
HW
7737{
7738 int res = 0;
7739 int connector_type;
7740 struct dc *dc = dm->dc;
7741 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7742 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
7743
7744 link->priv = aconnector;
e7b07cee 7745
f1ad2f5e 7746 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
7747
7748 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
7749 if (!i2c) {
7750 DRM_ERROR("Failed to create i2c adapter data\n");
7751 return -ENOMEM;
7752 }
7753
e7b07cee
HW
7754 aconnector->i2c = i2c;
7755 res = i2c_add_adapter(&i2c->base);
7756
7757 if (res) {
7758 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7759 goto out_free;
7760 }
7761
7762 connector_type = to_drm_connector_type(link->connector_signal);
7763
17165de2 7764 res = drm_connector_init_with_ddc(
e7b07cee
HW
7765 dm->ddev,
7766 &aconnector->base,
7767 &amdgpu_dm_connector_funcs,
17165de2
AP
7768 connector_type,
7769 &i2c->base);
e7b07cee
HW
7770
7771 if (res) {
7772 DRM_ERROR("connector_init failed\n");
7773 aconnector->connector_id = -1;
7774 goto out_free;
7775 }
7776
7777 drm_connector_helper_add(
7778 &aconnector->base,
7779 &amdgpu_dm_connector_helper_funcs);
7780
7781 amdgpu_dm_connector_init_helper(
7782 dm,
7783 aconnector,
7784 connector_type,
7785 link,
7786 link_index);
7787
cde4c44d 7788 drm_connector_attach_encoder(
e7b07cee
HW
7789 &aconnector->base, &aencoder->base);
7790
e7b07cee
HW
7791 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7792 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 7793 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 7794
e7b07cee
HW
7795out_free:
7796 if (res) {
7797 kfree(i2c);
7798 aconnector->i2c = NULL;
7799 }
7800 return res;
7801}
7802
7803int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7804{
7805 switch (adev->mode_info.num_crtc) {
7806 case 1:
7807 return 0x1;
7808 case 2:
7809 return 0x3;
7810 case 3:
7811 return 0x7;
7812 case 4:
7813 return 0xf;
7814 case 5:
7815 return 0x1f;
7816 case 6:
7817 default:
7818 return 0x3f;
7819 }
7820}
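/*
 * The switch above is just (1 << num_crtc) - 1 capped at six CRTCs,
 * i.e. a bitmask with one bit per CRTC the encoder may drive.
 */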
7821
7578ecda
AD
7822static int amdgpu_dm_encoder_init(struct drm_device *dev,
7823 struct amdgpu_encoder *aencoder,
7824 uint32_t link_index)
e7b07cee 7825{
1348969a 7826 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
7827
7828 int res = drm_encoder_init(dev,
7829 &aencoder->base,
7830 &amdgpu_dm_encoder_funcs,
7831 DRM_MODE_ENCODER_TMDS,
7832 NULL);
7833
7834 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7835
7836 if (!res)
7837 aencoder->encoder_id = link_index;
7838 else
7839 aencoder->encoder_id = -1;
7840
7841 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7842
7843 return res;
7844}
7845
3ee6b26b
AD
7846static void manage_dm_interrupts(struct amdgpu_device *adev,
7847 struct amdgpu_crtc *acrtc,
7848 bool enable)
e7b07cee
HW
7849{
7850 /*
8fe684e9
NK
7851 * We have no guarantee that the frontend index maps to the same
7852 * backend index - some even map to more than one.
7853 *
7854 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
7855 */
7856 int irq_type =
734dd01d 7857 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
7858 adev,
7859 acrtc->crtc_id);
7860
7861 if (enable) {
7862 drm_crtc_vblank_on(&acrtc->base);
7863 amdgpu_irq_get(
7864 adev,
7865 &adev->pageflip_irq,
7866 irq_type);
86bc2219
WL
7867#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7868 amdgpu_irq_get(
7869 adev,
7870 &adev->vline0_irq,
7871 irq_type);
7872#endif
e7b07cee 7873 } else {
86bc2219
WL
7874#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7875 amdgpu_irq_put(
7876 adev,
7877 &adev->vline0_irq,
7878 irq_type);
7879#endif
e7b07cee
HW
7880 amdgpu_irq_put(
7881 adev,
7882 &adev->pageflip_irq,
7883 irq_type);
7884 drm_crtc_vblank_off(&acrtc->base);
7885 }
7886}
7887
8fe684e9
NK
7888static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7889 struct amdgpu_crtc *acrtc)
7890{
7891 int irq_type =
7892 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7893
 7894 /*
 7895 * This reads the current state for the IRQ and forcibly reapplies
 7896 * the setting to hardware.
7897 */
7898 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7899}
7900
3ee6b26b
AD
7901static bool
7902is_scaling_state_different(const struct dm_connector_state *dm_state,
7903 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
7904{
7905 if (dm_state->scaling != old_dm_state->scaling)
7906 return true;
7907 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7908 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7909 return true;
7910 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7911 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7912 return true;
b830ebc9
HW
7913 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7914 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7915 return true;
e7b07cee
HW
7916 return false;
7917}
7918
0c8620d6
BL
7919#ifdef CONFIG_DRM_AMD_DC_HDCP
7920static bool is_content_protection_different(struct drm_connector_state *state,
7921 const struct drm_connector_state *old_state,
7922 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7923{
7924 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 7925 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 7926
31c0ed90 7927 /* Handles: Type0/1 change */
53e108aa
BL
7928 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7929 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7930 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7931 return true;
7932 }
7933
31c0ed90
BL
 7934 /* CP is being re-enabled, ignore this
7935 *
7936 * Handles: ENABLED -> DESIRED
7937 */
0c8620d6
BL
7938 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7939 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7940 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7941 return false;
7942 }
7943
31c0ed90
BL
7944 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7945 *
7946 * Handles: UNDESIRED -> ENABLED
7947 */
0c8620d6
BL
7948 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7949 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7950 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7951
 7952 /* Check if something is connected/enabled; otherwise we would start HDCP
 7953 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
31c0ed90
BL
7954 *
7955 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 7956 */
97f6c917
BL
7957 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7958 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7959 dm_con_state->update_hdcp = false;
0c8620d6 7960 return true;
97f6c917 7961 }
0c8620d6 7962
31c0ed90
BL
7963 /*
7964 * Handles: UNDESIRED -> UNDESIRED
7965 * DESIRED -> DESIRED
7966 * ENABLED -> ENABLED
7967 */
0c8620d6
BL
7968 if (old_state->content_protection == state->content_protection)
7969 return false;
7970
31c0ed90
BL
7971 /*
7972 * Handles: UNDESIRED -> DESIRED
7973 * DESIRED -> UNDESIRED
7974 * ENABLED -> UNDESIRED
7975 */
97f6c917 7976 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
7977 return true;
7978
31c0ed90
BL
7979 /*
7980 * Handles: DESIRED -> ENABLED
7981 */
0c8620d6
BL
7982 return false;
7983}
7984
0c8620d6 7985#endif
3ee6b26b
AD
7986static void remove_stream(struct amdgpu_device *adev,
7987 struct amdgpu_crtc *acrtc,
7988 struct dc_stream_state *stream)
e7b07cee
HW
7989{
7990 /* this is the update mode case */
e7b07cee
HW
7991
7992 acrtc->otg_inst = -1;
7993 acrtc->enabled = false;
7994}
7995
7578ecda
AD
7996static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7997 struct dc_cursor_position *position)
2a8f6ccb 7998{
f4c2cc43 7999 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
8000 int x, y;
8001 int xorigin = 0, yorigin = 0;
8002
e371e19c 8003 if (!crtc || !plane->state->fb)
2a8f6ccb 8004 return 0;
2a8f6ccb
HW
8005
8006 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8007 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8008 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8009 __func__,
8010 plane->state->crtc_w,
8011 plane->state->crtc_h);
8012 return -EINVAL;
8013 }
8014
8015 x = plane->state->crtc_x;
8016 y = plane->state->crtc_y;
c14a005c 8017
e371e19c
NK
8018 if (x <= -amdgpu_crtc->max_cursor_width ||
8019 y <= -amdgpu_crtc->max_cursor_height)
8020 return 0;
8021
2a8f6ccb
HW
8022 if (x < 0) {
8023 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8024 x = 0;
8025 }
8026 if (y < 0) {
8027 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8028 y = 0;
8029 }
8030 position->enable = true;
d243b6ff 8031 position->translate_by_source = true;
2a8f6ccb
HW
8032 position->x = x;
8033 position->y = y;
8034 position->x_hotspot = xorigin;
8035 position->y_hotspot = yorigin;
8036
8037 return 0;
8038}
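/*
 * Example of the clamping above: a 64x64 cursor at crtc_x = -10 yields
 * xorigin = 10 and x = 0, so the plane is pinned to the screen edge and
 * the hotspot offset tells DC to translate the image 10 pixels left -
 * the cursor appears to slide partially off-screen without the hardware
 * ever being programmed with a negative position.
 */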
8039
3ee6b26b
AD
8040static void handle_cursor_update(struct drm_plane *plane,
8041 struct drm_plane_state *old_plane_state)
e7b07cee 8042{
1348969a 8043 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
8044 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8045 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8046 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8047 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8048 uint64_t address = afb ? afb->address : 0;
6a30a929 8049 struct dc_cursor_position position = {0};
2a8f6ccb
HW
8050 struct dc_cursor_attributes attributes;
8051 int ret;
8052
e7b07cee
HW
8053 if (!plane->state->fb && !old_plane_state->fb)
8054 return;
8055
cb2318b7 8056 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
4711c033
LT
8057 __func__,
8058 amdgpu_crtc->crtc_id,
8059 plane->state->crtc_w,
8060 plane->state->crtc_h);
2a8f6ccb
HW
8061
8062 ret = get_cursor_position(plane, crtc, &position);
8063 if (ret)
8064 return;
8065
8066 if (!position.enable) {
8067 /* turn off cursor */
674e78ac
NK
8068 if (crtc_state && crtc_state->stream) {
8069 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
8070 dc_stream_set_cursor_position(crtc_state->stream,
8071 &position);
674e78ac
NK
8072 mutex_unlock(&adev->dm.dc_lock);
8073 }
2a8f6ccb 8074 return;
e7b07cee 8075 }
e7b07cee 8076
2a8f6ccb
HW
8077 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8078 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8079
c1cefe11 8080 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
8081 attributes.address.high_part = upper_32_bits(address);
8082 attributes.address.low_part = lower_32_bits(address);
8083 attributes.width = plane->state->crtc_w;
8084 attributes.height = plane->state->crtc_h;
8085 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8086 attributes.rotation_angle = 0;
8087 attributes.attribute_flags.value = 0;
8088
03a66367 8089 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 8090
886daac9 8091 if (crtc_state->stream) {
674e78ac 8092 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
8093 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8094 &attributes))
8095 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 8096
2a8f6ccb
HW
8097 if (!dc_stream_set_cursor_position(crtc_state->stream,
8098 &position))
8099 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 8100 mutex_unlock(&adev->dm.dc_lock);
886daac9 8101 }
2a8f6ccb 8102}
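/*
 * Note on the pitch computed above: base.pitches[0] is in bytes while
 * dc_cursor_attributes.pitch is in pixels, hence the division by the
 * format cpp. For an ARGB8888 cursor with a 256-byte stride this gives
 * 256 / 4 = 64 pixels, matching a 64-wide cursor image.
 */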
e7b07cee
HW
8103
8104static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8105{
8106
8107 assert_spin_locked(&acrtc->base.dev->event_lock);
8108 WARN_ON(acrtc->event);
8109
8110 acrtc->event = acrtc->base.state->event;
8111
8112 /* Set the flip status */
8113 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8114
8115 /* Mark this event as consumed */
8116 acrtc->base.state->event = NULL;
8117
cb2318b7
VL
8118 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8119 acrtc->crtc_id);
e7b07cee
HW
8120}
8121
bb47de73
NK
8122static void update_freesync_state_on_stream(
8123 struct amdgpu_display_manager *dm,
8124 struct dm_crtc_state *new_crtc_state,
180db303
NK
8125 struct dc_stream_state *new_stream,
8126 struct dc_plane_state *surface,
8127 u32 flip_timestamp_in_us)
bb47de73 8128{
09aef2c4 8129 struct mod_vrr_params vrr_params;
bb47de73 8130 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 8131 struct amdgpu_device *adev = dm->adev;
585d450c 8132 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8133 unsigned long flags;
4cda3243 8134 bool pack_sdp_v1_3 = false;
bb47de73
NK
8135
8136 if (!new_stream)
8137 return;
8138
8139 /*
8140 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8141 * For now it's sufficient to just guard against these conditions.
8142 */
8143
8144 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8145 return;
8146
4a580877 8147 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8148 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8149
180db303
NK
8150 if (surface) {
8151 mod_freesync_handle_preflip(
8152 dm->freesync_module,
8153 surface,
8154 new_stream,
8155 flip_timestamp_in_us,
8156 &vrr_params);
09aef2c4
MK
8157
8158 if (adev->family < AMDGPU_FAMILY_AI &&
8159 amdgpu_dm_vrr_active(new_crtc_state)) {
8160 mod_freesync_handle_v_update(dm->freesync_module,
8161 new_stream, &vrr_params);
e63e2491
EB
8162
8163 /* Need to call this before the frame ends. */
8164 dc_stream_adjust_vmin_vmax(dm->dc,
8165 new_crtc_state->stream,
8166 &vrr_params.adjust);
09aef2c4 8167 }
180db303 8168 }
bb47de73
NK
8169
8170 mod_freesync_build_vrr_infopacket(
8171 dm->freesync_module,
8172 new_stream,
180db303 8173 &vrr_params,
ecd0136b
HT
8174 PACKET_TYPE_VRR,
8175 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
8176 &vrr_infopacket,
8177 pack_sdp_v1_3);
bb47de73 8178
8a48b44c 8179 new_crtc_state->freesync_timing_changed |=
585d450c 8180 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
8181 &vrr_params.adjust,
8182 sizeof(vrr_params.adjust)) != 0);
bb47de73 8183
8a48b44c 8184 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
8185 (memcmp(&new_crtc_state->vrr_infopacket,
8186 &vrr_infopacket,
8187 sizeof(vrr_infopacket)) != 0);
8188
585d450c 8189 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
8190 new_crtc_state->vrr_infopacket = vrr_infopacket;
8191
585d450c 8192 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
8193 new_stream->vrr_infopacket = vrr_infopacket;
8194
8195 if (new_crtc_state->freesync_vrr_info_changed)
8196 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8197 new_crtc_state->base.crtc->base.id,
8198 (int)new_crtc_state->base.vrr_enabled,
180db303 8199 (int)vrr_params.state);
09aef2c4 8200
4a580877 8201 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
8202}
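/*
 * Editor's note (illustrative, not part of the original code): both
 * update_freesync_state_on_stream() above and
 * update_stream_irq_parameters() below follow the same locking pattern:
 * dm_irq_params.vrr_params is snapshotted under event_lock, updated
 * locally, then written back before the lock is released, so the
 * vblank/pflip IRQ handlers never observe a half-updated copy.
 */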
8203
585d450c 8204static void update_stream_irq_parameters(
e854194c
MK
8205 struct amdgpu_display_manager *dm,
8206 struct dm_crtc_state *new_crtc_state)
8207{
8208 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 8209 struct mod_vrr_params vrr_params;
e854194c 8210 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 8211 struct amdgpu_device *adev = dm->adev;
585d450c 8212 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8213 unsigned long flags;
e854194c
MK
8214
8215 if (!new_stream)
8216 return;
8217
8218 /*
8219 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8220 * For now it's sufficient to just guard against these conditions.
8221 */
8222 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8223 return;
8224
4a580877 8225 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8226 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8227
e854194c
MK
8228 if (new_crtc_state->vrr_supported &&
8229 config.min_refresh_in_uhz &&
8230 config.max_refresh_in_uhz) {
a85ba005
NC
8231 /*
8232 		 * if a freesync compatible mode was set, config.state will be set
8233 * in atomic check
8234 */
8235 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8236 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8237 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8238 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8239 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8240 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8241 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8242 } else {
8243 config.state = new_crtc_state->base.vrr_enabled ?
8244 VRR_STATE_ACTIVE_VARIABLE :
8245 VRR_STATE_INACTIVE;
8246 }
e854194c
MK
8247 } else {
8248 config.state = VRR_STATE_UNSUPPORTED;
8249 }
8250
8251 mod_freesync_build_vrr_params(dm->freesync_module,
8252 new_stream,
8253 &config, &vrr_params);
8254
8255 new_crtc_state->freesync_timing_changed |=
585d450c
AP
8256 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8257 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 8258
585d450c
AP
8259 new_crtc_state->freesync_config = config;
8260 /* Copy state for access from DM IRQ handler */
8261 acrtc->dm_irq_params.freesync_config = config;
8262 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8263 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 8264 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
8265}
8266
66b0c973
MK
8267static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8268 struct dm_crtc_state *new_state)
8269{
8270 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8271 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8272
8273 if (!old_vrr_active && new_vrr_active) {
8274 /* Transition VRR inactive -> active:
8275 * While VRR is active, we must not disable vblank irq, as a
8276 * reenable after disable would compute bogus vblank/pflip
8277 		 * timestamps if the reenable happened inside the display front porch.
d2574c33
MK
8278 *
8279 * We also need vupdate irq for the actual core vblank handling
8280 * at end of vblank.
66b0c973 8281 */
d2574c33 8282 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
8283 drm_crtc_vblank_get(new_state->base.crtc);
8284 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8285 __func__, new_state->base.crtc->base.id);
8286 } else if (old_vrr_active && !new_vrr_active) {
8287 /* Transition VRR active -> inactive:
8288 * Allow vblank irq disable again for fixed refresh rate.
8289 */
d2574c33 8290 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
8291 drm_crtc_vblank_put(new_state->base.crtc);
8292 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8293 __func__, new_state->base.crtc->base.id);
8294 }
8295}
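/*
 * Editor's note (illustrative, not part of the original code): the
 * drm_crtc_vblank_get()/drm_crtc_vblank_put() pair above keeps a vblank
 * reference held for the whole time VRR is active, which is what
 * prevents the vblank irq from being disabled mid-VRR.
 */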
8296
8ad27806
NK
8297static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8298{
8299 struct drm_plane *plane;
5760dcb9 8300 struct drm_plane_state *old_plane_state;
8ad27806
NK
8301 int i;
8302
8303 /*
8304 * TODO: Make this per-stream so we don't issue redundant updates for
8305 * commits with multiple streams.
8306 */
5760dcb9 8307 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8ad27806
NK
8308 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8309 handle_cursor_update(plane, old_plane_state);
8310}
8311
3be5262e 8312static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 8313 struct dc_state *dc_state,
3ee6b26b
AD
8314 struct drm_device *dev,
8315 struct amdgpu_display_manager *dm,
8316 struct drm_crtc *pcrtc,
420cd472 8317 bool wait_for_vblank)
e7b07cee 8318{
efc8278e 8319 uint32_t i;
8a48b44c 8320 uint64_t timestamp_ns;
e7b07cee 8321 struct drm_plane *plane;
0bc9706d 8322 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 8323 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
8324 struct drm_crtc_state *new_pcrtc_state =
8325 drm_atomic_get_new_crtc_state(state, pcrtc);
8326 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
8327 struct dm_crtc_state *dm_old_crtc_state =
8328 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 8329 int planes_count = 0, vpos, hpos;
570c91d5 8330 long r;
e7b07cee 8331 unsigned long flags;
8a48b44c 8332 struct amdgpu_bo *abo;
fdd1fe57
MK
8333 uint32_t target_vblank, last_flip_vblank;
8334 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 8335 bool pflip_present = false;
bc7f670e
DF
8336 struct {
8337 struct dc_surface_update surface_updates[MAX_SURFACES];
8338 struct dc_plane_info plane_infos[MAX_SURFACES];
8339 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 8340 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 8341 struct dc_stream_update stream_update;
74aa7bd4 8342 } *bundle;
bc7f670e 8343
74aa7bd4 8344 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 8345
74aa7bd4
DF
8346 if (!bundle) {
8347 dm_error("Failed to allocate update bundle\n");
4b510503
NK
8348 goto cleanup;
8349 }
e7b07cee 8350
8ad27806
NK
8351 /*
8352 * Disable the cursor first if we're disabling all the planes.
8353 * It'll remain on the screen after the planes are re-enabled
8354 * if we don't.
8355 */
8356 if (acrtc_state->active_planes == 0)
8357 amdgpu_dm_commit_cursors(state);
8358
e7b07cee 8359 /* update planes when needed */
efc8278e 8360 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 8361 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 8362 struct drm_crtc_state *new_crtc_state;
0bc9706d 8363 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 8364 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 8365 bool plane_needs_flip;
c7af5f77 8366 struct dc_plane_state *dc_plane;
54d76575 8367 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 8368
80c218d5
NK
8369 /* Cursor plane is handled after stream updates */
8370 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 8371 continue;
e7b07cee 8372
f5ba60fe
DD
8373 if (!fb || !crtc || pcrtc != crtc)
8374 continue;
8375
8376 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8377 if (!new_crtc_state->active)
e7b07cee
HW
8378 continue;
8379
bc7f670e 8380 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 8381
74aa7bd4 8382 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 8383 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
8384 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8385 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 8386 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 8387 }
8a48b44c 8388
695af5f9
NK
8389 fill_dc_scaling_info(new_plane_state,
8390 &bundle->scaling_infos[planes_count]);
8a48b44c 8391
695af5f9
NK
8392 bundle->surface_updates[planes_count].scaling_info =
8393 &bundle->scaling_infos[planes_count];
8a48b44c 8394
f5031000 8395 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 8396
f5031000 8397 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 8398
f5031000
DF
8399 if (!plane_needs_flip) {
8400 planes_count += 1;
8401 continue;
8402 }
8a48b44c 8403
2fac0f53
CK
8404 abo = gem_to_amdgpu_bo(fb->obj[0]);
8405
f8308898
AG
8406 /*
8407 		 * Wait for all fences on this FB. Do a limited wait to avoid
8408 		 * deadlocking during GPU reset, when this fence will not signal
8409 		 * but we hold the reservation lock for the BO.
8410 */
52791eee 8411 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 8412 false,
f8308898
AG
8413 msecs_to_jiffies(5000));
8414 if (unlikely(r <= 0))
ed8a5fb2 8415 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 8416
695af5f9 8417 fill_dc_plane_info_and_addr(
8ce5d842 8418 dm->adev, new_plane_state,
6eed95b0 8419 afb->tiling_flags,
695af5f9 8420 &bundle->plane_infos[planes_count],
87b7ebc2 8421 &bundle->flip_addrs[planes_count].address,
6eed95b0 8422 afb->tmz_surface, false);
87b7ebc2 8423
4711c033 8424 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
8425 new_plane_state->plane->index,
8426 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
8427
8428 bundle->surface_updates[planes_count].plane_info =
8429 &bundle->plane_infos[planes_count];
8a48b44c 8430
caff0e66
NK
8431 /*
8432 * Only allow immediate flips for fast updates that don't
8433 		 * change FB pitch, DCC state, rotation or mirroring.
8434 */
f5031000 8435 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 8436 crtc->state->async_flip &&
caff0e66 8437 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 8438
f5031000
DF
8439 timestamp_ns = ktime_get_ns();
8440 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8441 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8442 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 8443
f5031000
DF
8444 if (!bundle->surface_updates[planes_count].surface) {
8445 DRM_ERROR("No surface for CRTC: id=%d\n",
8446 acrtc_attach->crtc_id);
8447 continue;
bc7f670e
DF
8448 }
8449
f5031000
DF
8450 if (plane == pcrtc->primary)
8451 update_freesync_state_on_stream(
8452 dm,
8453 acrtc_state,
8454 acrtc_state->stream,
8455 dc_plane,
8456 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 8457
4711c033 8458 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
8459 __func__,
8460 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8461 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
8462
8463 planes_count += 1;
8464
8a48b44c
DF
8465 }
8466
74aa7bd4 8467 if (pflip_present) {
634092b1
MK
8468 if (!vrr_active) {
8469 /* Use old throttling in non-vrr fixed refresh rate mode
8470 * to keep flip scheduling based on target vblank counts
8471 * working in a backwards compatible way, e.g., for
8472 * clients using the GLX_OML_sync_control extension or
8473 * DRI3/Present extension with defined target_msc.
8474 */
e3eff4b5 8475 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
8476 }
8477 else {
8478 /* For variable refresh rate mode only:
8479 * Get vblank of last completed flip to avoid > 1 vrr
8480 * flips per video frame by use of throttling, but allow
8481 * flip programming anywhere in the possibly large
8482 * variable vrr vblank interval for fine-grained flip
8483 * timing control and more opportunity to avoid stutter
8484 * on late submission of flips.
8485 */
8486 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 8487 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
8488 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8489 }
8490
fdd1fe57 8491 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
8492
8493 /*
8494 * Wait until we're out of the vertical blank period before the one
8495 * targeted by the flip
8496 */
8497 while ((acrtc_attach->enabled &&
8498 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8499 0, &vpos, &hpos, NULL,
8500 NULL, &pcrtc->hwmode)
8501 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8502 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8503 (int)(target_vblank -
e3eff4b5 8504 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
8505 usleep_range(1000, 1100);
8506 }
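/*
 * Editor's note (illustrative numbers, not from the driver): with
 * wait_for_vblank == true and last_flip_vblank == 1000, target_vblank
 * above is 1001, so the loop polls in ~1 ms steps until scanout has
 * left the vblank period preceding vblank count 1001 before the flip
 * is programmed.
 */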
8507
8fe684e9
NK
8508 /**
8509 * Prepare the flip event for the pageflip interrupt to handle.
8510 *
8511 * This only works in the case where we've already turned on the
8512 * appropriate hardware blocks (eg. HUBP) so in the transition case
8513 * from 0 -> n planes we have to skip a hardware generated event
8514 * and rely on sending it from software.
8515 */
8516 if (acrtc_attach->base.state->event &&
8517 acrtc_state->active_planes > 0) {
8a48b44c
DF
8518 drm_crtc_vblank_get(pcrtc);
8519
8520 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8521
8522 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8523 prepare_flip_isr(acrtc_attach);
8524
8525 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8526 }
8527
8528 if (acrtc_state->stream) {
8a48b44c 8529 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 8530 bundle->stream_update.vrr_infopacket =
8a48b44c 8531 &acrtc_state->stream->vrr_infopacket;
e7b07cee 8532 }
e7b07cee
HW
8533 }
8534
bc92c065 8535 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
8536 if ((planes_count || acrtc_state->active_planes == 0) &&
8537 acrtc_state->stream) {
b6e881c9 8538 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 8539 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
8540 bundle->stream_update.src = acrtc_state->stream->src;
8541 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
8542 }
8543
cf020d49
NK
8544 if (new_pcrtc_state->color_mgmt_changed) {
8545 /*
8546 * TODO: This isn't fully correct since we've actually
8547 * already modified the stream in place.
8548 */
8549 bundle->stream_update.gamut_remap =
8550 &acrtc_state->stream->gamut_remap_matrix;
8551 bundle->stream_update.output_csc_transform =
8552 &acrtc_state->stream->csc_color_matrix;
8553 bundle->stream_update.out_transfer_func =
8554 acrtc_state->stream->out_transfer_func;
8555 }
bc7f670e 8556
8a48b44c 8557 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 8558 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 8559 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 8560
e63e2491
EB
8561 /*
8562 * If FreeSync state on the stream has changed then we need to
8563 * re-adjust the min/max bounds now that DC doesn't handle this
8564 * as part of commit.
8565 */
a85ba005 8566 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
8567 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8568 dc_stream_adjust_vmin_vmax(
8569 dm->dc, acrtc_state->stream,
585d450c 8570 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
8571 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8572 }
bc7f670e 8573 mutex_lock(&dm->dc_lock);
8c322309 8574 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 8575 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
8576 amdgpu_dm_psr_disable(acrtc_state->stream);
8577
bc7f670e 8578 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 8579 bundle->surface_updates,
bc7f670e
DF
8580 planes_count,
8581 acrtc_state->stream,
efc8278e
AJ
8582 &bundle->stream_update,
8583 dc_state);
8c322309 8584
8fe684e9
NK
8585 /**
8586 * Enable or disable the interrupts on the backend.
8587 *
8588 * Most pipes are put into power gating when unused.
8589 *
8590 * When power gating is enabled on a pipe we lose the
8591 * interrupt enablement state when power gating is disabled.
8592 *
8593 * So we need to update the IRQ control state in hardware
8594 * whenever the pipe turns on (since it could be previously
8595 * power gated) or off (since some pipes can't be power gated
8596 * on some ASICs).
8597 */
8598 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
8599 dm_update_pflip_irq_state(drm_to_adev(dev),
8600 acrtc_attach);
8fe684e9 8601
8c322309 8602 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 8603 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 8604 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309
RL
8605 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8606 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
d1ebfdd8
WW
8607 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8608 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8c322309
RL
8609 amdgpu_dm_psr_enable(acrtc_state->stream);
8610 }
8611
bc7f670e 8612 mutex_unlock(&dm->dc_lock);
e7b07cee 8613 }
4b510503 8614
8ad27806
NK
8615 /*
8616 * Update cursor state *after* programming all the planes.
8617 * This avoids redundant programming in the case where we're going
8618 * to be disabling a single plane - those pipes are being disabled.
8619 */
8620 if (acrtc_state->active_planes)
8621 amdgpu_dm_commit_cursors(state);
80c218d5 8622
4b510503 8623cleanup:
74aa7bd4 8624 kfree(bundle);
e7b07cee
HW
8625}
8626
6ce8f316
NK
8627static void amdgpu_dm_commit_audio(struct drm_device *dev,
8628 struct drm_atomic_state *state)
8629{
1348969a 8630 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
8631 struct amdgpu_dm_connector *aconnector;
8632 struct drm_connector *connector;
8633 struct drm_connector_state *old_con_state, *new_con_state;
8634 struct drm_crtc_state *new_crtc_state;
8635 struct dm_crtc_state *new_dm_crtc_state;
8636 const struct dc_stream_status *status;
8637 int i, inst;
8638
8639 /* Notify device removals. */
8640 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8641 if (old_con_state->crtc != new_con_state->crtc) {
8642 /* CRTC changes require notification. */
8643 goto notify;
8644 }
8645
8646 if (!new_con_state->crtc)
8647 continue;
8648
8649 new_crtc_state = drm_atomic_get_new_crtc_state(
8650 state, new_con_state->crtc);
8651
8652 if (!new_crtc_state)
8653 continue;
8654
8655 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8656 continue;
8657
8658 notify:
8659 aconnector = to_amdgpu_dm_connector(connector);
8660
8661 mutex_lock(&adev->dm.audio_lock);
8662 inst = aconnector->audio_inst;
8663 aconnector->audio_inst = -1;
8664 mutex_unlock(&adev->dm.audio_lock);
8665
8666 amdgpu_dm_audio_eld_notify(adev, inst);
8667 }
8668
8669 /* Notify audio device additions. */
8670 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8671 if (!new_con_state->crtc)
8672 continue;
8673
8674 new_crtc_state = drm_atomic_get_new_crtc_state(
8675 state, new_con_state->crtc);
8676
8677 if (!new_crtc_state)
8678 continue;
8679
8680 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8681 continue;
8682
8683 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8684 if (!new_dm_crtc_state->stream)
8685 continue;
8686
8687 status = dc_stream_get_status(new_dm_crtc_state->stream);
8688 if (!status)
8689 continue;
8690
8691 aconnector = to_amdgpu_dm_connector(connector);
8692
8693 mutex_lock(&adev->dm.audio_lock);
8694 inst = status->audio_inst;
8695 aconnector->audio_inst = inst;
8696 mutex_unlock(&adev->dm.audio_lock);
8697
8698 amdgpu_dm_audio_eld_notify(adev, inst);
8699 }
8700}
8701
1f6010a9 8702/*
27b3f4fc
LSL
8703 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8704 * @crtc_state: the DRM CRTC state
8705 * @stream_state: the DC stream state.
8706 *
8707 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8708 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8709 */
8710static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8711 struct dc_stream_state *stream_state)
8712{
b9952f93 8713 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 8714}
e7b07cee 8715
b8592b48
LL
8716/**
8717 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8718 * @state: The atomic state to commit
8719 *
8720 * This will tell DC to commit the constructed DC state from atomic_check,
8721 * programming the hardware. Any failure here implies a hardware failure, since
8722 * atomic check should have filtered anything non-kosher.
8723 */
7578ecda 8724static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
8725{
8726 struct drm_device *dev = state->dev;
1348969a 8727 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8728 struct amdgpu_display_manager *dm = &adev->dm;
8729 struct dm_atomic_state *dm_state;
eb3dc897 8730 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 8731 uint32_t i, j;
5cc6dcbd 8732 struct drm_crtc *crtc;
0bc9706d 8733 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
8734 unsigned long flags;
8735 bool wait_for_vblank = true;
8736 struct drm_connector *connector;
c2cea706 8737 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 8738 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 8739 int crtc_disable_count = 0;
6ee90e88 8740 bool mode_set_reset_required = false;
e7b07cee 8741
e8a98235
RS
8742 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8743
e7b07cee
HW
8744 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8745
eb3dc897
NK
8746 dm_state = dm_atomic_get_new_state(state);
8747 if (dm_state && dm_state->context) {
8748 dc_state = dm_state->context;
8749 } else {
8750 /* No state changes, retain current state. */
813d20dc 8751 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
8752 ASSERT(dc_state_temp);
8753 dc_state = dc_state_temp;
8754 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8755 }
e7b07cee 8756
6d90a208
AP
8757 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8758 new_crtc_state, i) {
8759 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8760
8761 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8762
8763 if (old_crtc_state->active &&
8764 (!new_crtc_state->active ||
8765 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8766 manage_dm_interrupts(adev, acrtc, false);
8767 dc_stream_release(dm_old_crtc_state->stream);
8768 }
8769 }
8770
8976f73b
RS
8771 drm_atomic_helper_calc_timestamping_constants(state);
8772
e7b07cee 8773 /* update changed items */
0bc9706d 8774 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 8775 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8776
54d76575
LSL
8777 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8778 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 8779
4711c033 8780 DRM_DEBUG_ATOMIC(
e7b07cee
HW
8781 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8782 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8783 "connectors_changed:%d\n",
8784 acrtc->crtc_id,
0bc9706d
LSL
8785 new_crtc_state->enable,
8786 new_crtc_state->active,
8787 new_crtc_state->planes_changed,
8788 new_crtc_state->mode_changed,
8789 new_crtc_state->active_changed,
8790 new_crtc_state->connectors_changed);
e7b07cee 8791
5c68c652
VL
8792 /* Disable cursor if disabling crtc */
8793 if (old_crtc_state->active && !new_crtc_state->active) {
8794 struct dc_cursor_position position;
8795
8796 memset(&position, 0, sizeof(position));
8797 mutex_lock(&dm->dc_lock);
8798 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8799 mutex_unlock(&dm->dc_lock);
8800 }
8801
27b3f4fc
LSL
8802 /* Copy all transient state flags into dc state */
8803 if (dm_new_crtc_state->stream) {
8804 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8805 dm_new_crtc_state->stream);
8806 }
8807
e7b07cee
HW
8808 /* handles headless hotplug case, updating new_state and
8809 * aconnector as needed
8810 */
8811
54d76575 8812 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 8813
4711c033 8814 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8815
54d76575 8816 if (!dm_new_crtc_state->stream) {
e7b07cee 8817 /*
b830ebc9
HW
8818 			 * this could happen because of issues with
8819 			 * userspace notification delivery.
8820 			 * In this case userspace tries to set a mode on
1f6010a9
DF
8821 			 * a display which is in fact disconnected.
8822 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
8823 * We expect reset mode will come soon.
8824 *
8825 			 * This can also happen when an unplug is done
8826 			 * during the resume sequence.
8827 *
8828 * In this case, we want to pretend we still
8829 * have a sink to keep the pipe running so that
8830 * hw state is consistent with the sw state
8831 */
f1ad2f5e 8832 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
8833 __func__, acrtc->base.base.id);
8834 continue;
8835 }
8836
54d76575
LSL
8837 if (dm_old_crtc_state->stream)
8838 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 8839
97028037
LP
8840 pm_runtime_get_noresume(dev->dev);
8841
e7b07cee 8842 acrtc->enabled = true;
0bc9706d
LSL
8843 acrtc->hw_mode = new_crtc_state->mode;
8844 crtc->hwmode = new_crtc_state->mode;
6ee90e88 8845 mode_set_reset_required = true;
0bc9706d 8846 } else if (modereset_required(new_crtc_state)) {
4711c033 8847 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8848 /* i.e. reset mode */
6ee90e88 8849 if (dm_old_crtc_state->stream)
54d76575 8850 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 8851
6ee90e88 8852 mode_set_reset_required = true;
e7b07cee
HW
8853 }
8854 } /* for_each_crtc_in_state() */
8855
eb3dc897 8856 if (dc_state) {
6ee90e88 8857 		/* if there is a mode set or reset, disable eDP PSR */
8858 if (mode_set_reset_required)
8859 amdgpu_dm_psr_disable_all(dm);
8860
eb3dc897 8861 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 8862 mutex_lock(&dm->dc_lock);
eb3dc897 8863 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5af50b0b
BR
8864#if defined(CONFIG_DRM_AMD_DC_DCN)
8865 /* Allow idle optimization when vblank count is 0 for display off */
8866 if (dm->active_vblank_irq_count == 0)
8867 			dc_allow_idle_optimizations(dm->dc, true);
8868#endif
674e78ac 8869 mutex_unlock(&dm->dc_lock);
fa2123db 8870 }
fe8858bb 8871
0bc9706d 8872 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8873 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8874
54d76575 8875 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8876
54d76575 8877 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 8878 const struct dc_stream_status *status =
54d76575 8879 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8880
eb3dc897 8881 if (!status)
09f609c3
LL
8882 status = dc_stream_get_status_from_state(dc_state,
8883 dm_new_crtc_state->stream);
e7b07cee 8884 if (!status)
54d76575 8885 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
8886 else
8887 acrtc->otg_inst = status->primary_otg_inst;
8888 }
8889 }
0c8620d6
BL
8890#ifdef CONFIG_DRM_AMD_DC_HDCP
8891 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8892 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8893 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8894 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8895
8896 new_crtc_state = NULL;
8897
8898 if (acrtc)
8899 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8900
8901 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8902
8903 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8904 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8905 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8906 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 8907 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
8908 continue;
8909 }
8910
8911 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
8912 hdcp_update_display(
8913 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 8914 new_con_state->hdcp_content_type,
0e86d3d4 8915 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
0c8620d6
BL
8916 }
8917#endif
e7b07cee 8918
02d6a6fc 8919 /* Handle connector state changes */
c2cea706 8920 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
8921 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8922 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8923 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 8924 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 8925 struct dc_stream_update stream_update;
b232d4ed 8926 struct dc_info_packet hdr_packet;
e7b07cee 8927 struct dc_stream_status *status = NULL;
b232d4ed 8928 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 8929
efc8278e 8930 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
8931 memset(&stream_update, 0, sizeof(stream_update));
8932
44d09c6a 8933 if (acrtc) {
0bc9706d 8934 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
8935 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8936 }
0bc9706d 8937
e7b07cee 8938 /* Skip any modesets/resets */
0bc9706d 8939 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
8940 continue;
8941
54d76575 8942 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
8943 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8944
b232d4ed
NK
8945 scaling_changed = is_scaling_state_different(dm_new_con_state,
8946 dm_old_con_state);
8947
8948 abm_changed = dm_new_crtc_state->abm_level !=
8949 dm_old_crtc_state->abm_level;
8950
8951 hdr_changed =
72921cdf 8952 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
b232d4ed
NK
8953
8954 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 8955 continue;
e7b07cee 8956
b6e881c9 8957 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 8958 if (scaling_changed) {
02d6a6fc 8959 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 8960 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 8961
02d6a6fc
DF
8962 stream_update.src = dm_new_crtc_state->stream->src;
8963 stream_update.dst = dm_new_crtc_state->stream->dst;
8964 }
8965
b232d4ed 8966 if (abm_changed) {
02d6a6fc
DF
8967 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8968
8969 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8970 }
70e8ffc5 8971
b232d4ed
NK
8972 if (hdr_changed) {
8973 fill_hdr_info_packet(new_con_state, &hdr_packet);
8974 stream_update.hdr_static_metadata = &hdr_packet;
8975 }
8976
54d76575 8977 status = dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8978 WARN_ON(!status);
3be5262e 8979 WARN_ON(!status->plane_count);
e7b07cee 8980
02d6a6fc
DF
8981 /*
8982 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8983 * Here we create an empty update on each plane.
8984 * To fix this, DC should permit updating only stream properties.
8985 */
8986 for (j = 0; j < status->plane_count; j++)
efc8278e 8987 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
8988
8989
8990 mutex_lock(&dm->dc_lock);
8991 dc_commit_updates_for_stream(dm->dc,
efc8278e 8992 dummy_updates,
02d6a6fc
DF
8993 status->plane_count,
8994 dm_new_crtc_state->stream,
efc8278e
AJ
8995 &stream_update,
8996 dc_state);
02d6a6fc 8997 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
8998 }
8999
b5e83f6f 9000 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 9001 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 9002 new_crtc_state, i) {
fe2a1965
LP
9003 if (old_crtc_state->active && !new_crtc_state->active)
9004 crtc_disable_count++;
9005
54d76575 9006 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 9007 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 9008
585d450c
AP
9009 		/* Update the freesync config on the crtc state, and the params used by the irq handler */
9010 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 9011
66b0c973
MK
9012 /* Handle vrr on->off / off->on transitions */
9013 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9014 dm_new_crtc_state);
e7b07cee
HW
9015 }
9016
8fe684e9
NK
9017 /**
9018 * Enable interrupts for CRTCs that are newly enabled or went through
9019 	 * a modeset. This was intentionally deferred until after the front end
9020 	 * state was modified, to wait until the OTG is on, so that the IRQ
9021 	 * handlers don't access stale or invalid state.
9022 */
9023 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9024 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 9025#ifdef CONFIG_DEBUG_FS
86bc2219 9026 bool configure_crc = false;
8e7b6fee 9027 enum amdgpu_dm_pipe_crc_source cur_crc_src;
d98af272
WL
9028#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9029 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9030#endif
9031 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9032 cur_crc_src = acrtc->dm_irq_params.crc_src;
9033 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 9034#endif
585d450c
AP
9035 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9036
8fe684e9
NK
9037 if (new_crtc_state->active &&
9038 (!old_crtc_state->active ||
9039 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
9040 dc_stream_retain(dm_new_crtc_state->stream);
9041 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 9042 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 9043
24eb9374 9044#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
9045 /**
9046 * Frontend may have changed so reapply the CRC capture
9047 * settings for the stream.
9048 */
9049 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 9050
8e7b6fee 9051 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
9052 configure_crc = true;
9053#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
9054 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9055 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9056 acrtc->dm_irq_params.crc_window.update_win = true;
9057 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9058 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9059 crc_rd_wrk->crtc = crtc;
9060 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9061 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9062 }
86bc2219 9063#endif
e2881d6d 9064 }
c920888c 9065
86bc2219 9066 if (configure_crc)
bbc49fc0
WL
9067 if (amdgpu_dm_crtc_configure_crc_source(
9068 crtc, dm_new_crtc_state, cur_crc_src))
9069 					DRM_DEBUG_DRIVER("Failed to configure crc source\n");
24eb9374 9070#endif
8fe684e9
NK
9071 }
9072 }
e7b07cee 9073
420cd472 9074 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 9075 if (new_crtc_state->async_flip)
420cd472
DF
9076 wait_for_vblank = false;
9077
e7b07cee 9078 /* update planes when needed per crtc*/
5cc6dcbd 9079 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 9080 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9081
54d76575 9082 if (dm_new_crtc_state->stream)
eb3dc897 9083 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 9084 dm, crtc, wait_for_vblank);
e7b07cee
HW
9085 }
9086
6ce8f316
NK
9087 /* Update audio instances for each connector. */
9088 amdgpu_dm_commit_audio(dev, state);
9089
7230362c
AD
9090#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9091 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9092 /* restore the backlight level */
9093 if (dm->backlight_dev)
9094 amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9095#endif
e7b07cee
HW
9096 /*
9097 * send vblank event on all events not handled in flip and
9098 * mark consumed event for drm_atomic_helper_commit_hw_done
9099 */
4a580877 9100 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 9101 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9102
0bc9706d
LSL
9103 if (new_crtc_state->event)
9104 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 9105
0bc9706d 9106 new_crtc_state->event = NULL;
e7b07cee 9107 }
4a580877 9108 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 9109
29c8f234
LL
9110 /* Signal HW programming completion */
9111 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
9112
9113 if (wait_for_vblank)
320a1274 9114 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
9115
9116 drm_atomic_helper_cleanup_planes(dev, state);
97028037 9117
5f6fab24
AD
9118 /* return the stolen vga memory back to VRAM */
9119 if (!adev->mman.keep_stolen_vga_memory)
9120 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9121 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9122
1f6010a9
DF
9123 /*
9124 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
9125 * so we can put the GPU into runtime suspend if we're not driving any
9126 * displays anymore
9127 */
fe2a1965
LP
9128 for (i = 0; i < crtc_disable_count; i++)
9129 pm_runtime_put_autosuspend(dev->dev);
97028037 9130 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
9131
9132 if (dc_state_temp)
9133 dc_release_state(dc_state_temp);
e7b07cee
HW
9134}
9135
9136
9137static int dm_force_atomic_commit(struct drm_connector *connector)
9138{
9139 int ret = 0;
9140 struct drm_device *ddev = connector->dev;
9141 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9142 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9143 struct drm_plane *plane = disconnected_acrtc->base.primary;
9144 struct drm_connector_state *conn_state;
9145 struct drm_crtc_state *crtc_state;
9146 struct drm_plane_state *plane_state;
9147
9148 if (!state)
9149 return -ENOMEM;
9150
9151 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9152
9153 /* Construct an atomic state to restore previous display setting */
9154
9155 /*
9156 * Attach connectors to drm_atomic_state
9157 */
9158 conn_state = drm_atomic_get_connector_state(state, connector);
9159
9160 ret = PTR_ERR_OR_ZERO(conn_state);
9161 if (ret)
2dc39051 9162 goto out;
e7b07cee
HW
9163
9164 /* Attach crtc to drm_atomic_state*/
9165 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9166
9167 ret = PTR_ERR_OR_ZERO(crtc_state);
9168 if (ret)
2dc39051 9169 goto out;
e7b07cee
HW
9170
9171 /* force a restore */
9172 crtc_state->mode_changed = true;
9173
9174 /* Attach plane to drm_atomic_state */
9175 plane_state = drm_atomic_get_plane_state(state, plane);
9176
9177 ret = PTR_ERR_OR_ZERO(plane_state);
9178 if (ret)
2dc39051 9179 goto out;
e7b07cee
HW
9180
9181 /* Call commit internally with the state we just constructed */
9182 ret = drm_atomic_commit(state);
e7b07cee 9183
2dc39051 9184out:
e7b07cee 9185 drm_atomic_state_put(state);
2dc39051
VL
9186 if (ret)
9187 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
9188
9189 return ret;
9190}
9191
9192/*
1f6010a9
DF
9193 * This function handles all cases when set mode does not come upon hotplug.
9194 * This includes when a display is unplugged then plugged back into the
9195  * same port and when running without usermode desktop manager support.
e7b07cee 9196 */
3ee6b26b
AD
9197void dm_restore_drm_connector_state(struct drm_device *dev,
9198 struct drm_connector *connector)
e7b07cee 9199{
c84dec2f 9200 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
9201 struct amdgpu_crtc *disconnected_acrtc;
9202 struct dm_crtc_state *acrtc_state;
9203
9204 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9205 return;
9206
9207 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
9208 if (!disconnected_acrtc)
9209 return;
e7b07cee 9210
70e8ffc5
HW
9211 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9212 if (!acrtc_state->stream)
e7b07cee
HW
9213 return;
9214
9215 /*
9216 * If the previous sink is not released and different from the current,
9217 	 * we deduce we are in a state where we cannot rely on a usermode call
9218 * to turn on the display, so we do it here
9219 */
9220 if (acrtc_state->stream->sink != aconnector->dc_sink)
9221 dm_force_atomic_commit(&aconnector->base);
9222}
9223
1f6010a9 9224/*
e7b07cee
HW
9225 * Grabs all modesetting locks to serialize against any blocking commits,
9226 * Waits for completion of all non blocking commits.
9227 */
3ee6b26b
AD
9228static int do_aquire_global_lock(struct drm_device *dev,
9229 struct drm_atomic_state *state)
e7b07cee
HW
9230{
9231 struct drm_crtc *crtc;
9232 struct drm_crtc_commit *commit;
9233 long ret;
9234
1f6010a9
DF
9235 /*
9236 	 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
9237 	 * ensure that when the framework releases it, the
9238 	 * extra locks we are locking here will get released too.
9239 */
9240 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9241 if (ret)
9242 return ret;
9243
9244 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9245 spin_lock(&crtc->commit_lock);
9246 commit = list_first_entry_or_null(&crtc->commit_list,
9247 struct drm_crtc_commit, commit_entry);
9248 if (commit)
9249 drm_crtc_commit_get(commit);
9250 spin_unlock(&crtc->commit_lock);
9251
9252 if (!commit)
9253 continue;
9254
1f6010a9
DF
9255 /*
9256 * Make sure all pending HW programming completed and
e7b07cee
HW
9257 * page flips done
9258 */
9259 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9260
9261 if (ret > 0)
9262 ret = wait_for_completion_interruptible_timeout(
9263 &commit->flip_done, 10*HZ);
9264
9265 if (ret == 0)
9266 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 9267 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
9268
9269 drm_crtc_commit_put(commit);
9270 }
9271
9272 return ret < 0 ? ret : 0;
9273}
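/*
 * Editor's note (illustrative, not part of the original code): each
 * wait above allows up to 10 * HZ jiffies, i.e. 10 seconds, first for
 * hw_done and then for flip_done, before the timeout is reported for
 * that CRTC's pending commit.
 */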
9274
bb47de73
NK
9275static void get_freesync_config_for_crtc(
9276 struct dm_crtc_state *new_crtc_state,
9277 struct dm_connector_state *new_con_state)
98e6436d
AK
9278{
9279 struct mod_freesync_config config = {0};
98e6436d
AK
9280 struct amdgpu_dm_connector *aconnector =
9281 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 9282 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 9283 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 9284 bool fs_vid_mode = false;
98e6436d 9285
a057ec46 9286 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
9287 vrefresh >= aconnector->min_vfreq &&
9288 vrefresh <= aconnector->max_vfreq;
bb47de73 9289
a057ec46
IB
9290 if (new_crtc_state->vrr_supported) {
9291 new_crtc_state->stream->ignore_msa_timing_param = true;
a85ba005
NC
9292 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9293
9294 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9295 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 9296 config.vsif_supported = true;
180db303 9297 config.btr = true;
98e6436d 9298
a85ba005
NC
9299 if (fs_vid_mode) {
9300 config.state = VRR_STATE_ACTIVE_FIXED;
9301 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9302 goto out;
9303 } else if (new_crtc_state->base.vrr_enabled) {
9304 config.state = VRR_STATE_ACTIVE_VARIABLE;
9305 } else {
9306 config.state = VRR_STATE_INACTIVE;
9307 }
9308 }
9309out:
bb47de73
NK
9310 new_crtc_state->freesync_config = config;
9311}
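/*
 * Editor's note (worked example with assumed panel values): a display
 * reporting min_vfreq = 48 and max_vfreq = 144 yields
 * config.min_refresh_in_uhz = 48000000 and
 * config.max_refresh_in_uhz = 144000000; a mode with
 * drm_mode_vrefresh() == 60 falls inside that range, so vrr_supported
 * is set above.
 */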
98e6436d 9312
bb47de73
NK
9313static void reset_freesync_config_for_crtc(
9314 struct dm_crtc_state *new_crtc_state)
9315{
9316 new_crtc_state->vrr_supported = false;
98e6436d 9317
bb47de73
NK
9318 memset(&new_crtc_state->vrr_infopacket, 0,
9319 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
9320}
9321
a85ba005
NC
9322static bool
9323is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9324 struct drm_crtc_state *new_crtc_state)
9325{
9326 struct drm_display_mode old_mode, new_mode;
9327
9328 if (!old_crtc_state || !new_crtc_state)
9329 return false;
9330
9331 old_mode = old_crtc_state->mode;
9332 new_mode = new_crtc_state->mode;
9333
9334 if (old_mode.clock == new_mode.clock &&
9335 old_mode.hdisplay == new_mode.hdisplay &&
9336 old_mode.vdisplay == new_mode.vdisplay &&
9337 old_mode.htotal == new_mode.htotal &&
9338 old_mode.vtotal != new_mode.vtotal &&
9339 old_mode.hsync_start == new_mode.hsync_start &&
9340 old_mode.vsync_start != new_mode.vsync_start &&
9341 old_mode.hsync_end == new_mode.hsync_end &&
9342 old_mode.vsync_end != new_mode.vsync_end &&
9343 old_mode.hskew == new_mode.hskew &&
9344 old_mode.vscan == new_mode.vscan &&
9345 (old_mode.vsync_end - old_mode.vsync_start) ==
9346 (new_mode.vsync_end - new_mode.vsync_start))
9347 return true;
9348
9349 return false;
9350}
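/*
 * Editor's note (illustrative reading, not part of the original code):
 * the check above accepts two modes that match in everything except
 * vtotal and vsync position while keeping the same vsync pulse width,
 * i.e. modes that differ only in vertical front porch. That is exactly
 * the kind of change the freesync video path can apply without a full
 * modeset.
 */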
9351
9352static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9353 uint64_t num, den, res;
9354 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9355
9356 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9357
9358 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9359 den = (unsigned long long)new_crtc_state->mode.htotal *
9360 (unsigned long long)new_crtc_state->mode.vtotal;
9361
9362 res = div_u64(num, den);
9363 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9364}
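/*
 * Editor's note (worked example with assumed mode values): for a
 * 1920x1080@60 mode with clock = 148500 kHz, htotal = 2200 and
 * vtotal = 1125, num = 148500 * 1000 * 1000000 and den = 2475000, so
 * fixed_refresh_in_uhz = num / den = 60000000 uHz, i.e. exactly 60 Hz.
 */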
9365
4b9674e5
LL
9366static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9367 struct drm_atomic_state *state,
9368 struct drm_crtc *crtc,
9369 struct drm_crtc_state *old_crtc_state,
9370 struct drm_crtc_state *new_crtc_state,
9371 bool enable,
9372 bool *lock_and_validation_needed)
e7b07cee 9373{
eb3dc897 9374 struct dm_atomic_state *dm_state = NULL;
54d76575 9375 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 9376 struct dc_stream_state *new_stream;
62f55537 9377 int ret = 0;
d4d4a645 9378
1f6010a9
DF
9379 /*
9380 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9381 * update changed items
9382 */
4b9674e5
LL
9383 struct amdgpu_crtc *acrtc = NULL;
9384 struct amdgpu_dm_connector *aconnector = NULL;
9385 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9386 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 9387
4b9674e5 9388 new_stream = NULL;
9635b754 9389
4b9674e5
LL
9390 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9391 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9392 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 9393 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 9394
4b9674e5
LL
9395 /* TODO This hack should go away */
9396 if (aconnector && enable) {
9397 /* Make sure fake sink is created in plug-in scenario */
9398 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9399 &aconnector->base);
9400 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9401 &aconnector->base);
19f89e23 9402
4b9674e5
LL
9403 if (IS_ERR(drm_new_conn_state)) {
9404 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9405 goto fail;
9406 }
19f89e23 9407
4b9674e5
LL
9408 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9409 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 9410
02d35a67
JFZ
9411 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9412 goto skip_modeset;
9413
cbd14ae7
SW
9414 new_stream = create_validate_stream_for_sink(aconnector,
9415 &new_crtc_state->mode,
9416 dm_new_conn_state,
9417 dm_old_crtc_state->stream);
19f89e23 9418
4b9674e5
LL
9419 /*
9420 		 * we can have no stream on ACTION_SET if a display
9421 		 * was disconnected during S3. In this case it is not an
9422 		 * error: the OS will be updated after detection, and
9423 		 * will do the right thing on the next atomic commit.
9424 */
19f89e23 9425
4b9674e5
LL
9426 if (!new_stream) {
9427 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9428 __func__, acrtc->base.base.id);
9429 ret = -ENOMEM;
9430 goto fail;
9431 }
e7b07cee 9432
3d4e52d0
VL
9433 /*
9434 * TODO: Check VSDB bits to decide whether this should
9435 * be enabled or not.
9436 */
9437 new_stream->triggered_crtc_reset.enabled =
9438 dm->force_timing_sync;
9439
4b9674e5 9440 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 9441
88694af9
NK
9442 ret = fill_hdr_info_packet(drm_new_conn_state,
9443 &new_stream->hdr_static_metadata);
9444 if (ret)
9445 goto fail;
9446
7e930949
NK
9447 /*
9448 * If we already removed the old stream from the context
9449 * (and set the new stream to NULL) then we can't reuse
9450 * the old stream even if the stream and scaling are unchanged.
9451 		 * We'll hit the BUG_ON below and get a black screen.
9452 *
9453 * TODO: Refactor this function to allow this check to work
9454 * in all conditions.
9455 */
a85ba005
NC
9456 if (amdgpu_freesync_vid_mode &&
9457 dm_new_crtc_state->stream &&
9458 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9459 goto skip_modeset;
9460
7e930949
NK
9461 if (dm_new_crtc_state->stream &&
9462 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
9463 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9464 new_crtc_state->mode_changed = false;
9465 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9466 new_crtc_state->mode_changed);
62f55537 9467 }
4b9674e5 9468 }
b830ebc9 9469
02d35a67 9470 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
9471 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9472 goto skip_modeset;
e7b07cee 9473
4711c033 9474 DRM_DEBUG_ATOMIC(
4b9674e5
LL
9475 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9476 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9477 "connectors_changed:%d\n",
9478 acrtc->crtc_id,
9479 new_crtc_state->enable,
9480 new_crtc_state->active,
9481 new_crtc_state->planes_changed,
9482 new_crtc_state->mode_changed,
9483 new_crtc_state->active_changed,
9484 new_crtc_state->connectors_changed);
62f55537 9485
4b9674e5
LL
9486 /* Remove stream for any changed/disabled CRTC */
9487 if (!enable) {
62f55537 9488
4b9674e5
LL
9489 if (!dm_old_crtc_state->stream)
9490 goto skip_modeset;
eb3dc897 9491
a85ba005
NC
9492 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9493 is_timing_unchanged_for_freesync(new_crtc_state,
9494 old_crtc_state)) {
9495 new_crtc_state->mode_changed = false;
9496 DRM_DEBUG_DRIVER(
9497 "Mode change not required for front porch change, "
9498 "setting mode_changed to %d",
9499 new_crtc_state->mode_changed);
9500
9501 set_freesync_fixed_config(dm_new_crtc_state);
9502
9503 goto skip_modeset;
9504 } else if (amdgpu_freesync_vid_mode && aconnector &&
9505 is_freesync_video_mode(&new_crtc_state->mode,
9506 aconnector)) {
9507 set_freesync_fixed_config(dm_new_crtc_state);
9508 }
9509
4b9674e5
LL
9510 ret = dm_atomic_get_state(state, &dm_state);
9511 if (ret)
9512 goto fail;
e7b07cee 9513
4b9674e5
LL
9514 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9515 crtc->base.id);
62f55537 9516
4b9674e5
LL
9517 /* i.e. reset mode */
9518 if (dc_remove_stream_from_ctx(
9519 dm->dc,
9520 dm_state->context,
9521 dm_old_crtc_state->stream) != DC_OK) {
9522 ret = -EINVAL;
9523 goto fail;
9524 }
62f55537 9525
4b9674e5
LL
9526 dc_stream_release(dm_old_crtc_state->stream);
9527 dm_new_crtc_state->stream = NULL;
bb47de73 9528
4b9674e5 9529 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 9530
4b9674e5 9531 *lock_and_validation_needed = true;
62f55537 9532
4b9674e5
LL
9533 } else {/* Add stream for any updated/enabled CRTC */
9534 /*
9535 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
9536 		 * added MST connectors are not found in the existing crtc_state in chained mode
9537 * TODO: need to dig out the root cause of that
9538 */
9539 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9540 goto skip_modeset;
62f55537 9541
4b9674e5
LL
9542 if (modereset_required(new_crtc_state))
9543 goto skip_modeset;
62f55537 9544
4b9674e5
LL
9545 if (modeset_required(new_crtc_state, new_stream,
9546 dm_old_crtc_state->stream)) {
62f55537 9547
4b9674e5 9548 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 9549
4b9674e5
LL
9550 ret = dm_atomic_get_state(state, &dm_state);
9551 if (ret)
9552 goto fail;
27b3f4fc 9553
4b9674e5 9554 dm_new_crtc_state->stream = new_stream;
62f55537 9555
4b9674e5 9556 dc_stream_retain(new_stream);
1dc90497 9557
4711c033
LT
9558 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9559 crtc->base.id);
1dc90497 9560
4b9674e5
LL
9561 if (dc_add_stream_to_ctx(
9562 dm->dc,
9563 dm_state->context,
9564 dm_new_crtc_state->stream) != DC_OK) {
9565 ret = -EINVAL;
9566 goto fail;
9b690ef3
BL
9567 }
9568
4b9674e5
LL
9569 *lock_and_validation_needed = true;
9570 }
9571 }
e277adc5 9572
4b9674e5
LL
9573skip_modeset:
9574 /* Release extra reference */
9575 if (new_stream)
9576 dc_stream_release(new_stream);
e277adc5 9577
4b9674e5
LL
9578 /*
9579 * We want to do dc stream updates that do not require a
9580 * full modeset below.
9581 */
2afda735 9582 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
9583 return 0;
9584 /*
9585 	 * Given the above conditions, the dc state cannot be NULL, because:
9586 	 * 1. we're in the process of enabling CRTCs (the stream has just been
9587 	 *    added to the dc context, or is already in the context),
9588 	 * 2. the CRTC has a valid connector attached, and
9589 	 * 3. the CRTC is currently active and enabled.
9590 * => The dc stream state currently exists.
9591 */
9592 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 9593
4b9674e5
LL
9594 /* Scaling or underscan settings */
9595 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9596 update_stream_scaling_settings(
9597 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 9598
b05e2c5e
DF
9599 /* ABM settings */
9600 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9601
4b9674e5
LL
9602 /*
9603 * Color management settings. We also update color properties
9604 * when a modeset is needed, to ensure it gets reprogrammed.
9605 */
9606 if (dm_new_crtc_state->base.color_mgmt_changed ||
9607 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 9608 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
9609 if (ret)
9610 goto fail;
62f55537 9611 }
e7b07cee 9612
4b9674e5
LL
9613 /* Update Freesync settings. */
9614 get_freesync_config_for_crtc(dm_new_crtc_state,
9615 dm_new_conn_state);
9616
62f55537 9617 return ret;
9635b754
DS
9618
9619fail:
9620 if (new_stream)
9621 dc_stream_release(new_stream);
9622 return ret;
62f55537 9623}
9b690ef3 9624
f6ff2a08
NK
9625static bool should_reset_plane(struct drm_atomic_state *state,
9626 struct drm_plane *plane,
9627 struct drm_plane_state *old_plane_state,
9628 struct drm_plane_state *new_plane_state)
9629{
9630 struct drm_plane *other;
9631 struct drm_plane_state *old_other_state, *new_other_state;
9632 struct drm_crtc_state *new_crtc_state;
9633 int i;
9634
70a1efac
NK
9635 /*
9636 * TODO: Remove this hack once the checks below are sufficient
9637 	 * to determine when we need to reset all the planes on
9638 * the stream.
9639 */
9640 if (state->allow_modeset)
9641 return true;
9642
f6ff2a08
NK
9643 /* Exit early if we know that we're adding or removing the plane. */
9644 if (old_plane_state->crtc != new_plane_state->crtc)
9645 return true;
9646
9647 /* old crtc == new_crtc == NULL, plane not in context. */
9648 if (!new_plane_state->crtc)
9649 return false;
9650
9651 new_crtc_state =
9652 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9653
9654 if (!new_crtc_state)
9655 return true;
9656
7316c4ad
NK
9657 /* CRTC Degamma changes currently require us to recreate planes. */
9658 if (new_crtc_state->color_mgmt_changed)
9659 return true;
9660
f6ff2a08
NK
9661 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9662 return true;
9663
9664 /*
9665 * If there are any new primary or overlay planes being added or
9666 * removed then the z-order can potentially change. To ensure
9667 * correct z-order and pipe acquisition the current DC architecture
9668 * requires us to remove and recreate all existing planes.
9669 *
9670 * TODO: Come up with a more elegant solution for this.
9671 */
9672 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 9673 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
9674 if (other->type == DRM_PLANE_TYPE_CURSOR)
9675 continue;
9676
9677 if (old_other_state->crtc != new_plane_state->crtc &&
9678 new_other_state->crtc != new_plane_state->crtc)
9679 continue;
9680
9681 if (old_other_state->crtc != new_other_state->crtc)
9682 return true;
9683
dc4cb30d
NK
9684 /* Src/dst size and scaling updates. */
9685 if (old_other_state->src_w != new_other_state->src_w ||
9686 old_other_state->src_h != new_other_state->src_h ||
9687 old_other_state->crtc_w != new_other_state->crtc_w ||
9688 old_other_state->crtc_h != new_other_state->crtc_h)
9689 return true;
9690
9691 /* Rotation / mirroring updates. */
9692 if (old_other_state->rotation != new_other_state->rotation)
9693 return true;
9694
9695 /* Blending updates. */
9696 if (old_other_state->pixel_blend_mode !=
9697 new_other_state->pixel_blend_mode)
9698 return true;
9699
9700 /* Alpha updates. */
9701 if (old_other_state->alpha != new_other_state->alpha)
9702 return true;
9703
9704 /* Colorspace changes. */
9705 if (old_other_state->color_range != new_other_state->color_range ||
9706 old_other_state->color_encoding != new_other_state->color_encoding)
9707 return true;
9708
9a81cc60
NK
9709 /* Framebuffer checks fall at the end. */
9710 if (!old_other_state->fb || !new_other_state->fb)
9711 continue;
9712
9713 /* Pixel format changes can require bandwidth updates. */
9714 if (old_other_state->fb->format != new_other_state->fb->format)
9715 return true;
9716
6eed95b0
BN
9717 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9718 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
9719
9720 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
9721 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9722 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
9723 return true;
9724 }
9725
9726 return false;
9727}
9728
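/*
 * Illustrative case (not from the source): rotating an overlay plane
 * from 0 to 180 degrees while the primary plane is untouched still
 * makes should_reset_plane() return true for every plane on that CRTC
 * (assuming all of them are part of the atomic state), so DC removes
 * and re-adds all of them to keep pipe and z-order state consistent.
 */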
b0455fda
SS
9729static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9730 struct drm_plane_state *new_plane_state,
9731 struct drm_framebuffer *fb)
9732{
e72868c4
SS
9733 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9734 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 9735 unsigned int pitch;
e72868c4 9736 bool linear;
b0455fda
SS
9737
9738 if (fb->width > new_acrtc->max_cursor_width ||
9739 fb->height > new_acrtc->max_cursor_height) {
9740 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9741 new_plane_state->fb->width,
9742 new_plane_state->fb->height);
9743 return -EINVAL;
9744 }
9745 if (new_plane_state->src_w != fb->width << 16 ||
9746 new_plane_state->src_h != fb->height << 16) {
9747 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9748 return -EINVAL;
9749 }
9750
9751 /* Pitch in pixels */
9752 pitch = fb->pitches[0] / fb->format->cpp[0];
9753
9754 if (fb->width != pitch) {
9755	 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9756 fb->width, pitch);
9757 return -EINVAL;
9758 }
9759
9760 switch (pitch) {
9761 case 64:
9762 case 128:
9763 case 256:
9764 /* FB pitch is supported by cursor plane */
9765 break;
9766 default:
9767 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9768 return -EINVAL;
9769 }
9770
e72868c4
SS
9771 /* Core DRM takes care of checking FB modifiers, so we only need to
9772 * check tiling flags when the FB doesn't have a modifier. */
9773 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9774 if (adev->family < AMDGPU_FAMILY_AI) {
9775 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9776 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9777 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9778 } else {
9779 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9780 }
9781 if (!linear) {
9782	 DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9783 return -EINVAL;
9784 }
9785 }
9786
b0455fda
SS
9787 return 0;
9788}
9789
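/*
 * A minimal sketch (hypothetical helper, not part of the driver) of the
 * pitch conversion checked above: the byte pitch of plane 0 is converted
 * to pixels, and only 64/128/256-pixel pitches are valid for the cursor.
 */
static inline unsigned int dm_cursor_pitch_in_pixels(const struct drm_framebuffer *fb)
{
	/* e.g. pitches[0] = 256 bytes with 4 bytes per pixel -> 64 px */
	return fb->pitches[0] / fb->format->cpp[0];
}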
9e869063
LL
9790static int dm_update_plane_state(struct dc *dc,
9791 struct drm_atomic_state *state,
9792 struct drm_plane *plane,
9793 struct drm_plane_state *old_plane_state,
9794 struct drm_plane_state *new_plane_state,
9795 bool enable,
9796 bool *lock_and_validation_needed)
62f55537 9797{
eb3dc897
NK
9798
9799 struct dm_atomic_state *dm_state = NULL;
62f55537 9800 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 9801 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 9802 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 9803 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 9804 struct amdgpu_crtc *new_acrtc;
f6ff2a08 9805 bool needs_reset;
62f55537 9806 int ret = 0;
e7b07cee 9807
9b690ef3 9808
9e869063
LL
9809 new_plane_crtc = new_plane_state->crtc;
9810 old_plane_crtc = old_plane_state->crtc;
9811 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9812 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 9813
626bf90f
SS
9814 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9815 if (!enable || !new_plane_crtc ||
9816 drm_atomic_plane_disabling(plane->state, new_plane_state))
9817 return 0;
9818
9819 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9820
5f581248
SS
9821 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9822 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9823 return -EINVAL;
9824 }
9825
24f99d2b 9826 if (new_plane_state->fb) {
b0455fda
SS
9827 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9828 new_plane_state->fb);
9829 if (ret)
9830 return ret;
24f99d2b
SS
9831 }
9832
9e869063 9833 return 0;
626bf90f 9834 }
9b690ef3 9835
f6ff2a08
NK
9836 needs_reset = should_reset_plane(state, plane, old_plane_state,
9837 new_plane_state);
9838
9e869063
LL
9839 /* Remove any changed/removed planes */
9840 if (!enable) {
f6ff2a08 9841 if (!needs_reset)
9e869063 9842 return 0;
a7b06724 9843
9e869063
LL
9844 if (!old_plane_crtc)
9845 return 0;
62f55537 9846
9e869063
LL
9847 old_crtc_state = drm_atomic_get_old_crtc_state(
9848 state, old_plane_crtc);
9849 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 9850
9e869063
LL
9851 if (!dm_old_crtc_state->stream)
9852 return 0;
62f55537 9853
9e869063
LL
9854 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9855 plane->base.id, old_plane_crtc->base.id);
9b690ef3 9856
9e869063
LL
9857 ret = dm_atomic_get_state(state, &dm_state);
9858 if (ret)
9859 return ret;
eb3dc897 9860
9e869063
LL
9861 if (!dc_remove_plane_from_context(
9862 dc,
9863 dm_old_crtc_state->stream,
9864 dm_old_plane_state->dc_state,
9865 dm_state->context)) {
62f55537 9866
c3537613 9867 return -EINVAL;
9e869063 9868 }
e7b07cee 9869
9b690ef3 9870
9e869063
LL
9871 dc_plane_state_release(dm_old_plane_state->dc_state);
9872 dm_new_plane_state->dc_state = NULL;
1dc90497 9873
9e869063 9874 *lock_and_validation_needed = true;
1dc90497 9875
9e869063
LL
9876 } else { /* Add new planes */
9877 struct dc_plane_state *dc_new_plane_state;
1dc90497 9878
9e869063
LL
9879 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9880 return 0;
e7b07cee 9881
9e869063
LL
9882 if (!new_plane_crtc)
9883 return 0;
e7b07cee 9884
9e869063
LL
9885 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9886 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 9887
9e869063
LL
9888 if (!dm_new_crtc_state->stream)
9889 return 0;
62f55537 9890
f6ff2a08 9891 if (!needs_reset)
9e869063 9892 return 0;
62f55537 9893
8c44515b
AP
9894 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9895 if (ret)
9896 return ret;
9897
9e869063 9898 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 9899
9e869063
LL
9900 dc_new_plane_state = dc_create_plane_state(dc);
9901 if (!dc_new_plane_state)
9902 return -ENOMEM;
62f55537 9903
4711c033
LT
9904 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9905 plane->base.id, new_plane_crtc->base.id);
8c45c5db 9906
695af5f9 9907 ret = fill_dc_plane_attributes(
1348969a 9908 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
9909 dc_new_plane_state,
9910 new_plane_state,
9911 new_crtc_state);
9912 if (ret) {
9913 dc_plane_state_release(dc_new_plane_state);
9914 return ret;
9915 }
62f55537 9916
9e869063
LL
9917 ret = dm_atomic_get_state(state, &dm_state);
9918 if (ret) {
9919 dc_plane_state_release(dc_new_plane_state);
9920 return ret;
9921 }
eb3dc897 9922
9e869063
LL
9923 /*
9924 * Any atomic check errors that occur after this will
9925 * not need a release. The plane state will be attached
9926 * to the stream, and therefore part of the atomic
9927 * state. It'll be released when the atomic state is
9928 * cleaned.
9929 */
9930 if (!dc_add_plane_to_context(
9931 dc,
9932 dm_new_crtc_state->stream,
9933 dc_new_plane_state,
9934 dm_state->context)) {
62f55537 9935
9e869063
LL
9936 dc_plane_state_release(dc_new_plane_state);
9937 return -EINVAL;
9938 }
8c45c5db 9939
9e869063 9940 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 9941
9e869063
LL
9942 /* Tell DC to do a full surface update every time there
9943 * is a plane change. Inefficient, but works for now.
9944 */
9945 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9946
9947 *lock_and_validation_needed = true;
62f55537 9948 }
e7b07cee
HW
9949
62f55537
AG
9951 return ret;
9952}
a87fa993 9953
12f4849a
SS
9954static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9955 struct drm_crtc *crtc,
9956 struct drm_crtc_state *new_crtc_state)
9957{
9958 struct drm_plane_state *new_cursor_state, *new_primary_state;
9959 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9960
9961	 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9962	 * cursor per pipe, but it inherits the scaling and positioning from the
9963	 * underlying pipe. Check that the cursor plane's scaling matches the
9964	 * primary plane's. */
9965
9966 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9967 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
7df4ceb6
SE
9968 if (!new_cursor_state || !new_primary_state ||
9969 !new_cursor_state->fb || !new_primary_state->fb) {
12f4849a
SS
9970 return 0;
9971 }
9972
9973 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9974 (new_cursor_state->src_w >> 16);
9975 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9976 (new_cursor_state->src_h >> 16);
9977
9978 primary_scale_w = new_primary_state->crtc_w * 1000 /
9979 (new_primary_state->src_w >> 16);
9980 primary_scale_h = new_primary_state->crtc_h * 1000 /
9981 (new_primary_state->src_h >> 16);
9982
9983 if (cursor_scale_w != primary_scale_w ||
9984 cursor_scale_h != primary_scale_h) {
9985 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9986 return -EINVAL;
9987 }
9988
9989 return 0;
9990}
9991
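/*
 * A minimal sketch (hypothetical helper, not part of the driver)
 * restating the fixed-point math used above: plane source sizes are
 * 16.16 fixed point, and scales are compared in units of 1/1000.
 */
static inline int dm_plane_scale_permille(unsigned int crtc_size,
					  uint32_t src_size_fp)
{
	/* e.g. crtc_w = 128, src_w = 64 << 16 -> 2000, i.e. a 2.0x upscale */
	return crtc_size * 1000 / (src_size_fp >> 16);
}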
e10517b3 9992#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
9993static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9994{
9995 struct drm_connector *connector;
9996 struct drm_connector_state *conn_state;
9997 struct amdgpu_dm_connector *aconnector = NULL;
9998 int i;
9999 for_each_new_connector_in_state(state, connector, conn_state, i) {
10000 if (conn_state->crtc != crtc)
10001 continue;
10002
10003 aconnector = to_amdgpu_dm_connector(connector);
10004 if (!aconnector->port || !aconnector->mst_port)
10005 aconnector = NULL;
10006 else
10007 break;
10008 }
10009
10010 if (!aconnector)
10011 return 0;
10012
10013 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10014}
e10517b3 10015#endif
44be939f 10016
16e9b3e5
RS
10017static int validate_overlay(struct drm_atomic_state *state)
10018{
10019 int i;
10020 struct drm_plane *plane;
10021 struct drm_plane_state *old_plane_state, *new_plane_state;
10022 struct drm_plane_state *primary_state, *overlay_state = NULL;
10023
10024 /* Check if primary plane is contained inside overlay */
10025 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10026 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10027 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10028 return 0;
10029
10030 overlay_state = new_plane_state;
10031 continue;
10032 }
10033 }
10034
10035 /* check if we're making changes to the overlay plane */
10036 if (!overlay_state)
10037 return 0;
10038
10039 /* check if overlay plane is enabled */
10040 if (!overlay_state->crtc)
10041 return 0;
10042
10043 /* find the primary plane for the CRTC that the overlay is enabled on */
10044 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10045 if (IS_ERR(primary_state))
10046 return PTR_ERR(primary_state);
10047
10048 /* check if primary plane is enabled */
10049 if (!primary_state->crtc)
10050 return 0;
10051
10052 /* Perform the bounds check to ensure the overlay plane covers the primary */
10053 if (primary_state->crtc_x < overlay_state->crtc_x ||
10054 primary_state->crtc_y < overlay_state->crtc_y ||
10055 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10056 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10057 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10058 return -EINVAL;
10059 }
10060
10061 return 0;
10062}
10063
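/*
 * Example (hypothetical geometry) for the bounds check above: with the
 * overlay at (0,0) 1920x1080, a primary plane at (0,0) 1920x1080
 * passes, while a primary at (0,0) 2560x1440 fails because its right
 * and bottom edges extend past the overlay.
 */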
b8592b48
LL
10064/**
10065 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDGPU DM.
10066 * @dev: The DRM device
10067 * @state: The atomic state to commit
10068 *
10069 * Validate that the given atomic state is programmable by DC into hardware.
10070 * This involves constructing a &struct dc_state reflecting the new hardware
10071 * state we wish to commit, then querying DC to see if it is programmable. It's
10072 * important not to modify the existing DC state. Otherwise, atomic_check
10073 * may unexpectedly commit hardware changes.
10074 *
10075 * When validating the DC state, it's important that the right locks are
10076 * acquired. For full updates case which removes/adds/updates streams on one
10077 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10078 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 10079 * flip using DRM's synchronization events.
b8592b48
LL
10080 *
10081 * Note that DM adds the affected connectors for all CRTCs in state, even
10082 * when that might not seem necessary. This is because DC stream creation requires the
10083 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10084 * be possible but non-trivial - a possible TODO item.
10085 *
10086 * Return: 0 on success, or a negative error code if validation failed.
10087 */
7578ecda
AD
10088static int amdgpu_dm_atomic_check(struct drm_device *dev,
10089 struct drm_atomic_state *state)
62f55537 10090{
1348969a 10091 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 10092 struct dm_atomic_state *dm_state = NULL;
62f55537 10093 struct dc *dc = adev->dm.dc;
62f55537 10094 struct drm_connector *connector;
c2cea706 10095 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 10096 struct drm_crtc *crtc;
fc9e9920 10097 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
10098 struct drm_plane *plane;
10099 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 10100 enum dc_status status;
1e88ad0a 10101 int ret, i;
62f55537 10102 bool lock_and_validation_needed = false;
886876ec 10103 struct dm_crtc_state *dm_old_crtc_state;
62f55537 10104
e8a98235 10105 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 10106
62f55537 10107 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
10108 if (ret)
10109 goto fail;
62f55537 10110
c5892a10
SW
10111 /* Check connector changes */
10112 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10113 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10114 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10115
10116 /* Skip connectors that are disabled or part of modeset already. */
10117 if (!old_con_state->crtc && !new_con_state->crtc)
10118 continue;
10119
10120 if (!new_con_state->crtc)
10121 continue;
10122
10123 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10124 if (IS_ERR(new_crtc_state)) {
10125 ret = PTR_ERR(new_crtc_state);
10126 goto fail;
10127 }
10128
10129 if (dm_old_con_state->abm_level !=
10130 dm_new_con_state->abm_level)
10131 new_crtc_state->connectors_changed = true;
10132 }
10133
e10517b3 10134#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 10135 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
10136 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10137 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10138 ret = add_affected_mst_dsc_crtcs(state, crtc);
10139 if (ret)
10140 goto fail;
10141 }
10142 }
10143 }
e10517b3 10144#endif
1e88ad0a 10145 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
10146 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10147
1e88ad0a 10148 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 10149 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
10150 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10151 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 10152 continue;
7bef1af3 10153
1e88ad0a
S
10154 if (!new_crtc_state->enable)
10155 continue;
fc9e9920 10156
1e88ad0a
S
10157 ret = drm_atomic_add_affected_connectors(state, crtc);
10158 if (ret)
10159 return ret;
fc9e9920 10160
1e88ad0a
S
10161 ret = drm_atomic_add_affected_planes(state, crtc);
10162 if (ret)
10163 goto fail;
115a385c 10164
cbac53f7 10165 if (dm_old_crtc_state->dsc_force_changed)
115a385c 10166 new_crtc_state->mode_changed = true;
e7b07cee
HW
10167 }
10168
2d9e6431
NK
10169 /*
10170 * Add all primary and overlay planes on the CRTC to the state
10171 * whenever a plane is enabled to maintain correct z-ordering
10172 * and to enable fast surface updates.
10173 */
10174 drm_for_each_crtc(crtc, dev) {
10175 bool modified = false;
10176
10177 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10178 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10179 continue;
10180
10181 if (new_plane_state->crtc == crtc ||
10182 old_plane_state->crtc == crtc) {
10183 modified = true;
10184 break;
10185 }
10186 }
10187
10188 if (!modified)
10189 continue;
10190
10191 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10192 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10193 continue;
10194
10195 new_plane_state =
10196 drm_atomic_get_plane_state(state, plane);
10197
10198 if (IS_ERR(new_plane_state)) {
10199 ret = PTR_ERR(new_plane_state);
10200 goto fail;
10201 }
10202 }
10203 }
10204
62f55537 10205 /* Remove existing planes if they are modified */
9e869063
LL
10206 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10207 ret = dm_update_plane_state(dc, state, plane,
10208 old_plane_state,
10209 new_plane_state,
10210 false,
10211 &lock_and_validation_needed);
10212 if (ret)
10213 goto fail;
62f55537
AG
10214 }
10215
10216 /* Disable all CRTCs that require it */
4b9674e5
LL
10217 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10218 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10219 old_crtc_state,
10220 new_crtc_state,
10221 false,
10222 &lock_and_validation_needed);
10223 if (ret)
10224 goto fail;
62f55537
AG
10225 }
10226
10227 /* Enable all CRTCs that require it */
4b9674e5
LL
10228 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10229 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10230 old_crtc_state,
10231 new_crtc_state,
10232 true,
10233 &lock_and_validation_needed);
10234 if (ret)
10235 goto fail;
62f55537
AG
10236 }
10237
16e9b3e5
RS
10238 ret = validate_overlay(state);
10239 if (ret)
10240 goto fail;
10241
62f55537 10242 /* Add new/modified planes */
9e869063
LL
10243 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10244 ret = dm_update_plane_state(dc, state, plane,
10245 old_plane_state,
10246 new_plane_state,
10247 true,
10248 &lock_and_validation_needed);
10249 if (ret)
10250 goto fail;
62f55537
AG
10251 }
10252
b349f76e
ES
10253 /* Run this here since we want to validate the streams we created */
10254 ret = drm_atomic_helper_check_planes(dev, state);
10255 if (ret)
10256 goto fail;
62f55537 10257
12f4849a
SS
10258 /* Check cursor planes scaling */
10259 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10260 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10261 if (ret)
10262 goto fail;
10263 }
10264
43d10d30
NK
10265 if (state->legacy_cursor_update) {
10266 /*
10267 * This is a fast cursor update coming from the plane update
10268 * helper, check if it can be done asynchronously for better
10269 * performance.
10270 */
10271 state->async_update =
10272 !drm_atomic_helper_async_check(dev, state);
10273
10274 /*
10275 * Skip the remaining global validation if this is an async
10276 * update. Cursor updates can be done without affecting
10277 * state or bandwidth calcs and this avoids the performance
10278 * penalty of locking the private state object and
10279 * allocating a new dc_state.
10280 */
10281 if (state->async_update)
10282 return 0;
10283 }
10284
ebdd27e1 10285 /* Check scaling and underscan changes */
1f6010a9 10286 /* TODO: Scaling-change validation was removed because a new stream
e7b07cee
HW
10287 * cannot currently be committed into the context w/o causing a full
10288 * reset. Need to decide how to handle this.
10289 */
c2cea706 10290 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
10291 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10292 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10293 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
10294
10295 /* Skip any modesets/resets */
0bc9706d
LSL
10296 if (!acrtc || drm_atomic_crtc_needs_modeset(
10297 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
10298 continue;
10299
b830ebc9 10300 /* Skip anything that is not a scaling or underscan change */
54d76575 10301 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
10302 continue;
10303
10304 lock_and_validation_needed = true;
10305 }
10306
f6d7c7fa
NK
10307 /*
10308 * Streams and planes are reset when there are changes that affect
10309 * bandwidth. Anything that affects bandwidth needs to go through
10310 * DC global validation to ensure that the configuration can be applied
10311 * to hardware.
10312 *
10313 * We have to currently stall out here in atomic_check for outstanding
10314 * commits to finish in this case because our IRQ handlers reference
10315 * DRM state directly - we can end up disabling interrupts too early
10316 * if we don't.
10317 *
10318 * TODO: Remove this stall and drop DM state private objects.
a87fa993 10319 */
f6d7c7fa 10320 if (lock_and_validation_needed) {
eb3dc897
NK
10321 ret = dm_atomic_get_state(state, &dm_state);
10322 if (ret)
10323 goto fail;
e7b07cee
HW
10324
10325 ret = do_aquire_global_lock(dev, state);
10326 if (ret)
10327 goto fail;
1dc90497 10328
d9fe1a4c 10329#if defined(CONFIG_DRM_AMD_DC_DCN)
8c20a1ed
DF
10330 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
10330 ret = -EINVAL;
10331 goto fail;
10331 }
10332
29b9ba74
ML
10333 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10334 if (ret)
10335 goto fail;
d9fe1a4c 10336#endif
29b9ba74 10337
ded58c7b
ZL
10338 /*
10339 * Perform validation of MST topology in the state:
10340 * We need to perform MST atomic check before calling
10341 * dc_validate_global_state(), or there is a chance
10342 * to get stuck in an infinite loop and hang eventually.
10343 */
10344 ret = drm_dp_mst_atomic_check(state);
10345 if (ret)
10346 goto fail;
74a16675
RS
10347 status = dc_validate_global_state(dc, dm_state->context, false);
10348 if (status != DC_OK) {
10349 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10350 dc_status_to_str(status), status);
e7b07cee
HW
10351 ret = -EINVAL;
10352 goto fail;
10353 }
bd200d19 10354 } else {
674e78ac 10355 /*
bd200d19
NK
10356 * The commit is a fast update. Fast updates shouldn't change
10357 * the DC context, affect global validation, and can have their
10358 * commit work done in parallel with other commits not touching
10359 * the same resource. If we have a new DC context as part of
10360 * the DM atomic state from validation we need to free it and
10361 * retain the existing one instead.
fde9f39a
MR
10362 *
10363 * Furthermore, since the DM atomic state only contains the DC
10364 * context and can safely be annulled, we can free the state
10365 * and clear the associated private object now to free
10366 * some memory and avoid a possible use-after-free later.
674e78ac 10367 */
bd200d19 10368
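 /*
 * Worked example (assumed state): with three private objects where the
 * DM object sits at i == 0, its state is destroyed, the object at
 * j == 2 is moved into slot 0, slot 2 is cleared, and num_private_objs
 * drops to 2, leaving no stale reference to the freed DC context.
 */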
fde9f39a
MR
10369 for (i = 0; i < state->num_private_objs; i++) {
10370 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 10371
fde9f39a
MR
10372 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10373 int j = state->num_private_objs-1;
bd200d19 10374
fde9f39a
MR
10375 dm_atomic_destroy_state(obj,
10376 state->private_objs[i].state);
10377
10378 /* If i is not at the end of the array then the
10379 * last element needs to be moved to where i was
10380 * before the array can safely be truncated.
10381 */
10382 if (i != j)
10383 state->private_objs[i] =
10384 state->private_objs[j];
bd200d19 10385
fde9f39a
MR
10386 state->private_objs[j].ptr = NULL;
10387 state->private_objs[j].state = NULL;
10388 state->private_objs[j].old_state = NULL;
10389 state->private_objs[j].new_state = NULL;
10390
10391 state->num_private_objs = j;
10392 break;
10393 }
bd200d19 10394 }
e7b07cee
HW
10395 }
10396
caff0e66
NK
10397 /* Store the overall update type for use later in atomic check. */
10398 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10399 struct dm_crtc_state *dm_new_crtc_state =
10400 to_dm_crtc_state(new_crtc_state);
10401
f6d7c7fa
NK
10402 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10403 UPDATE_TYPE_FULL :
10404 UPDATE_TYPE_FAST;
e7b07cee
HW
10405 }
10406
10407 /* Must be success */
10408 WARN_ON(ret);
e8a98235
RS
10409
10410 trace_amdgpu_dm_atomic_check_finish(state, ret);
10411
e7b07cee
HW
10412 return ret;
10413
10414fail:
10415 if (ret == -EDEADLK)
01e28f9c 10416 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 10417 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 10418 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 10419 else
01e28f9c 10420 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee 10421
e8a98235
RS
10422 trace_amdgpu_dm_atomic_check_finish(state, ret);
10423
e7b07cee
HW
10424 return ret;
10425}
10426
3ee6b26b
AD
10427static bool is_dp_capable_without_timing_msa(struct dc *dc,
10428 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
10429{
10430 uint8_t dpcd_data;
10431 bool capable = false;
10432
c84dec2f 10433 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
10434 dm_helpers_dp_read_dpcd(
10435 NULL,
c84dec2f 10436 amdgpu_dm_connector->dc_link,
e7b07cee
HW
10437 DP_DOWN_STREAM_PORT_COUNT,
10438 &dpcd_data,
10439 sizeof(dpcd_data))) {
10440 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
10441 }
10442
10443 return capable;
10444}
f9b4f20c
SW
10445
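/*
 * Example: DP_DOWN_STREAM_PORT_COUNT is DPCD address 0x0005 and
 * DP_MSA_TIMING_PAR_IGNORED is bit 6 of that byte; a sink reporting
 * e.g. 0x41 can ignore MSA timing parameters and is therefore a
 * variable-refresh candidate, so the caller goes on to parse the EDID
 * range descriptor for its supported refresh range.
 */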
10446static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10447 uint8_t *edid_ext, int len,
10448 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10449{
10450 int i;
10451 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10452 struct dc *dc = adev->dm.dc;
10453
10454 /* send extension block to DMCU for parsing */
10455 for (i = 0; i < len; i += 8) {
10456 bool res;
10457 int offset;
10458
10459 /* send 8 bytes a time */
10460 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10461 return false;
10462
10463 if (i+8 == len) {
10464 /* EDID block sent completed, expect result */
10465 int version, min_rate, max_rate;
10466
10467 res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10468 if (res) {
10469 /* amd vsdb found */
10470 vsdb_info->freesync_supported = 1;
10471 vsdb_info->amd_vsdb_version = version;
10472 vsdb_info->min_refresh_rate_hz = min_rate;
10473 vsdb_info->max_refresh_rate_hz = max_rate;
10474 return true;
10475 }
10476 /* not amd vsdb */
10477 return false;
10478 }
10479
10480 /* check for ack*/
10481 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10482 if (!res)
10483 return false;
10484 }
10485
10486 return false;
10487}
10488
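/*
 * Illustrative trace (assuming a standard 128-byte CEA block):
 * parse_edid_cea() streams 16 chunks of 8 bytes to the DMCU parser,
 * expecting an ack after each intermediate chunk and the AMD VSDB
 * result only after the final chunk (i + 8 == len).
 */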
7c7dd774 10489static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
f9b4f20c
SW
10490 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10491{
10492 uint8_t *edid_ext = NULL;
10493 int i;
10494 bool valid_vsdb_found = false;
10495
10496 /*----- drm_find_cea_extension() -----*/
10497 /* No EDID or EDID extensions */
10498 if (edid == NULL || edid->extensions == 0)
7c7dd774 10499 return -ENODEV;
f9b4f20c
SW
10500
10501 /* Find CEA extension */
10502 for (i = 0; i < edid->extensions; i++) {
10503 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10504 if (edid_ext[0] == CEA_EXT)
10505 break;
10506 }
10507
10508 if (i == edid->extensions)
7c7dd774 10509 return -ENODEV;
f9b4f20c
SW
10510
10511 /*----- cea_db_offsets() -----*/
10512 if (edid_ext[0] != CEA_EXT)
7c7dd774 10513 return -ENODEV;
f9b4f20c
SW
10514
10515 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
7c7dd774
AB
10516
10517 return valid_vsdb_found ? i : -ENODEV;
f9b4f20c
SW
10518}
10519
98e6436d
AK
10520void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10521 struct edid *edid)
e7b07cee 10522{
eb0709ba 10523 int i = 0;
e7b07cee
HW
10524 struct detailed_timing *timing;
10525 struct detailed_non_pixel *data;
10526 struct detailed_data_monitor_range *range;
c84dec2f
HW
10527 struct amdgpu_dm_connector *amdgpu_dm_connector =
10528 to_amdgpu_dm_connector(connector);
bb47de73 10529 struct dm_connector_state *dm_con_state = NULL;
e7b07cee
HW
10530
10531 struct drm_device *dev = connector->dev;
1348969a 10532 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 10533 bool freesync_capable = false;
f9b4f20c 10534 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
b830ebc9 10535
8218d7f1
HW
10536 if (!connector->state) {
10537 DRM_ERROR("%s - Connector has no state\n", __func__);
bb47de73 10538 goto update;
8218d7f1
HW
10539 }
10540
98e6436d
AK
10541 if (!edid) {
10542 dm_con_state = to_dm_connector_state(connector->state);
10543
10544 amdgpu_dm_connector->min_vfreq = 0;
10545 amdgpu_dm_connector->max_vfreq = 0;
10546 amdgpu_dm_connector->pixel_clock_mhz = 0;
10547
bb47de73 10548 goto update;
98e6436d
AK
10549 }
10550
8218d7f1
HW
10551 dm_con_state = to_dm_connector_state(connector->state);
10552
c84dec2f 10553 if (!amdgpu_dm_connector->dc_sink) {
e7b07cee 10554 DRM_ERROR("dc_sink NULL, could not update FreeSync capability.\n");
bb47de73 10555 goto update;
e7b07cee
HW
10556 }
10557 if (!adev->dm.freesync_module)
bb47de73 10558 goto update;
f9b4f20c
SW
10559
10561 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10562 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10563 bool edid_check_required = false;
10564
10565 if (edid) {
e7b07cee
HW
10566 edid_check_required = is_dp_capable_without_timing_msa(
10567 adev->dm.dc,
c84dec2f 10568 amdgpu_dm_connector);
e7b07cee 10569 }
e7b07cee 10570
f9b4f20c
SW
10571 if (edid_check_required && (edid->version > 1 ||
10572 (edid->version == 1 && edid->revision > 1))) {
10573 for (i = 0; i < 4; i++) {
e7b07cee 10574
f9b4f20c
SW
10575 timing = &edid->detailed_timings[i];
10576 data = &timing->data.other_data;
10577 range = &data->data.range;
10578 /*
10579 * Check if monitor has continuous frequency mode
10580 */
10581 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10582 continue;
10583 /*
10584 * Check for range-limits-only flags. If flags == 1, no
10585 * additional timing information is provided.
10586 * Default GTF, GTF secondary curve and CVT are not
10587 * supported.
10588 */
10589 if (range->flags != 1)
10590 continue;
a0ffc3fd 10591
f9b4f20c
SW
10592 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10593 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10594 amdgpu_dm_connector->pixel_clock_mhz =
10595 range->pixel_clock_mhz * 10;
a0ffc3fd 10596
f9b4f20c
SW
10597 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10598 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
e7b07cee 10599
f9b4f20c
SW
10600 break;
10601 }
98e6436d 10602
f9b4f20c
SW
10603 if (amdgpu_dm_connector->max_vfreq -
10604 amdgpu_dm_connector->min_vfreq > 10) {
98e6436d 10605
f9b4f20c
SW
10606 freesync_capable = true;
10607 }
10608 }
10609 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
7c7dd774
AB
10610 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10611 if (i >= 0 && vsdb_info.freesync_supported) {
f9b4f20c
SW
10612 timing = &edid->detailed_timings[i];
10613 data = &timing->data.other_data;
10614
10615 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10616 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10617 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10618 freesync_capable = true;
10619
10620 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10621 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
e7b07cee
HW
10622 }
10623 }
bb47de73
NK
10624
10625update:
10626 if (dm_con_state)
10627 dm_con_state->freesync_capable = freesync_capable;
10628
10629 if (connector->vrr_capable_property)
10630 drm_connector_set_vrr_capable_property(connector,
10631 freesync_capable);
e7b07cee
HW
10632}
10633
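/*
 * Example: an EDID range descriptor advertising 48-144 Hz gives
 * max_vfreq - min_vfreq = 96 > 10, so freesync_capable is set above;
 * a near-fixed 59-60 Hz range is not treated as a usable VRR window.
 */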
8c322309
RL
10634static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10635{
10636 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10637
10638 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10639 return;
10640 if (link->type == dc_connection_none)
10641 return;
10642 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10643 dpcd_data, sizeof(dpcd_data))) {
d1ebfdd8
WW
10644 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10645
10646 if (dpcd_data[0] == 0) {
1cfbbdde 10647 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
d1ebfdd8
WW
10648 link->psr_settings.psr_feature_enabled = false;
10649 } else {
1cfbbdde 10650 link->psr_settings.psr_version = DC_PSR_VERSION_1;
d1ebfdd8
WW
10651 link->psr_settings.psr_feature_enabled = true;
10652 }
10653
10654 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
10655 }
10656}
10657
10658/*
10659 * amdgpu_dm_link_setup_psr() - configure psr link
10660 * @stream: stream state
10661 *
10662 * Return: true if success
10663 */
10664static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10665{
10666 struct dc_link *link = NULL;
10667 struct psr_config psr_config = {0};
10668 struct psr_context psr_context = {0};
8c322309
RL
10669 bool ret = false;
10670
10671 if (stream == NULL)
10672 return false;
10673
10674 link = stream->link;
8c322309 10675
d1ebfdd8 10676 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8c322309
RL
10677
10678 if (psr_config.psr_version > 0) {
10679 psr_config.psr_exit_link_training_required = 0x1;
10680 psr_config.psr_frame_capture_indication_req = 0;
10681 psr_config.psr_rfb_setup_time = 0x37;
10682 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10683 psr_config.allow_smu_optimizations = 0x0;
10684
10685 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10686
10687 }
d1ebfdd8 10688 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
10689
10690 return ret;
10691}
10692
10693/*
10694 * amdgpu_dm_psr_enable() - enable psr f/w
10695 * @stream: stream state
10696 *
10697 * Return: true if success
10698 */
10699bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10700{
10701 struct dc_link *link = stream->link;
5b5abe95
AK
10702 unsigned int vsync_rate_hz = 0;
10703 struct dc_static_screen_params params = {0};
10704 /* Calculate number of static frames before generating interrupt to
10705 * enter PSR.
10706 */
5b5abe95
AK
10707 /* Fail-safe default of 2 static frames */
10708 unsigned int num_frames_static = 2;
8c322309
RL
10709
10710 DRM_DEBUG_DRIVER("Enabling psr...\n");
10711
5b5abe95
AK
10712 vsync_rate_hz = div64_u64(div64_u64((
10713 stream->timing.pix_clk_100hz * 100),
10714 stream->timing.v_total),
10715 stream->timing.h_total);
10716
10717 /* Round up:
10718 * calculate the number of frames such that at least 30 ms of static
10719 * time has passed.
10720 */
7aa62404
RL
10721 if (vsync_rate_hz != 0) {
10722 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
5b5abe95 10723 num_frames_static = (30000 / frame_time_microsec) + 1;
7aa62404 10724 }
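	/*
	 * Worked example: a 60 Hz stream gives frame_time_microsec =
	 * 1000000 / 60 = 16666, so num_frames_static = (30000 / 16666) + 1
	 * = 2 frames, i.e. roughly 33 ms of static content before PSR entry.
	 */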
5b5abe95
AK
10725
10726 params.triggers.cursor_update = true;
10727 params.triggers.overlay_update = true;
10728 params.triggers.surface_update = true;
10729 params.num_frames = num_frames_static;
8c322309 10730
5b5abe95 10731 dc_stream_set_static_screen_params(link->ctx->dc,
8c322309 10732 &stream, 1,
5b5abe95 10733 &params);
8c322309 10734
1d496907 10735 return dc_link_set_psr_allow_active(link, true, false, false);
8c322309
RL
10736}
10737
10738/*
10739 * amdgpu_dm_psr_disable() - disable psr f/w
10740 * @stream: stream state
10741 *
10742 * Return: true if success
10743 */
10744static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10745{
10746
10747 DRM_DEBUG_DRIVER("Disabling psr...\n");
10748
1d496907 10749 return dc_link_set_psr_allow_active(stream->link, false, true, false);
8c322309 10750}
3d4e52d0 10751
6ee90e88 10752/*
10753 * amdgpu_dm_psr_disable_all() - disable psr f/w
10754 * if psr is enabled on any stream
10755 *
10756 * Return: true if success
10757 */
10758static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10759{
10760 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10761 return dc_set_psr_allow_active(dm->dc, false);
10762}
10763
3d4e52d0
VL
10764void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10765{
1348969a 10766 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
10767 struct dc *dc = adev->dm.dc;
10768 int i;
10769
10770 mutex_lock(&adev->dm.dc_lock);
10771 if (dc->current_state) {
10772 for (i = 0; i < dc->current_state->stream_count; ++i)
10773 dc->current_state->streams[i]
10774 ->triggered_crtc_reset.enabled =
10775 adev->dm.force_timing_sync;
10776
10777 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10778 dc_trigger_sync(dc, dc->current_state);
10779 }
10780 mutex_unlock(&adev->dm.dc_lock);
10781}
9d83722d
RS
10782
10783void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10784 uint32_t value, const char *func_name)
10785{
10786#ifdef DM_CHECK_ADDR_0
10787 if (address == 0) {
10788 DC_ERR("invalid register write; address = 0\n");
10789 return;
10790 }
10791#endif
10792 cgs_write_register(ctx->cgs_device, address, value);
10793 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10794}
10795
10796uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10797 const char *func_name)
10798{
10799 uint32_t value;
10800#ifdef DM_CHECK_ADDR_0
10801 if (address == 0) {
10802 DC_ERR("invalid register read; address = 0\n");
10803 return 0;
10804 }
10805#endif
10806
10807 if (ctx->dmub_srv &&
10808 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10809 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10810 ASSERT(false);
10811 return 0;
10812 }
10813
10814 value = cgs_read_register(ctx->cgs_device, address);
10815
10816 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10817
10818 return value;
10819}
81927e28
JS
10820
10821int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10822 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10823{
10824 struct amdgpu_device *adev = ctx->driver_context;
10825 int ret = 0;
10826
10827 dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10828 ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10829 if (ret == 0) {
10830 *operation_result = AUX_RET_ERROR_TIMEOUT;
10831 return -1;
10832 }
10833 *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10834
10835 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10836 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10837
10838 /* For the read case, copy data to the payload */
10839 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10840 (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10841 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10842 adev->dm.dmub_notify->aux_reply.length);
10843 }
10844
10845 return adev->dm.dmub_notify->aux_reply.length;
10846}
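/*
 * Usage sketch (hypothetical caller): a DPCD read routed through the
 * DMUB path would fill an aux_payload with write == false, call
 * amdgpu_dm_process_dmub_aux_transfer_sync(), and on AUX_RET_SUCCESS
 * find the sink's reply bytes copied into payload->data with the reply
 * length as the return value; a completion timeout after 10 s instead
 * sets AUX_RET_ERROR_TIMEOUT and returns -1.
 */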