drm/amd/display: do not dereference on NULL
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

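/*
 * Map the dongle type reported in the link's DPCD caps to the DRM
 * subconnector type exposed through the DP "subconnector" property.
 */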
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

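/*
 * Refresh the DP subconnector property for a connector. Only applies to
 * DisplayPort connectors; the value falls back to "Unknown" when no sink
 * is attached.
 */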
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

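/*
 * Look up the amdgpu_crtc driving the given OTG instance. Falls back to
 * the first CRTC (with a warning) when the instance is invalid.
 */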
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

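/*
 * IRQ-context check for whether variable refresh rate is active, based on
 * the freesync state cached in the CRTC's interrupt parameters.
 */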
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
	       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
	       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

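/*
 * A DC timing adjustment is needed when entering fixed-rate VRR or when
 * the VRR active state differs between the old and new CRTC state.
 */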
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used for determining the pageflip's CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int)!e);
}

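/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate and, while VRR is active, performs the
 * core vblank handling after end of front-porch, including below-the-range
 * BTR processing on pre-DCE12 ASICs.
 */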
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
	    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by forwarding DMUB notifications and
 * draining the DMUB trace buffer.
 */
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
			do {
				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			} while (notify.pending_notification);

			if (adev->dm.dmub_notify)
				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
				complete(&adev->dm.dmub_aux_transfer_done);
			/* TODO: HPD Implementation */

		} else {
			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
		}
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else {
			break;
		}

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
				&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

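/*
 * Audio component callback: return the ELD (EDID-Like Data) of the
 * connector currently mapped to the given audio pin, for the HDA driver.
 */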
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

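/*
 * Initialize the DMUB (Display Microcontroller Unit B) hardware: copy the
 * firmware and VBIOS into the framebuffer regions, program the hardware
 * parameters, wait for the firmware to auto-load, and hook the DMUB
 * service into the DC context.
 */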
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
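/*
 * Read the system aperture, AGP window and GART page table range back from
 * the GMC state into DC's physical address space config, which is handed
 * to DC on APUs during init below.
 */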
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that makes it unable to use the vram
		 * which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is
		 * the workaround that increases the system aperture high
		 * address (add 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
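/*
 * Deferred work that counts CRTCs with vblank interrupts enabled and only
 * allows DC idle optimizations (MALL stutter) when that count drops to zero.
 */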
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif
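
/*
 * Core DM initialization: create the DC instance and bring up the IRQ,
 * DMUB, freesync, HDCP and DRM device state that hangs off
 * struct amdgpu_display_manager.
 */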
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
			goto error;
		}
		amdgpu_dm_outbox_init(adev);
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

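/*
 * Tear down everything amdgpu_dm_init() created: MST fake encoders, audio,
 * DRM device state, secure-display and HDCP work, DMUB resources, and
 * finally the DC instance itself.
 */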
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_workqueue) {
		adev->dm.vblank_workqueue->dm = NULL;
		kfree(adev->dm.vblank_workqueue);
		adev->dm.vblank_workqueue = NULL;
	}
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

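/*
 * Request and validate the DMCU microcontroller firmware for ASICs that
 * need it, and register it with the PSP loader.
 */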
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

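/*
 * Software-side DMUB setup: request the per-ASIC DMUB firmware, create the
 * DMUB service, size its memory regions and back them with a VRAM buffer.
 */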
1483static int dm_dmub_sw_init(struct amdgpu_device *adev)
1484{
1485 struct dmub_srv_create_params create_params;
8c7aea40
NK
1486 struct dmub_srv_region_params region_params;
1487 struct dmub_srv_region_info region_info;
1488 struct dmub_srv_fb_params fb_params;
1489 struct dmub_srv_fb_info *fb_info;
1490 struct dmub_srv *dmub_srv;
743b9786
NK
1491 const struct dmcub_firmware_header_v1_0 *hdr;
1492 const char *fw_name_dmub;
1493 enum dmub_asic dmub_asic;
1494 enum dmub_status status;
1495 int r;
1496
1497 switch (adev->asic_type) {
1498 case CHIP_RENOIR:
1499 dmub_asic = DMUB_ASIC_DCN21;
1500 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
71c0fd92
RL
1501 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1502 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
743b9786 1503 break;
79037324
BL
1504 case CHIP_SIENNA_CICHLID:
1505 dmub_asic = DMUB_ASIC_DCN30;
1506 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1507 break;
5ce868fc
BL
1508 case CHIP_NAVY_FLOUNDER:
1509 dmub_asic = DMUB_ASIC_DCN30;
1510 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
79037324 1511 break;
469989ca
RL
1512 case CHIP_VANGOGH:
1513 dmub_asic = DMUB_ASIC_DCN301;
1514 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1515 break;
2a411205
BL
1516 case CHIP_DIMGREY_CAVEFISH:
1517 dmub_asic = DMUB_ASIC_DCN302;
1518 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1519 break;
656fe9b6
AP
1520 case CHIP_BEIGE_GOBY:
1521 dmub_asic = DMUB_ASIC_DCN303;
1522 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1523 break;
743b9786
NK
1524
1525 default:
1526 /* ASIC doesn't support DMUB. */
1527 return 0;
1528 }
1529
743b9786
NK
1530 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1531 if (r) {
1532 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1533 return 0;
1534 }
1535
1536 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1537 if (r) {
1538 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1539 return 0;
1540 }
1541
743b9786 1542 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
743b9786 1543
9a6ed547
NK
1544 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1545 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1546 AMDGPU_UCODE_ID_DMCUB;
1547 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1548 adev->dm.dmub_fw;
1549 adev->firmware.fw_size +=
1550 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
743b9786 1551
9a6ed547
NK
1552 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1553 adev->dm.dmcub_fw_version);
1554 }
1555
1556 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
743b9786 1557
8c7aea40
NK
1558 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1559 dmub_srv = adev->dm.dmub_srv;
1560
1561 if (!dmub_srv) {
1562 DRM_ERROR("Failed to allocate DMUB service!\n");
1563 return -ENOMEM;
1564 }
1565
1566 memset(&create_params, 0, sizeof(create_params));
1567 create_params.user_ctx = adev;
1568 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1569 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1570 create_params.asic = dmub_asic;
1571
1572 /* Create the DMUB service. */
1573 status = dmub_srv_create(dmub_srv, &create_params);
1574 if (status != DMUB_STATUS_OK) {
1575 DRM_ERROR("Error creating DMUB service: %d\n", status);
1576 return -EINVAL;
1577 }
1578
1579 /* Calculate the size of all the regions for the DMUB service. */
1580 memset(&region_params, 0, sizeof(region_params));
1581
1582 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1583 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1584 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1585 region_params.vbios_size = adev->bios_size;
0922b899 1586 region_params.fw_bss_data = region_params.bss_data_size ?
1f0674fd
NK
1587 adev->dm.dmub_fw->data +
1588 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
0922b899 1589 le32_to_cpu(hdr->inst_const_bytes) : NULL;
a576b345
NK
1590 region_params.fw_inst_const =
1591 adev->dm.dmub_fw->data +
1592 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1593 PSP_HEADER_BYTES;
8c7aea40
NK
1594
1595 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1596 &region_info);
1597
1598 if (status != DMUB_STATUS_OK) {
1599 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1600 return -EINVAL;
1601 }
1602
1603 /*
1604 * Allocate a framebuffer based on the total size of all the regions.
1605 * TODO: Move this into GART.
1606 */
1607 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1608 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1609 &adev->dm.dmub_bo_gpu_addr,
1610 &adev->dm.dmub_bo_cpu_addr);
1611 if (r)
1612 return r;
1613
1614 /* Rebase the regions on the framebuffer address. */
1615 memset(&fb_params, 0, sizeof(fb_params));
1616 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1617 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1618 fb_params.region_info = &region_info;
1619
1620 adev->dm.dmub_fb_info =
1621 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1622 fb_info = adev->dm.dmub_fb_info;
1623
1624 if (!fb_info) {
1625 DRM_ERROR(
1626 "Failed to allocate framebuffer info for DMUB service!\n");
1627 return -ENOMEM;
1628 }
1629
1630 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1631 if (status != DMUB_STATUS_OK) {
1632 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1633 return -EINVAL;
1634 }
1635
1636 return 0;
1637}
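/*
 * A condensed, illustrative recap of the DMUB bring-up sequence implemented
 * above (no new API is introduced here; every call appears in this function):
 *
 *   dmub_srv_create(dmub_srv, &create_params);   // bind register access + ASIC
 *   dmub_srv_calc_region_info(dmub_srv, &region_params, &region_info);
 *   amdgpu_bo_create_kernel(...);                // back the regions with VRAM
 *   dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
 *
 * Note that the hardware itself is only programmed later, when
 * dm_dmub_hw_init() runs (see dm_resume() below).
 */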
1638
1639static int dm_sw_init(void *handle)
1640{
1641 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1642 int r;
1643
1644 r = dm_dmub_sw_init(adev);
1645 if (r)
1646 return r;
1647
1648 return load_dmcu_fw(adev);
1649}
1650
1651static int dm_sw_fini(void *handle)
1652{
1653 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1654
1655 kfree(adev->dm.dmub_fb_info);
1656 adev->dm.dmub_fb_info = NULL;
1657
1658 if (adev->dm.dmub_srv) {
1659 dmub_srv_destroy(adev->dm.dmub_srv);
1660 adev->dm.dmub_srv = NULL;
1661 }
1662
1663 release_firmware(adev->dm.dmub_fw);
1664 adev->dm.dmub_fw = NULL;
1665
1666 release_firmware(adev->dm.fw_dmcu);
1667 adev->dm.fw_dmcu = NULL;
1668
1669 return 0;
1670}
1671
7abcf6b5 1672static int detect_mst_link_for_all_connectors(struct drm_device *dev)
4562236b 1673{
c84dec2f 1674 struct amdgpu_dm_connector *aconnector;
4562236b 1675 struct drm_connector *connector;
f8d2d39e 1676 struct drm_connector_list_iter iter;
7abcf6b5 1677 int ret = 0;
4562236b 1678
f8d2d39e
LP
1679 drm_connector_list_iter_begin(dev, &iter);
1680 drm_for_each_connector_iter(connector, &iter) {
1681 aconnector = to_amdgpu_dm_connector(connector);
1682 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1683 aconnector->mst_mgr.aux) {
1684 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1685 aconnector,
1686 aconnector->base.base.id);
1687
1688 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1689 if (ret < 0) {
1690 DRM_ERROR("DM_MST: Failed to start MST\n");
1691 aconnector->dc_link->type =
1692 dc_connection_single;
1693 break;
1694 }
1695 }
1696 }
1697 drm_connector_list_iter_end(&iter);
1698
1699 return ret;
1700}
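/*
 * Illustrative recap of the connector-walk pattern used above: any iteration
 * over the connector list must be bracketed by begin/end so the iterator can
 * safely take and drop its internal references:
 *
 *   struct drm_connector_list_iter iter;
 *   struct drm_connector *connector;
 *
 *   drm_connector_list_iter_begin(dev, &iter);
 *   drm_for_each_connector_iter(connector, &iter) {
 *           // inspect or update each connector
 *   }
 *   drm_connector_list_iter_end(&iter);
 */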
1701
1702static int dm_late_init(void *handle)
1703{
1704 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1705
1706 struct dmcu_iram_parameters params;
1707 unsigned int linear_lut[16];
1708 int i;
1709 struct dmcu *dmcu = NULL;
1710
1711 dmcu = adev->dm.dc->res_pool->dmcu;
1712
1713 for (i = 0; i < 16; i++)
1714 linear_lut[i] = 0xFFFF * i / 15;
1715
1716 params.set = 0;
1717 params.backlight_ramping_start = 0xCCCC;
1718 params.backlight_ramping_reduction = 0xCCCCCCCC;
1719 params.backlight_lut_array_size = 16;
1720 params.backlight_lut_array = linear_lut;
1721
1722 /* Min backlight level after ABM reduction; don't allow below 1%:
1723 * 0xFFFF x 0.01 = 0x28F
1724 */
1725 params.min_abm_backlight = 0x28F;
1726 /* When ABM is implemented on DMCUB,
1727 * the dmcu object will be NULL.
1728 * ABM 2.4 and up are implemented on DMCUB.
1729 */
1730 if (dmcu) {
1731 if (!dmcu_load_iram(dmcu, params))
1732 return -EINVAL;
1733 } else if (adev->dm.dc->ctx->dmub_srv) {
1734 struct dc_link *edp_links[MAX_NUM_EDP];
1735 int edp_num;
1736
1737 get_edp_links(adev->dm.dc, edp_links, &edp_num);
1738 for (i = 0; i < edp_num; i++) {
1739 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1740 return -EINVAL;
1741 }
1742 }
1743
1744 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1745}
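/*
 * Worked example for the backlight parameters above, derived purely from the
 * code: linear_lut[4] = 0xFFFF * 4 / 15 = 0x4444 and linear_lut[15] = 0xFFFF
 * (full scale); min_abm_backlight = 0x28F is 655 decimal, i.e. about 1% of
 * 0xFFFF (65535 * 0.01 = 655.35), matching the "don't allow below 1%" rule.
 */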
1746
1747static void s3_handle_mst(struct drm_device *dev, bool suspend)
1748{
1749 struct amdgpu_dm_connector *aconnector;
1750 struct drm_connector *connector;
1751 struct drm_connector_list_iter iter;
1752 struct drm_dp_mst_topology_mgr *mgr;
1753 int ret;
1754 bool need_hotplug = false;
1755
1756 drm_connector_list_iter_begin(dev, &iter);
1757 drm_for_each_connector_iter(connector, &iter) {
1758 aconnector = to_amdgpu_dm_connector(connector);
1759 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1760 aconnector->mst_port)
1761 continue;
1762
1763 mgr = &aconnector->mst_mgr;
1764
1765 if (suspend) {
1766 drm_dp_mst_topology_mgr_suspend(mgr);
1767 } else {
1768 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1769 if (ret < 0) {
1770 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1771 need_hotplug = true;
1772 }
1773 }
1774 }
1775 drm_connector_list_iter_end(&iter);
1776
1777 if (need_hotplug)
1778 drm_kms_helper_hotplug_event(dev);
1779}
1780
1781static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1782{
1783 struct smu_context *smu = &adev->smu;
1784 int ret = 0;
1785
1786 if (!is_support_sw_smu(adev))
1787 return 0;
1788
1789 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1790 * on the Windows driver dc implementation.
1791 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1792 * should be passed to smu during boot up and resume from s3.
1793 * boot up: dc calculates dcn watermark clock settings within dc_create,
1794 * dcn20_resource_construct
1795 * then calls pplib functions below to pass the settings to smu:
1796 * smu_set_watermarks_for_clock_ranges
1797 * smu_set_watermarks_table
1798 * navi10_set_watermarks_table
1799 * smu_write_watermarks_table
1800 *
1801 * For Renoir, clock settings of dcn watermarks are also fixed values.
1802 * dc has implemented a different flow for the Windows driver:
1803 * dc_hardware_init / dc_set_power_state
1804 * dcn10_init_hw
1805 * notify_wm_ranges
1806 * set_wm_ranges
1807 * -- Linux
1808 * smu_set_watermarks_for_clock_ranges
1809 * renoir_set_watermarks_table
1810 * smu_write_watermarks_table
1811 *
1812 * For Linux,
1813 * dc_hardware_init -> amdgpu_dm_init
1814 * dc_set_power_state --> dm_resume
1815 *
1816 * therefore, this function applies to navi10/12/14 but not Renoir.
1817 *
1818 */
1819 switch (adev->asic_type) {
1820 case CHIP_NAVI10:
1821 case CHIP_NAVI14:
1822 case CHIP_NAVI12:
1823 break;
1824 default:
1825 return 0;
1826 }
1827
1828 ret = smu_write_watermarks_table(smu);
1829 if (ret) {
1830 DRM_ERROR("Failed to update WMTABLE!\n");
1831 return ret;
1832 }
1833
1834 return 0;
1835}
1836
1837/**
1838 * dm_hw_init() - Initialize DC device
1839 * @handle: The base driver device containing the amdgpu_dm device.
1840 *
1841 * Initialize the &struct amdgpu_display_manager device. This involves calling
1842 * the initializers of each DM component, then populating the struct with them.
1843 *
1844 * Although the function implies hardware initialization, both hardware and
1845 * software are initialized here. Splitting them out to their relevant init
1846 * hooks is a future TODO item.
1847 *
1848 * Some notable things that are initialized here:
1849 *
1850 * - Display Core, both software and hardware
1851 * - DC modules that we need (freesync and color management)
1852 * - DRM software states
1853 * - Interrupt sources and handlers
1854 * - Vblank support
1855 * - Debug FS entries, if enabled
1856 */
1857static int dm_hw_init(void *handle)
1858{
1859 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1860 /* Create DAL display manager */
1861 amdgpu_dm_init(adev);
1862 amdgpu_dm_hpd_init(adev);
1863
1864 return 0;
1865}
1866
1867/**
1868 * dm_hw_fini() - Teardown DC device
1869 * @handle: The base driver device containing the amdgpu_dm device.
1870 *
1871 * Teardown components within &struct amdgpu_display_manager that require
1872 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1873 * were loaded. Also flush IRQ workqueues and disable them.
1874 */
1875static int dm_hw_fini(void *handle)
1876{
1877 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1878
1879 amdgpu_dm_hpd_fini(adev);
1880
1881 amdgpu_dm_irq_fini(adev);
1882 amdgpu_dm_fini(adev);
1883 return 0;
1884}
1885
1886
1887static int dm_enable_vblank(struct drm_crtc *crtc);
1888static void dm_disable_vblank(struct drm_crtc *crtc);
1889
1890static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1891 struct dc_state *state, bool enable)
1892{
1893 enum dc_irq_source irq_source;
1894 struct amdgpu_crtc *acrtc;
1895 int rc = -EBUSY;
1896 int i = 0;
1897
1898 for (i = 0; i < state->stream_count; i++) {
1899 acrtc = get_crtc_by_otg_inst(
1900 adev, state->stream_status[i].primary_otg_inst);
1901
1902 if (acrtc && state->stream_status[i].plane_count != 0) {
1903 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1904 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1905 DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1906 acrtc->crtc_id, enable ? "en" : "dis", rc);
1907 if (rc)
1908 DRM_WARN("Failed to %s pflip interrupts\n",
1909 enable ? "enable" : "disable");
1910
1911 if (enable) {
1912 rc = dm_enable_vblank(&acrtc->base);
1913 if (rc)
1914 DRM_WARN("Failed to enable vblank interrupts\n");
1915 } else {
1916 dm_disable_vblank(&acrtc->base);
1917 }
1918
1919 }
1920 }
1921
1922}
1923
1924 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1925{
1926 struct dc_state *context = NULL;
1927 enum dc_status res = DC_ERROR_UNEXPECTED;
1928 int i;
1929 struct dc_stream_state *del_streams[MAX_PIPES];
1930 int del_streams_count = 0;
1931
1932 memset(del_streams, 0, sizeof(del_streams));
1933
1934 context = dc_create_state(dc);
1935 if (context == NULL)
1936 goto context_alloc_fail;
1937
1938 dc_resource_state_copy_construct_current(dc, context);
1939
1940 /* First remove from context all streams */
1941 for (i = 0; i < context->stream_count; i++) {
1942 struct dc_stream_state *stream = context->streams[i];
1943
1944 del_streams[del_streams_count++] = stream;
1945 }
1946
1947 /* Remove all planes for removed streams and then remove the streams */
1948 for (i = 0; i < del_streams_count; i++) {
1949 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1950 res = DC_FAIL_DETACH_SURFACES;
1951 goto fail;
1952 }
1953
1954 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1955 if (res != DC_OK)
1956 goto fail;
1957 }
1958
1959
1960 res = dc_validate_global_state(dc, context, false);
1961
1962 if (res != DC_OK) {
1963 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1964 goto fail;
1965 }
1966
1967 res = dc_commit_state(dc, context);
1968
1969fail:
1970 dc_release_state(context);
1971
1972context_alloc_fail:
1973 return res;
1974}
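/*
 * Note on the ordering above: planes must be detached from each stream
 * (dc_rem_all_planes_for_stream) before the stream can be removed from the
 * context (dc_remove_stream_from_ctx); committing the resulting validated,
 * stream-less context is what actually idles the pipes.
 */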
1975
1976static int dm_suspend(void *handle)
1977{
1978 struct amdgpu_device *adev = handle;
1979 struct amdgpu_display_manager *dm = &adev->dm;
1980 int ret = 0;
1981
1982 if (amdgpu_in_reset(adev)) {
1983 mutex_lock(&dm->dc_lock);
1984
1985#if defined(CONFIG_DRM_AMD_DC_DCN)
1986 dc_allow_idle_optimizations(adev->dm.dc, false);
1987#endif
1988
1989 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1990
1991 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1992
1993 amdgpu_dm_commit_zero_streams(dm->dc);
1994
1995 amdgpu_dm_irq_suspend(adev);
1996
1997 return ret;
1998 }
1999
2000 WARN_ON(adev->dm.cached_state);
2001 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2002
2003 s3_handle_mst(adev_to_drm(adev), true);
2004
2005 amdgpu_dm_irq_suspend(adev);
2006
2007 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2008
2009 return 0;
2010}
2011
2012static struct amdgpu_dm_connector *
2013amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2014 struct drm_crtc *crtc)
2015{
2016 uint32_t i;
2017 struct drm_connector_state *new_con_state;
2018 struct drm_connector *connector;
2019 struct drm_crtc *crtc_from_state;
2020
2021 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2022 crtc_from_state = new_con_state->crtc;
2023
2024 if (crtc_from_state == crtc)
2025 return to_amdgpu_dm_connector(connector);
2026 }
2027
2028 return NULL;
2029}
2030
2031static void emulated_link_detect(struct dc_link *link)
2032{
2033 struct dc_sink_init_data sink_init_data = { 0 };
2034 struct display_sink_capability sink_caps = { 0 };
2035 enum dc_edid_status edid_status;
2036 struct dc_context *dc_ctx = link->ctx;
2037 struct dc_sink *sink = NULL;
2038 struct dc_sink *prev_sink = NULL;
2039
2040 link->type = dc_connection_none;
2041 prev_sink = link->local_sink;
2042
2043 if (prev_sink)
2044 dc_sink_release(prev_sink);
2045
2046 switch (link->connector_signal) {
2047 case SIGNAL_TYPE_HDMI_TYPE_A: {
2048 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2049 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2050 break;
2051 }
2052
2053 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2054 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2055 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2056 break;
2057 }
2058
2059 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2060 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2061 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2062 break;
2063 }
2064
2065 case SIGNAL_TYPE_LVDS: {
2066 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2067 sink_caps.signal = SIGNAL_TYPE_LVDS;
2068 break;
2069 }
2070
2071 case SIGNAL_TYPE_EDP: {
2072 sink_caps.transaction_type =
2073 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2074 sink_caps.signal = SIGNAL_TYPE_EDP;
2075 break;
2076 }
2077
2078 case SIGNAL_TYPE_DISPLAY_PORT: {
2079 sink_caps.transaction_type =
2080 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2081 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2082 break;
2083 }
2084
2085 default:
2086 DC_ERROR("Invalid connector type! signal:%d\n",
2087 link->connector_signal);
2088 return;
2089 }
2090
2091 sink_init_data.link = link;
2092 sink_init_data.sink_signal = sink_caps.signal;
2093
2094 sink = dc_sink_create(&sink_init_data);
2095 if (!sink) {
2096 DC_ERROR("Failed to create sink!\n");
2097 return;
2098 }
2099
2100 /* dc_sink_create returns a new reference */
2101 link->local_sink = sink;
2102
2103 edid_status = dm_helpers_read_local_edid(
2104 link->ctx,
2105 link,
2106 sink);
2107
2108 if (edid_status != EDID_OK)
2109 DC_ERROR("Failed to read EDID");
2110
2111}
2112
2113static void dm_gpureset_commit_state(struct dc_state *dc_state,
2114 struct amdgpu_display_manager *dm)
2115{
2116 struct {
2117 struct dc_surface_update surface_updates[MAX_SURFACES];
2118 struct dc_plane_info plane_infos[MAX_SURFACES];
2119 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2120 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2121 struct dc_stream_update stream_update;
2122 } *bundle;
2123 int k, m;
2124
2125 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2126
2127 if (!bundle) {
2128 dm_error("Failed to allocate update bundle\n");
2129 goto cleanup;
2130 }
2131
2132 for (k = 0; k < dc_state->stream_count; k++) {
2133 bundle->stream_update.stream = dc_state->streams[k];
2134
2135 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2136 bundle->surface_updates[m].surface =
2137 dc_state->stream_status->plane_states[m];
2138 bundle->surface_updates[m].surface->force_full_update =
2139 true;
2140 }
2141 dc_commit_updates_for_stream(
2142 dm->dc, bundle->surface_updates,
2143 dc_state->stream_status->plane_count,
2144 dc_state->streams[k], &bundle->stream_update, dc_state);
2145 }
2146
2147cleanup:
2148 kfree(bundle);
2149
2150 return;
2151}
2152
2153static void dm_set_dpms_off(struct dc_link *link)
2154{
2155 struct dc_stream_state *stream_state;
2156 struct amdgpu_dm_connector *aconnector = link->priv;
2157 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2158 struct dc_stream_update stream_update;
2159 bool dpms_off = true;
2160
2161 memset(&stream_update, 0, sizeof(stream_update));
2162 stream_update.dpms_off = &dpms_off;
2163
2164 mutex_lock(&adev->dm.dc_lock);
2165 stream_state = dc_stream_find_from_link(link);
2166
2167 if (stream_state == NULL) {
2168 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2169 mutex_unlock(&adev->dm.dc_lock);
2170 return;
2171 }
2172
2173 stream_update.stream = stream_state;
2174 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2175 stream_state, &stream_update,
2176 stream_state->ctx->dc->current_state);
2177 mutex_unlock(&adev->dm.dc_lock);
2178}
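/*
 * The dc_stream_update pattern above relies on pointer-valued fields: only
 * members pointing at a value (here, dpms_off) are applied by
 * dc_commit_updates_for_stream(); fields left NULL by the memset() are
 * treated as "no change". This is the usual way to commit a single-property
 * stream update in DC.
 */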
2179
2180static int dm_resume(void *handle)
2181{
2182 struct amdgpu_device *adev = handle;
2183 struct drm_device *ddev = adev_to_drm(adev);
2184 struct amdgpu_display_manager *dm = &adev->dm;
2185 struct amdgpu_dm_connector *aconnector;
2186 struct drm_connector *connector;
2187 struct drm_connector_list_iter iter;
2188 struct drm_crtc *crtc;
2189 struct drm_crtc_state *new_crtc_state;
2190 struct dm_crtc_state *dm_new_crtc_state;
2191 struct drm_plane *plane;
2192 struct drm_plane_state *new_plane_state;
2193 struct dm_plane_state *dm_new_plane_state;
2194 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2195 enum dc_connection_type new_connection_type = dc_connection_none;
2196 struct dc_state *dc_state;
2197 int i, r, j;
2198
2199 if (amdgpu_in_reset(adev)) {
2200 dc_state = dm->cached_dc_state;
2201
2202 r = dm_dmub_hw_init(adev);
2203 if (r)
2204 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2205
2206 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2207 dc_resume(dm->dc);
2208
2209 amdgpu_dm_irq_resume_early(adev);
2210
2211 for (i = 0; i < dc_state->stream_count; i++) {
2212 dc_state->streams[i]->mode_changed = true;
2213 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2214 dc_state->stream_status->plane_states[j]->update_flags.raw
2215 = 0xffffffff;
2216 }
2217 }
2218
2219 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2220
2221 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2222
2223 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2224
2225 dc_release_state(dm->cached_dc_state);
2226 dm->cached_dc_state = NULL;
2227
2228 amdgpu_dm_irq_resume_late(adev);
2229
2230 mutex_unlock(&dm->dc_lock);
2231
2232 return 0;
2233 }
2234 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2235 dc_release_state(dm_state->context);
2236 dm_state->context = dc_create_state(dm->dc);
2237 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2238 dc_resource_state_construct(dm->dc, dm_state->context);
2239
2240 /* Before powering on DC we need to re-initialize DMUB. */
2241 r = dm_dmub_hw_init(adev);
2242 if (r)
2243 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2244
2245 /* power on hardware */
2246 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2247
2248 /* program HPD filter */
2249 dc_resume(dm->dc);
2250
2251 /*
2252 * early enable HPD Rx IRQ, should be done before set mode as short
2253 * pulse interrupts are used for MST
2254 */
2255 amdgpu_dm_irq_resume_early(adev);
2256
2257 /* On resume we need to rewrite the MSTM control bits to enable MST */
2258 s3_handle_mst(ddev, false);
2259
2260 /* Do detection */
2261 drm_connector_list_iter_begin(ddev, &iter);
2262 drm_for_each_connector_iter(connector, &iter) {
2263 aconnector = to_amdgpu_dm_connector(connector);
2264
2265 /*
2266 * this is the case when traversing through already created
2267 * MST connectors, should be skipped
2268 */
2269 if (aconnector->mst_port)
2270 continue;
2271
2272 mutex_lock(&aconnector->hpd_lock);
2273 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2274 DRM_ERROR("KMS: Failed to detect connector\n");
2275
2276 if (aconnector->base.force && new_connection_type == dc_connection_none)
2277 emulated_link_detect(aconnector->dc_link);
2278 else
2279 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2280
2281 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2282 aconnector->fake_enable = false;
2283
2284 if (aconnector->dc_sink)
2285 dc_sink_release(aconnector->dc_sink);
2286 aconnector->dc_sink = NULL;
2287 amdgpu_dm_update_connector_after_detect(aconnector);
2288 mutex_unlock(&aconnector->hpd_lock);
2289 }
2290 drm_connector_list_iter_end(&iter);
2291
2292 /* Force mode set in atomic commit */
2293 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2294 new_crtc_state->active_changed = true;
2295
2296 /*
2297 * atomic_check is expected to create the dc states. We need to release
2298 * them here, since they were duplicated as part of the suspend
2299 * procedure.
2300 */
2301 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2302 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2303 if (dm_new_crtc_state->stream) {
2304 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2305 dc_stream_release(dm_new_crtc_state->stream);
2306 dm_new_crtc_state->stream = NULL;
2307 }
2308 }
2309
2310 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2311 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2312 if (dm_new_plane_state->dc_state) {
2313 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2314 dc_plane_state_release(dm_new_plane_state->dc_state);
2315 dm_new_plane_state->dc_state = NULL;
2316 }
2317 }
2318
2319 drm_atomic_helper_resume(ddev, dm->cached_state);
2320
2321 dm->cached_state = NULL;
2322
2323 amdgpu_dm_irq_resume_late(adev);
2324
2325 amdgpu_dm_smu_write_watermarks_table(adev);
2326
2327 return 0;
2328}
2329
2330/**
2331 * DOC: DM Lifecycle
2332 *
2333 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2334 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2335 * the base driver's device list to be initialized and torn down accordingly.
2336 *
2337 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2338 */
2339
2340static const struct amd_ip_funcs amdgpu_dm_funcs = {
2341 .name = "dm",
2342 .early_init = dm_early_init,
2343 .late_init = dm_late_init,
2344 .sw_init = dm_sw_init,
2345 .sw_fini = dm_sw_fini,
2346 .hw_init = dm_hw_init,
2347 .hw_fini = dm_hw_fini,
2348 .suspend = dm_suspend,
2349 .resume = dm_resume,
2350 .is_idle = dm_is_idle,
2351 .wait_for_idle = dm_wait_for_idle,
2352 .check_soft_reset = dm_check_soft_reset,
2353 .soft_reset = dm_soft_reset,
2354 .set_clockgating_state = dm_set_clockgating_state,
2355 .set_powergating_state = dm_set_powergating_state,
2356};
2357
2358const struct amdgpu_ip_block_version dm_ip_block =
2359{
2360 .type = AMD_IP_BLOCK_TYPE_DCE,
2361 .major = 1,
2362 .minor = 0,
2363 .rev = 0,
2364 .funcs = &amdgpu_dm_funcs,
2365};
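/*
 * Illustrative usage sketch (the actual call site lives in the SoC setup
 * code, e.g. soc15.c, not in this file): the base driver picks up
 * dm_ip_block by adding it to the device's IP block list, after which the
 * amd_ip_funcs hooks above run at the documented lifecycle points:
 *
 *   if (amdgpu_device_has_dc_support(adev))
 *           amdgpu_device_ip_block_add(adev, &dm_ip_block);
 */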
2366
2367
2368/**
2369 * DOC: atomic
2370 *
2371 * *WIP*
2372 */
2373
2374 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2375 .fb_create = amdgpu_display_user_framebuffer_create,
2376 .get_format_info = amd_get_format_info,
2377 .output_poll_changed = drm_fb_helper_output_poll_changed,
2378 .atomic_check = amdgpu_dm_atomic_check,
2379 .atomic_commit = drm_atomic_helper_commit,
2380};
2381
2382static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2383 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2384};
2385
2386static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2387{
2388 u32 max_cll, min_cll, max, min, q, r;
2389 struct amdgpu_dm_backlight_caps *caps;
2390 struct amdgpu_display_manager *dm;
2391 struct drm_connector *conn_base;
2392 struct amdgpu_device *adev;
2393 struct dc_link *link = NULL;
2394 static const u8 pre_computed_values[] = {
2395 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2396 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2397
2398 if (!aconnector || !aconnector->dc_link)
2399 return;
2400
2401 link = aconnector->dc_link;
2402 if (link->connector_signal != SIGNAL_TYPE_EDP)
2403 return;
2404
2405 conn_base = &aconnector->base;
2406 adev = drm_to_adev(conn_base->dev);
2407 dm = &adev->dm;
2408 caps = &dm->backlight_caps;
2409 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2410 caps->aux_support = false;
2411 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2412 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2413
2414 if (caps->ext_caps->bits.oled == 1 ||
2415 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2416 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2417 caps->aux_support = true;
2418
2419 if (amdgpu_backlight == 0)
2420 caps->aux_support = false;
2421 else if (amdgpu_backlight == 1)
2422 caps->aux_support = true;
2423
2424 /* From the specification (CTA-861-G), for calculating the maximum
2425 * luminance we need to use:
2426 * Luminance = 50*2**(CV/32)
2427 * where CV is a one-byte value.
2428 * Evaluating this expression directly would require floating-point
2429 * precision; to avoid that complexity, we take advantage of the fact
2430 * that CV is divided by a constant. From Euclid's division algorithm,
2431 * CV can be written as: CV = 32*q + r. Substituting CV in the
2432 * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2433 * to pre-compute the values of 2**(r/32). For pre-computing the
2434 * values we just used the following Ruby line:
2435 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2436 * The results of the above expression can be verified at
2437 * pre_computed_values.
2438 */
2439 q = max_cll >> 5;
2440 r = max_cll % 32;
2441 max = (1 << q) * pre_computed_values[r];
2442
2443 // min luminance: maxLum * (CV/255)^2 / 100
2444 q = DIV_ROUND_CLOSEST(min_cll, 255);
2445 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2446
2447 caps->aux_max_input_signal = max;
2448 caps->aux_min_input_signal = min;
2449}
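/*
 * Worked example for the max-luminance math above: for max_cll = 65,
 * q = 65 >> 5 = 2 and r = 65 % 32 = 1, so max = (1 << 2) *
 * pre_computed_values[1] = 4 * 51 = 204 nits; the closed form
 * 50*2**(65/32) evaluates to about 204.4, so the integer approximation
 * stays within rounding error.
 */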
2450
2451void amdgpu_dm_update_connector_after_detect(
2452 struct amdgpu_dm_connector *aconnector)
2453{
2454 struct drm_connector *connector = &aconnector->base;
2455 struct drm_device *dev = connector->dev;
2456 struct dc_sink *sink;
2457
2458 /* MST handled by drm_mst framework */
2459 if (aconnector->mst_mgr.mst_state == true)
2460 return;
2461
2462 sink = aconnector->dc_link->local_sink;
2463 if (sink)
2464 dc_sink_retain(sink);
2465
2466 /*
2467 * The EDID mgmt connector gets its first update only in the mode_valid hook;
2468 * the connector sink is then set to either a fake or a physical sink,
2469 * depending on link status. Skip if this was already done during boot.
2470 */
2471 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2472 && aconnector->dc_em_sink) {
2473
2474 /*
2475 * For S3 resume with headless use eml_sink to fake stream
2476 * because on resume connector->sink is set to NULL
2477 */
2478 mutex_lock(&dev->mode_config.mutex);
2479
2480 if (sink) {
2481 if (aconnector->dc_sink) {
2482 amdgpu_dm_update_freesync_caps(connector, NULL);
2483 /*
2484 * retain and release below are used to
2485 * bump up the refcount for the sink because the link doesn't point
2486 * to it anymore after disconnect; otherwise, on the next crtc-to-connector
2487 * reshuffle by UMD we would get an unwanted dc_sink release
2488 */
2489 dc_sink_release(aconnector->dc_sink);
2490 }
2491 aconnector->dc_sink = sink;
2492 dc_sink_retain(aconnector->dc_sink);
2493 amdgpu_dm_update_freesync_caps(connector,
2494 aconnector->edid);
2495 } else {
2496 amdgpu_dm_update_freesync_caps(connector, NULL);
2497 if (!aconnector->dc_sink) {
2498 aconnector->dc_sink = aconnector->dc_em_sink;
2499 dc_sink_retain(aconnector->dc_sink);
2500 }
2501 }
2502
2503 mutex_unlock(&dev->mode_config.mutex);
2504
2505 if (sink)
2506 dc_sink_release(sink);
2507 return;
2508 }
2509
2510 /*
2511 * TODO: temporary guard to look for proper fix
2512 * if this sink is MST sink, we should not do anything
2513 */
2514 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2515 dc_sink_release(sink);
2516 return;
2517 }
2518
2519 if (aconnector->dc_sink == sink) {
2520 /*
2521 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2522 * Do nothing!!
2523 */
2524 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2525 aconnector->connector_id);
2526 if (sink)
2527 dc_sink_release(sink);
2528 return;
2529 }
2530
2531 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2532 aconnector->connector_id, aconnector->dc_sink, sink);
2533
2534 mutex_lock(&dev->mode_config.mutex);
2535
2536 /*
2537 * 1. Update status of the drm connector
2538 * 2. Send an event and let userspace tell us what to do
2539 */
2540 if (sink) {
2541 /*
2542 * TODO: check if we still need the S3 mode update workaround.
2543 * If yes, put it here.
2544 */
2545 if (aconnector->dc_sink) {
2546 amdgpu_dm_update_freesync_caps(connector, NULL);
2547 dc_sink_release(aconnector->dc_sink);
2548 }
2549
2550 aconnector->dc_sink = sink;
2551 dc_sink_retain(aconnector->dc_sink);
2552 if (sink->dc_edid.length == 0) {
2553 aconnector->edid = NULL;
2554 if (aconnector->dc_link->aux_mode) {
2555 drm_dp_cec_unset_edid(
2556 &aconnector->dm_dp_aux.aux);
2557 }
2558 } else {
2559 aconnector->edid =
2560 (struct edid *)sink->dc_edid.raw_edid;
2561
2562 drm_connector_update_edid_property(connector,
2563 aconnector->edid);
2564 if (aconnector->dc_link->aux_mode)
2565 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2566 aconnector->edid);
2567 }
2568
2569 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2570 update_connector_ext_caps(aconnector);
2571 } else {
2572 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2573 amdgpu_dm_update_freesync_caps(connector, NULL);
2574 drm_connector_update_edid_property(connector, NULL);
2575 aconnector->num_modes = 0;
2576 dc_sink_release(aconnector->dc_sink);
2577 aconnector->dc_sink = NULL;
2578 aconnector->edid = NULL;
2579#ifdef CONFIG_DRM_AMD_DC_HDCP
2580 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2581 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2582 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2583#endif
2584 }
2585
2586 mutex_unlock(&dev->mode_config.mutex);
2587
2588 update_subconnector_property(aconnector);
2589
2590 if (sink)
2591 dc_sink_release(sink);
2592}
2593
2594static void handle_hpd_irq(void *param)
2595{
2596 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2597 struct drm_connector *connector = &aconnector->base;
2598 struct drm_device *dev = connector->dev;
2599 enum dc_connection_type new_connection_type = dc_connection_none;
2600 struct amdgpu_device *adev = drm_to_adev(dev);
2601#ifdef CONFIG_DRM_AMD_DC_HDCP
2602 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2603#endif
2604
2605 if (adev->dm.disable_hpd_irq)
2606 return;
2607
2608 /*
2609 * In case of failure or MST no need to update connector status or notify the OS
2610 * since (for MST case) MST does this in its own context.
2611 */
2612 mutex_lock(&aconnector->hpd_lock);
2613
2614#ifdef CONFIG_DRM_AMD_DC_HDCP
2615 if (adev->dm.hdcp_workqueue) {
2616 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2617 dm_con_state->update_hdcp = true;
2618 }
2619#endif
2620 if (aconnector->fake_enable)
2621 aconnector->fake_enable = false;
2622
2623 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2624 DRM_ERROR("KMS: Failed to detect connector\n");
2625
2626 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2627 emulated_link_detect(aconnector->dc_link);
2628
2629
2630 drm_modeset_lock_all(dev);
2631 dm_restore_drm_connector_state(dev, connector);
2632 drm_modeset_unlock_all(dev);
2633
2634 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2635 drm_kms_helper_hotplug_event(dev);
2636
2637 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9
AP
2638 if (new_connection_type == dc_connection_none &&
2639 aconnector->dc_link->type == dc_connection_none)
2640 dm_set_dpms_off(aconnector->dc_link);
2641
2642 amdgpu_dm_update_connector_after_detect(aconnector);
2643
2644 drm_modeset_lock_all(dev);
2645 dm_restore_drm_connector_state(dev, connector);
2646 drm_modeset_unlock_all(dev);
2647
2648 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2649 drm_kms_helper_hotplug_event(dev);
2650 }
2651 mutex_unlock(&aconnector->hpd_lock);
2652
2653}
2654
2655 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2656{
2657 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2658 uint8_t dret;
2659 bool new_irq_handled = false;
2660 int dpcd_addr;
2661 int dpcd_bytes_to_read;
2662
2663 const int max_process_count = 30;
2664 int process_count = 0;
2665
2666 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2667
2668 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2669 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2670 /* DPCD 0x200 - 0x201 for downstream IRQ */
2671 dpcd_addr = DP_SINK_COUNT;
2672 } else {
2673 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2674 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2675 dpcd_addr = DP_SINK_COUNT_ESI;
2676 }
2677
2678 dret = drm_dp_dpcd_read(
2679 &aconnector->dm_dp_aux.aux,
2680 dpcd_addr,
2681 esi,
2682 dpcd_bytes_to_read);
2683
2684 while (dret == dpcd_bytes_to_read &&
2685 process_count < max_process_count) {
2686 uint8_t retry;
2687 dret = 0;
2688
2689 process_count++;
2690
2691 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2692 /* handle HPD short pulse irq */
2693 if (aconnector->mst_mgr.mst_state)
2694 drm_dp_mst_hpd_irq(
2695 &aconnector->mst_mgr,
2696 esi,
2697 &new_irq_handled);
2698
2699 if (new_irq_handled) {
2700 /* ACK at DPCD to notify down stream */
2701 const int ack_dpcd_bytes_to_write =
2702 dpcd_bytes_to_read - 1;
2703
2704 for (retry = 0; retry < 3; retry++) {
2705 uint8_t wret;
2706
2707 wret = drm_dp_dpcd_write(
2708 &aconnector->dm_dp_aux.aux,
2709 dpcd_addr + 1,
2710 &esi[1],
2711 ack_dpcd_bytes_to_write);
2712 if (wret == ack_dpcd_bytes_to_write)
2713 break;
2714 }
2715
2716 /* check if there is a new irq to be handled */
4562236b
HW
2717 dret = drm_dp_dpcd_read(
2718 &aconnector->dm_dp_aux.aux,
2719 dpcd_addr,
2720 esi,
2721 dpcd_bytes_to_read);
2722
2723 new_irq_handled = false;
2724 } else {
2725 break;
2726 }
4562236b
HW
2727 }
2728
2729 if (process_count == max_process_count)
2730 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2731}
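/*
 * For reference, the DPCD layout assumed by the loop above (addresses from
 * the DisplayPort spec, as exposed via drm_dp_helper.h): DPCD 1.2+ devices
 * expose a 4-byte ESI block at DP_SINK_COUNT_ESI (0x2002-0x2005) carrying
 * the sink count and the device/link service IRQ vectors, while older
 * devices use the 2-byte block at DP_SINK_COUNT (0x200-0x201). Writing the
 * handled vector bytes back at dpcd_addr + 1 acknowledges the IRQ at the
 * sink.
 */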
2732
2733static void handle_hpd_rx_irq(void *param)
2734{
2735 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2736 struct drm_connector *connector = &aconnector->base;
2737 struct drm_device *dev = connector->dev;
2738 struct dc_link *dc_link = aconnector->dc_link;
2739 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2740 bool result = false;
2741 enum dc_connection_type new_connection_type = dc_connection_none;
2742 struct amdgpu_device *adev = drm_to_adev(dev);
2743 union hpd_irq_data hpd_irq_data;
2744 bool lock_flag = false;
2745
2746 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2747
2748 if (adev->dm.disable_hpd_irq)
2749 return;
2750
2751
2752 /*
2753 * TODO: Temporarily hold this mutex so the HPD interrupt does not
2754 * conflict on the GPIO; once an i2c helper is implemented, this mutex
2755 * should be retired.
2756 */
2757 mutex_lock(&aconnector->hpd_lock);
2758
2759 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2760
2761 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2762 (dc_link->type == dc_connection_mst_branch)) {
2763 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2764 result = true;
2765 dm_handle_hpd_rx_irq(aconnector);
2766 goto out;
2767 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2768 result = false;
2769 dm_handle_hpd_rx_irq(aconnector);
2770 goto out;
2771 }
2772 }
2773
2774 /*
2775 * TODO: We need the lock to avoid touching DC state while it's being
2776 * modified during automated compliance testing, or when link loss
2777 * happens. While this should be split into subhandlers and proper
2778 * interfaces to avoid having to conditionally lock like this in the
2779 * outer layer, we need this workaround temporarily to allow MST
2780 * lightup in some scenarios to avoid timeout.
2781 */
2782 if (!amdgpu_in_reset(adev) &&
2783 (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2784 hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2785 mutex_lock(&adev->dm.dc_lock);
2786 lock_flag = 1;
2787 }
2788
2789#ifdef CONFIG_DRM_AMD_DC_HDCP
2790 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2791#else
2792 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2793#endif
2794 if (!amdgpu_in_reset(adev) && lock_flag)
2795 mutex_unlock(&adev->dm.dc_lock);
2796
2797out:
2798 if (result && !is_mst_root_connector) {
2799 /* Downstream Port status changed. */
2800 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2801 DRM_ERROR("KMS: Failed to detect connector\n");
2802
2803 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2804 emulated_link_detect(dc_link);
2805
2806 if (aconnector->fake_enable)
2807 aconnector->fake_enable = false;
2808
2809 amdgpu_dm_update_connector_after_detect(aconnector);
2810
2811
2812 drm_modeset_lock_all(dev);
2813 dm_restore_drm_connector_state(dev, connector);
2814 drm_modeset_unlock_all(dev);
2815
2816 drm_kms_helper_hotplug_event(dev);
2817 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2818
2819 if (aconnector->fake_enable)
2820 aconnector->fake_enable = false;
2821
2822 amdgpu_dm_update_connector_after_detect(aconnector);
2823
2824
2825 drm_modeset_lock_all(dev);
2826 dm_restore_drm_connector_state(dev, connector);
2827 drm_modeset_unlock_all(dev);
2828
2829 drm_kms_helper_hotplug_event(dev);
2830 }
2831 }
2832#ifdef CONFIG_DRM_AMD_DC_HDCP
2833 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2834 if (adev->dm.hdcp_workqueue)
2835 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2836 }
2837#endif
2838
2839 if (dc_link->type != dc_connection_mst_branch)
2840 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2841
2842 mutex_unlock(&aconnector->hpd_lock);
2843}
2844
2845static void register_hpd_handlers(struct amdgpu_device *adev)
2846{
2847 struct drm_device *dev = adev_to_drm(adev);
2848 struct drm_connector *connector;
2849 struct amdgpu_dm_connector *aconnector;
2850 const struct dc_link *dc_link;
2851 struct dc_interrupt_params int_params = {0};
2852
2853 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2854 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2855
2856 list_for_each_entry(connector,
2857 &dev->mode_config.connector_list, head) {
2858
2859 aconnector = to_amdgpu_dm_connector(connector);
2860 dc_link = aconnector->dc_link;
2861
2862 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2863 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2864 int_params.irq_source = dc_link->irq_source_hpd;
2865
2866 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2867 handle_hpd_irq,
2868 (void *) aconnector);
2869 }
2870
2871 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2872
2873 /* Also register for DP short pulse (hpd_rx). */
2874 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2875 int_params.irq_source = dc_link->irq_source_hpd_rx;
2876
2877 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2878 handle_hpd_rx_irq,
2879 (void *) aconnector);
2880 }
2881 }
2882}
2883
2884#if defined(CONFIG_DRM_AMD_DC_SI)
2885/* Register IRQ sources and initialize IRQ callbacks */
2886static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2887{
2888 struct dc *dc = adev->dm.dc;
2889 struct common_irq_params *c_irq_params;
2890 struct dc_interrupt_params int_params = {0};
2891 int r;
2892 int i;
2893 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2894
2895 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2896 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2897
2898 /*
2899 * Actions of amdgpu_irq_add_id():
2900 * 1. Register a set() function with base driver.
2901 * Base driver will call set() function to enable/disable an
2902 * interrupt in DC hardware.
2903 * 2. Register amdgpu_dm_irq_handler().
2904 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2905 * coming from DC hardware.
2906 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2907 * for acknowledging and handling. */
2908
2909 /* Use VBLANK interrupt */
2910 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2911 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2912 if (r) {
2913 DRM_ERROR("Failed to add crtc irq id!\n");
2914 return r;
2915 }
2916
2917 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2918 int_params.irq_source =
2919 dc_interrupt_to_irq_source(dc, i+1 , 0);
2920
2921 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2922
2923 c_irq_params->adev = adev;
2924 c_irq_params->irq_src = int_params.irq_source;
2925
2926 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2927 dm_crtc_high_irq, c_irq_params);
2928 }
2929
2930 /* Use GRPH_PFLIP interrupt */
2931 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2932 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2933 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2934 if (r) {
2935 DRM_ERROR("Failed to add page flip irq id!\n");
2936 return r;
2937 }
2938
2939 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2940 int_params.irq_source =
2941 dc_interrupt_to_irq_source(dc, i, 0);
2942
2943 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2944
2945 c_irq_params->adev = adev;
2946 c_irq_params->irq_src = int_params.irq_source;
2947
2948 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2949 dm_pflip_high_irq, c_irq_params);
2950
2951 }
2952
2953 /* HPD */
2954 r = amdgpu_irq_add_id(adev, client_id,
2955 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2956 if (r) {
2957 DRM_ERROR("Failed to add hpd irq id!\n");
2958 return r;
2959 }
2960
2961 register_hpd_handlers(adev);
2962
2963 return 0;
2964}
2965#endif
2966
2967/* Register IRQ sources and initialize IRQ callbacks */
2968static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2969{
2970 struct dc *dc = adev->dm.dc;
2971 struct common_irq_params *c_irq_params;
2972 struct dc_interrupt_params int_params = {0};
2973 int r;
2974 int i;
2975 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2976
2977 if (adev->asic_type >= CHIP_VEGA10)
2978 client_id = SOC15_IH_CLIENTID_DCE;
2979
2980 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2981 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2982
2983 /*
2984 * Actions of amdgpu_irq_add_id():
2985 * 1. Register a set() function with base driver.
2986 * Base driver will call set() function to enable/disable an
2987 * interrupt in DC hardware.
2988 * 2. Register amdgpu_dm_irq_handler().
2989 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2990 * coming from DC hardware.
2991 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2992 * for acknowledging and handling. */
2993
2994 /* Use VBLANK interrupt */
2995 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2996 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2997 if (r) {
2998 DRM_ERROR("Failed to add crtc irq id!\n");
2999 return r;
3000 }
3001
3002 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3003 int_params.irq_source =
3004 dc_interrupt_to_irq_source(dc, i, 0);
3005
3006 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3007
3008 c_irq_params->adev = adev;
3009 c_irq_params->irq_src = int_params.irq_source;
3010
3011 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3012 dm_crtc_high_irq, c_irq_params);
3013 }
3014
3015 /* Use VUPDATE interrupt */
3016 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3017 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3018 if (r) {
3019 DRM_ERROR("Failed to add vupdate irq id!\n");
3020 return r;
3021 }
3022
3023 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3024 int_params.irq_source =
3025 dc_interrupt_to_irq_source(dc, i, 0);
3026
3027 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3028
3029 c_irq_params->adev = adev;
3030 c_irq_params->irq_src = int_params.irq_source;
3031
3032 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3033 dm_vupdate_high_irq, c_irq_params);
3034 }
3035
3036 /* Use GRPH_PFLIP interrupt */
3037 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3038 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3039 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3040 if (r) {
3041 DRM_ERROR("Failed to add page flip irq id!\n");
3042 return r;
3043 }
3044
3045 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3046 int_params.irq_source =
3047 dc_interrupt_to_irq_source(dc, i, 0);
3048
3049 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3050
3051 c_irq_params->adev = adev;
3052 c_irq_params->irq_src = int_params.irq_source;
3053
3054 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3055 dm_pflip_high_irq, c_irq_params);
3056
3057 }
3058
3059 /* HPD */
3060 r = amdgpu_irq_add_id(adev, client_id,
3061 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3062 if (r) {
3063 DRM_ERROR("Failed to add hpd irq id!\n");
3064 return r;
3065 }
3066
3067 register_hpd_handlers(adev);
3068
3069 return 0;
3070}
3071
3072#if defined(CONFIG_DRM_AMD_DC_DCN)
3073/* Register IRQ sources and initialize IRQ callbacks */
3074static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3075{
3076 struct dc *dc = adev->dm.dc;
3077 struct common_irq_params *c_irq_params;
3078 struct dc_interrupt_params int_params = {0};
3079 int r;
3080 int i;
3081#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3082 static const unsigned int vrtl_int_srcid[] = {
3083 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3084 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3085 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3086 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3087 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3088 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3089 };
3090#endif
3091
3092 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3093 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3094
3095 /*
3096 * Actions of amdgpu_irq_add_id():
3097 * 1. Register a set() function with base driver.
3098 * Base driver will call set() function to enable/disable an
3099 * interrupt in DC hardware.
3100 * 2. Register amdgpu_dm_irq_handler().
3101 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3102 * coming from DC hardware.
3103 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3104 * for acknowledging and handling.
3105 */
3106
3107 /* Use VSTARTUP interrupt */
3108 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3109 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3110 i++) {
3111 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3112
3113 if (r) {
3114 DRM_ERROR("Failed to add crtc irq id!\n");
3115 return r;
3116 }
3117
3118 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3119 int_params.irq_source =
3120 dc_interrupt_to_irq_source(dc, i, 0);
3121
3122 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3123
3124 c_irq_params->adev = adev;
3125 c_irq_params->irq_src = int_params.irq_source;
3126
3127 amdgpu_dm_irq_register_interrupt(
3128 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3129 }
3130
3131 /* Use otg vertical line interrupt */
3132#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3133 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3134 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3135 vrtl_int_srcid[i], &adev->vline0_irq);
3136
3137 if (r) {
3138 DRM_ERROR("Failed to add vline0 irq id!\n");
3139 return r;
3140 }
3141
3142 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3143 int_params.irq_source =
3144 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3145
3146 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3147 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3148 break;
3149 }
3150
3151 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3152 - DC_IRQ_SOURCE_DC1_VLINE0];
3153
3154 c_irq_params->adev = adev;
3155 c_irq_params->irq_src = int_params.irq_source;
3156
3157 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3158 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3159 }
3160#endif
3161
3162 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3163 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3164 * to trigger at end of each vblank, regardless of state of the lock,
3165 * matching DCE behaviour.
3166 */
3167 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3168 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3169 i++) {
3170 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3171
3172 if (r) {
3173 DRM_ERROR("Failed to add vupdate irq id!\n");
3174 return r;
3175 }
3176
3177 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3178 int_params.irq_source =
3179 dc_interrupt_to_irq_source(dc, i, 0);
3180
3181 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3182
3183 c_irq_params->adev = adev;
3184 c_irq_params->irq_src = int_params.irq_source;
3185
3186 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3187 dm_vupdate_high_irq, c_irq_params);
3188 }
3189
3190 /* Use GRPH_PFLIP interrupt */
3191 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3192 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3193 i++) {
3194 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3195 if (r) {
3196 DRM_ERROR("Failed to add page flip irq id!\n");
3197 return r;
3198 }
3199
3200 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3201 int_params.irq_source =
3202 dc_interrupt_to_irq_source(dc, i, 0);
3203
3204 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3205
3206 c_irq_params->adev = adev;
3207 c_irq_params->irq_src = int_params.irq_source;
3208
3209 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3210 dm_pflip_high_irq, c_irq_params);
3211
3212 }
3213
3214 /* HPD */
3215 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3216 &adev->hpd_irq);
3217 if (r) {
3218 DRM_ERROR("Failed to add hpd irq id!\n");
3219 return r;
3220 }
3221
3222 register_hpd_handlers(adev);
3223
3224 return 0;
3225}
3226/* Register Outbox IRQ sources and initialize IRQ callbacks */
3227static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3228{
3229 struct dc *dc = adev->dm.dc;
3230 struct common_irq_params *c_irq_params;
3231 struct dc_interrupt_params int_params = {0};
3232 int r, i;
3233
3234 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3235 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3236
3237 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3238 &adev->dmub_outbox_irq);
3239 if (r) {
3240 DRM_ERROR("Failed to add outbox irq id!\n");
3241 return r;
3242 }
3243
3244 if (dc->ctx->dmub_srv) {
3245 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3246 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3247 int_params.irq_source =
3248 dc_interrupt_to_irq_source(dc, i, 0);
3249
3250 c_irq_params = &adev->dm.dmub_outbox_params[0];
3251
3252 c_irq_params->adev = adev;
3253 c_irq_params->irq_src = int_params.irq_source;
3254
3255 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3256 dm_dmub_outbox1_low_irq, c_irq_params);
3257 }
3258
3259 return 0;
3260}
3261#endif
3262
3263/*
3264 * Acquires the lock for the atomic state object and returns
3265 * the new atomic state.
3266 *
3267 * This should only be called during atomic check.
3268 */
3269static int dm_atomic_get_state(struct drm_atomic_state *state,
3270 struct dm_atomic_state **dm_state)
3271{
3272 struct drm_device *dev = state->dev;
3273 struct amdgpu_device *adev = drm_to_adev(dev);
3274 struct amdgpu_display_manager *dm = &adev->dm;
3275 struct drm_private_state *priv_state;
3276
3277 if (*dm_state)
3278 return 0;
3279
3280 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3281 if (IS_ERR(priv_state))
3282 return PTR_ERR(priv_state);
3283
3284 *dm_state = to_dm_atomic_state(priv_state);
3285
3286 return 0;
3287}
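/*
 * Illustrative caller sketch (assuming a call site inside the atomic-check
 * path): the dm_state pointer starts out NULL and is filled at most once,
 * so repeated calls during the same check are cheap:
 *
 *   struct dm_atomic_state *dm_state = NULL;
 *   int ret = dm_atomic_get_state(state, &dm_state);
 *   if (ret)
 *           return ret;   // -EDEADLK on lock contention, or allocation failure
 *   // dm_state->context is now safe to inspect or modify
 */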
3288
3289 static struct dm_atomic_state *
3290dm_atomic_get_new_state(struct drm_atomic_state *state)
3291{
3292 struct drm_device *dev = state->dev;
3293 struct amdgpu_device *adev = drm_to_adev(dev);
3294 struct amdgpu_display_manager *dm = &adev->dm;
3295 struct drm_private_obj *obj;
3296 struct drm_private_state *new_obj_state;
3297 int i;
3298
3299 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3300 if (obj->funcs == dm->atomic_obj.funcs)
3301 return to_dm_atomic_state(new_obj_state);
3302 }
3303
3304 return NULL;
3305}
3306
eb3dc897
NK
3307static struct drm_private_state *
3308dm_atomic_duplicate_state(struct drm_private_obj *obj)
3309{
3310 struct dm_atomic_state *old_state, *new_state;
3311
3312 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3313 if (!new_state)
3314 return NULL;
3315
3316 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3317
813d20dc
AW
3318 old_state = to_dm_atomic_state(obj->state);
3319
3320 if (old_state && old_state->context)
3321 new_state->context = dc_copy_state(old_state->context);
3322
eb3dc897
NK
3323 if (!new_state->context) {
3324 kfree(new_state);
3325 return NULL;
3326 }
3327
eb3dc897
NK
3328 return &new_state->base;
3329}
3330
3331static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3332 struct drm_private_state *state)
3333{
3334 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3335
3336 if (dm_state && dm_state->context)
3337 dc_release_state(dm_state->context);
3338
3339 kfree(dm_state);
3340}
3341
3342static struct drm_private_state_funcs dm_atomic_state_funcs = {
3343 .atomic_duplicate_state = dm_atomic_duplicate_state,
3344 .atomic_destroy_state = dm_atomic_destroy_state,
3345};
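/*
 * Together these implement copy-on-check for the private object:
 * dm_atomic_duplicate_state() clones the committed DC context so
 * atomic check can mutate it freely, and dm_atomic_destroy_state()
 * drops the clone through dc_release_state() when it is discarded.
 */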
3346
4562236b
HW
3347static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3348{
eb3dc897 3349 struct dm_atomic_state *state;
4562236b
HW
3350 int r;
3351
3352 adev->mode_info.mode_config_initialized = true;
3353
4a580877
LT
3354 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3355 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3356
4a580877
LT
3357 adev_to_drm(adev)->mode_config.max_width = 16384;
3358 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3359
4a580877
LT
3360 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3361 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3362 /* indicates support for immediate flip */
4a580877 3363 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3364
4a580877 3365 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3366
eb3dc897
NK
3367 state = kzalloc(sizeof(*state), GFP_KERNEL);
3368 if (!state)
3369 return -ENOMEM;
3370
813d20dc 3371 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3372 if (!state->context) {
3373 kfree(state);
3374 return -ENOMEM;
3375 }
3376
3377 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3378
4a580877 3379 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3380 &adev->dm.atomic_obj,
eb3dc897
NK
3381 &state->base,
3382 &dm_atomic_state_funcs);
3383
3dc9b1ce 3384 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3385 if (r) {
3386 dc_release_state(state->context);
3387 kfree(state);
4562236b 3388 return r;
b67a468a 3389 }
4562236b 3390
6ce8f316 3391 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3392 if (r) {
3393 dc_release_state(state->context);
3394 kfree(state);
6ce8f316 3395 return r;
b67a468a 3396 }
6ce8f316 3397
4562236b
HW
3398 return 0;
3399}
3400
206bbafe
DF
3401#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3402#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3403#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3404
4562236b
HW
3405#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3406 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3407
206bbafe
DF
3408static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3409{
3410#if defined(CONFIG_ACPI)
3411 struct amdgpu_dm_backlight_caps caps;
3412
58965855
FS
3413 memset(&caps, 0, sizeof(caps));
3414
206bbafe
DF
3415 if (dm->backlight_caps.caps_valid)
3416 return;
3417
f9b7f370 3418 amdgpu_acpi_get_backlight_caps(&caps);
206bbafe 3419 if (caps.caps_valid) {
94562810
RS
3420 dm->backlight_caps.caps_valid = true;
3421 if (caps.aux_support)
3422 return;
206bbafe
DF
3423 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3424 dm->backlight_caps.max_input_signal = caps.max_input_signal;
206bbafe
DF
3425 } else {
3426 dm->backlight_caps.min_input_signal =
3427 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3428 dm->backlight_caps.max_input_signal =
3429 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3430 }
3431#else
94562810
RS
3432 if (dm->backlight_caps.aux_support)
3433 return;
3434
8bcbc9ef
DF
3435 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3436 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3437#endif
3438}
3439
69d9f427
AM
3440static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3441 unsigned *min, unsigned *max)
94562810 3442{
94562810 3443 if (!caps)
69d9f427 3444 return 0;
94562810 3445
69d9f427
AM
3446 if (caps->aux_support) {
3447 // Firmware limits are in nits, DC API wants millinits.
3448 *max = 1000 * caps->aux_max_input_signal;
3449 *min = 1000 * caps->aux_min_input_signal;
94562810 3450 } else {
69d9f427
AM
3451 // Firmware limits are 8-bit, PWM control is 16-bit.
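		// Multiplying by 0x101 (257) replicates the byte into both
		// halves of the 16-bit value: 0xFF * 0x101 == 0xFFFF, so a
		// firmware limit of 255 maps exactly to full PWM duty cycle.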
3452 *max = 0x101 * caps->max_input_signal;
3453 *min = 0x101 * caps->min_input_signal;
94562810 3454 }
69d9f427
AM
3455 return 1;
3456}
94562810 3457
69d9f427
AM
3458static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3459 uint32_t brightness)
3460{
3461 unsigned min, max;
94562810 3462
69d9f427
AM
3463 if (!get_brightness_range(caps, &min, &max))
3464 return brightness;
3465
3466 // Rescale 0..255 to min..max
3467 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3468 AMDGPU_MAX_BL_LEVEL);
3469}
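/*
 * Example: with the default PWM limits (min_input_signal == 12,
 * max_input_signal == 255) the range is min == 3084, max == 65535, so
 * user brightness 0 maps to 3084, 255 to 65535, and 128 to
 * 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) == 34432.
 */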
3470
3471static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3472 uint32_t brightness)
3473{
3474 unsigned min, max;
3475
3476 if (!get_brightness_range(caps, &min, &max))
3477 return brightness;
3478
3479 if (brightness < min)
3480 return 0;
3481 // Rescale min..max to 0..255
3482 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3483 max - min);
94562810
RS
3484}
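/*
 * Inverse of convert_brightness_from_user(): hardware values at or
 * below min map to 0 and max maps back to AMDGPU_MAX_BL_LEVEL, so the
 * round trip through both helpers is stable at the endpoints.
 */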
3485
3d6c9164
AD
3486static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3487 u32 user_brightness)
4562236b 3488{
206bbafe 3489 struct amdgpu_dm_backlight_caps caps;
118b4627 3490 struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3d6c9164 3491 u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
94562810 3492 bool rc;
118b4627 3493 int i;
4562236b 3494
206bbafe
DF
3495 amdgpu_dm_update_backlight_caps(dm);
3496 caps = dm->backlight_caps;
94562810 3497
3d6c9164
AD
3498 for (i = 0; i < dm->num_of_edps; i++) {
3499 dm->brightness[i] = user_brightness;
3500 brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
118b4627 3501 link[i] = (struct dc_link *)dm->backlight_link[i];
3d6c9164 3502 }
94562810 3503
3d6c9164 3504 /* Change brightness based on AUX property */
118b4627
ML
3505 if (caps.aux_support) {
3506 for (i = 0; i < dm->num_of_edps; i++) {
3d6c9164 3507 rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
118b4627
ML
3508 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3509 if (!rc) {
cd11b58c 3510 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
118b4627
ML
3511 break;
3512 }
3513 }
3514 } else {
3515 for (i = 0; i < dm->num_of_edps; i++) {
3d6c9164 3516 rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
118b4627 3517 if (!rc) {
cd11b58c 3518 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
118b4627
ML
3519 break;
3520 }
3521 }
3522 }
94562810
RS
3523
3524 return rc ? 0 : 1;
4562236b
HW
3525}
3526
3d6c9164 3527static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4562236b 3528{
620a0d27 3529 struct amdgpu_display_manager *dm = bl_get_data(bd);
3d6c9164
AD
3530
3531 amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3532
3533 return 0;
3534}
3535
3536static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3537{
0ad3e64e
AD
3538 struct amdgpu_dm_backlight_caps caps;
3539
3540 amdgpu_dm_update_backlight_caps(dm);
3541 caps = dm->backlight_caps;
620a0d27 3542
0ad3e64e 3543 if (caps.aux_support) {
118b4627 3544 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
0ad3e64e
AD
3545 u32 avg, peak;
3546 bool rc;
3547
3548 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3549 if (!rc)
3d6c9164 3550 return dm->brightness[0];
0ad3e64e
AD
3551 return convert_brightness_to_user(&caps, avg);
3552 } else {
118b4627 3553 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
0ad3e64e
AD
3554
3555 if (ret == DC_ERROR_UNEXPECTED)
3d6c9164 3556 return dm->brightness[0];
0ad3e64e
AD
3557 return convert_brightness_to_user(&caps, ret);
3558 }
4562236b
HW
3559}
3560
3d6c9164
AD
3561static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3562{
3563 struct amdgpu_display_manager *dm = bl_get_data(bd);
3564
3565 return amdgpu_dm_backlight_get_level(dm);
3566}
3567
4562236b 3568static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 3569 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
3570 .get_brightness = amdgpu_dm_backlight_get_brightness,
3571 .update_status = amdgpu_dm_backlight_update_status,
3572};
3573
7578ecda
AD
3574static void
3575amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
3576{
3577 char bl_name[16];
3578 struct backlight_properties props = { 0 };
3d6c9164 3579 int i;
4562236b 3580
206bbafe 3581 amdgpu_dm_update_backlight_caps(dm);
3d6c9164
AD
3582 for (i = 0; i < dm->num_of_edps; i++)
3583 dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
206bbafe 3584
4562236b 3585 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 3586 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
3587 props.type = BACKLIGHT_RAW;
3588
3589 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4a580877 3590 adev_to_drm(dm->adev)->primary->index);
4562236b
HW
3591
3592 dm->backlight_dev = backlight_device_register(bl_name,
4a580877
LT
3593 adev_to_drm(dm->adev)->dev,
3594 dm,
3595 &amdgpu_dm_backlight_ops,
3596 &props);
4562236b 3597
74baea42 3598 if (IS_ERR(dm->backlight_dev))
4562236b
HW
3599 DRM_ERROR("DM: Backlight registration failed!\n");
3600 else
f1ad2f5e 3601 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b
HW
3602}
3603
3604#endif
3605
df534fff 3606static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 3607 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
3608 enum drm_plane_type plane_type,
3609 const struct dc_plane_cap *plane_cap)
df534fff 3610{
f180b4bc 3611 struct drm_plane *plane;
df534fff
S
3612 unsigned long possible_crtcs;
3613 int ret = 0;
3614
f180b4bc 3615 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
3616 if (!plane) {
3617 DRM_ERROR("KMS: Failed to allocate plane\n");
3618 return -ENOMEM;
3619 }
b2fddb13 3620 plane->type = plane_type;
df534fff
S
3621
3622 /*
b2fddb13
NK
3623 * HACK: IGT tests expect that the primary plane for a CRTC
3624 * can only have one possible CRTC. Only expose support for
3625 * all CRTCs to planes that are not going to be used as a
3626 * primary plane for a CRTC - like overlay or underlay planes.
df534fff
S
3627 */
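	/*
	 * For example, with max_streams == 4, plane_id 2 gets
	 * possible_crtcs == 0b0100 (bound to CRTC 2 only), while an
	 * overlay plane with plane_id 4 gets 0xff (any CRTC).
	 */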
3628 possible_crtcs = 1 << plane_id;
3629 if (plane_id >= dm->dc->caps.max_streams)
3630 possible_crtcs = 0xff;
3631
cc1fec57 3632 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
3633
3634 if (ret) {
3635 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 3636 kfree(plane);
df534fff
S
3637 return ret;
3638 }
3639
54087768
NK
3640 if (mode_info)
3641 mode_info->planes[plane_id] = plane;
3642
df534fff
S
3643 return ret;
3644}
3645
89fc8d4e
HW
3646
3647static void register_backlight_device(struct amdgpu_display_manager *dm,
3648 struct dc_link *link)
3649{
3650#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3651 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3652
3653 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3654 link->type != dc_connection_none) {
1f6010a9
DF
3655 /*
3656 * Even if registration fails, we should continue with
89fc8d4e
HW
3657 * DM initialization because not having a backlight control
3658 * is better than a black screen.
3659 */
118b4627
ML
3660 if (!dm->backlight_dev)
3661 amdgpu_dm_register_backlight_device(dm);
89fc8d4e 3662
118b4627
ML
3663 if (dm->backlight_dev) {
3664 dm->backlight_link[dm->num_of_edps] = link;
3665 dm->num_of_edps++;
3666 }
89fc8d4e
HW
3667 }
3668#endif
3669}
3670
3671
1f6010a9
DF
3672/*
3673 * In this architecture, the association
4562236b
HW
3674 * connector -> encoder -> crtc
3675 * is not really required. The crtc and connector will hold the
3676 * display_index as an abstraction to use with the DAL component
3677 *
3678 * Returns 0 on success
3679 */
7578ecda 3680static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
3681{
3682 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 3683 int32_t i;
c84dec2f 3684 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 3685 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 3686 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 3687 uint32_t link_cnt;
cc1fec57 3688 int32_t primary_planes;
fbbdadf2 3689 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 3690 const struct dc_plane_cap *plane;
4562236b 3691
d58159de
AD
3692 dm->display_indexes_num = dm->dc->caps.max_streams;
3693 /* Update the actual used number of crtc */
3694 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3695
4562236b 3696 link_cnt = dm->dc->caps.max_links;
4562236b
HW
3697 if (amdgpu_dm_mode_config_init(dm->adev)) {
3698 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 3699 return -EINVAL;
4562236b
HW
3700 }
3701
b2fddb13
NK
3702 /* There is one primary plane per CRTC */
3703 primary_planes = dm->dc->caps.max_streams;
54087768 3704 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 3705
b2fddb13
NK
3706 /*
3707 * Initialize primary planes, implicit planes for legacy IOCTLs.
3708 * Order is reversed to match iteration order in atomic check.
3709 */
3710 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
3711 plane = &dm->dc->caps.planes[i];
3712
b2fddb13 3713 if (initialize_plane(dm, mode_info, i,
cc1fec57 3714 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 3715 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 3716 goto fail;
d4e13b0d 3717 }
df534fff 3718 }
92f3ac40 3719
0d579c7e
NK
3720 /*
3721 * Initialize overlay planes, index starting after primary planes.
3722 * These planes have a higher DRM index than the primary planes since
3723 * they should be considered as having a higher z-order.
3724 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
3725 *
3726 * Only support DCN for now, and only expose one so we don't encourage
3727 * userspace to use up all the pipes.
0d579c7e 3728 */
cc1fec57
NK
3729 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3730 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3731
3732 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3733 continue;
3734
3735 if (!plane->blends_with_above || !plane->blends_with_below)
3736 continue;
3737
ea36ad34 3738 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
3739 continue;
3740
54087768 3741 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 3742 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 3743 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 3744 goto fail;
d4e13b0d 3745 }
cc1fec57
NK
3746
3747 /* Only create one overlay plane. */
3748 break;
d4e13b0d 3749 }
4562236b 3750
d4e13b0d 3751 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 3752 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 3753 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 3754 goto fail;
4562236b 3755 }
4562236b 3756
50610b74 3757#if defined(CONFIG_DRM_AMD_DC_DCN)
81927e28
JS
3758 /* Use Outbox interrupt */
3759 switch (adev->asic_type) {
81927e28
JS
3760 case CHIP_SIENNA_CICHLID:
3761 case CHIP_NAVY_FLOUNDER:
81927e28
JS
3762 case CHIP_RENOIR:
3763 if (register_outbox_irq_handlers(dm->adev)) {
3764 DRM_ERROR("DM: Failed to initialize IRQ\n");
3765 goto fail;
3766 }
3767 break;
3768 default:
3769 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3770 }
50610b74 3771#endif
81927e28 3772
4562236b
HW
3773 /* Loop over all connectors on the board */
3774 for (i = 0; i < link_cnt; i++) {
89fc8d4e 3775 struct dc_link *link = NULL;
4562236b
HW
3776
3777 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3778 DRM_ERROR(
3779 "KMS: Cannot support more than %d display indexes\n",
3780 AMDGPU_DM_MAX_DISPLAY_INDEX);
3781 continue;
3782 }
3783
3784 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3785 if (!aconnector)
cd8a2ae8 3786 goto fail;
4562236b
HW
3787
3788 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 3789 if (!aencoder)
cd8a2ae8 3790 goto fail;
4562236b
HW
3791
3792 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3793 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 3794 goto fail;
4562236b
HW
3795 }
3796
3797 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3798 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 3799 goto fail;
4562236b
HW
3800 }
3801
89fc8d4e
HW
3802 link = dc_get_link_at_index(dm->dc, i);
3803
fbbdadf2
BL
3804 if (!dc_link_detect_sink(link, &new_connection_type))
3805 DRM_ERROR("KMS: Failed to detect connector\n");
3806
3807 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3808 emulated_link_detect(link);
3809 amdgpu_dm_update_connector_after_detect(aconnector);
3810
3811 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 3812 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 3813 register_backlight_device(dm, link);
397a9bc5
RL
3814 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3815 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
3816 }
3817
3818
4562236b
HW
3819 }
3820
3821 /* Software is initialized. Now we can register interrupt handlers. */
3822 switch (adev->asic_type) {
55e56389
MR
3823#if defined(CONFIG_DRM_AMD_DC_SI)
3824 case CHIP_TAHITI:
3825 case CHIP_PITCAIRN:
3826 case CHIP_VERDE:
3827 case CHIP_OLAND:
3828 if (dce60_register_irq_handlers(dm->adev)) {
3829 DRM_ERROR("DM: Failed to initialize IRQ\n");
3830 goto fail;
3831 }
3832 break;
3833#endif
4562236b
HW
3834 case CHIP_BONAIRE:
3835 case CHIP_HAWAII:
cd4b356f
AD
3836 case CHIP_KAVERI:
3837 case CHIP_KABINI:
3838 case CHIP_MULLINS:
4562236b
HW
3839 case CHIP_TONGA:
3840 case CHIP_FIJI:
3841 case CHIP_CARRIZO:
3842 case CHIP_STONEY:
3843 case CHIP_POLARIS11:
3844 case CHIP_POLARIS10:
b264d345 3845 case CHIP_POLARIS12:
7737de91 3846 case CHIP_VEGAM:
2c8ad2d5 3847 case CHIP_VEGA10:
2325ff30 3848 case CHIP_VEGA12:
1fe6bf2f 3849 case CHIP_VEGA20:
4562236b
HW
3850 if (dce110_register_irq_handlers(dm->adev)) {
3851 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3852 goto fail;
4562236b
HW
3853 }
3854 break;
b86a1aa3 3855#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 3856 case CHIP_RAVEN:
fbd2afe5 3857 case CHIP_NAVI12:
476e955d 3858 case CHIP_NAVI10:
fce651e3 3859 case CHIP_NAVI14:
30221ad8 3860 case CHIP_RENOIR:
79037324 3861 case CHIP_SIENNA_CICHLID:
a6c5308f 3862 case CHIP_NAVY_FLOUNDER:
2a411205 3863 case CHIP_DIMGREY_CAVEFISH:
656fe9b6 3864 case CHIP_BEIGE_GOBY:
469989ca 3865 case CHIP_VANGOGH:
ff5ef992
AD
3866 if (dcn10_register_irq_handlers(dm->adev)) {
3867 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 3868 goto fail;
ff5ef992
AD
3869 }
3870 break;
3871#endif
4562236b 3872 default:
e63f8673 3873 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
cd8a2ae8 3874 goto fail;
4562236b
HW
3875 }
3876
4562236b 3877 return 0;
cd8a2ae8 3878fail:
4562236b 3879 kfree(aencoder);
4562236b 3880 kfree(aconnector);
54087768 3881
59d0f396 3882 return -EINVAL;
4562236b
HW
3883}
3884
7578ecda 3885static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b
HW
3886{
3887 drm_mode_config_cleanup(dm->ddev);
eb3dc897 3888 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
3889 return;
3890}
3891
3892/******************************************************************************
3893 * amdgpu_display_funcs functions
3894 *****************************************************************************/
3895
1f6010a9 3896/*
4562236b
HW
3897 * dm_bandwidth_update - program display watermarks
3898 *
3899 * @adev: amdgpu_device pointer
3900 *
3901 * Calculate and program the display watermarks and line buffer allocation.
3902 */
3903static void dm_bandwidth_update(struct amdgpu_device *adev)
3904{
49c07a99 3905 /* TODO: implement later */
4562236b
HW
3906}
3907
39cc5be2 3908static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
3909 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3910 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
3911 .backlight_set_level = NULL, /* never called for DC */
3912 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
3913 .hpd_sense = NULL,/* called unconditionally */
3914 .hpd_set_polarity = NULL, /* called unconditionally */
3915 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3916 .page_flip_get_scanoutpos =
3917 dm_crtc_get_scanoutpos,/* called unconditionally */
3918 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3919 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
3920};
3921
3922#if defined(CONFIG_DEBUG_KERNEL_DC)
3923
3ee6b26b
AD
3924static ssize_t s3_debug_store(struct device *device,
3925 struct device_attribute *attr,
3926 const char *buf,
3927 size_t count)
4562236b
HW
3928{
3929 int ret;
3930 int s3_state;
ef1de361 3931 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 3932 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
3933
3934 ret = kstrtoint(buf, 0, &s3_state);
3935
3936 if (ret == 0) {
3937 if (s3_state) {
3938 dm_resume(adev);
4a580877 3939 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
3940 } else
3941 dm_suspend(adev);
3942 }
3943
3944 return ret == 0 ? count : 0;
3945}
3946
3947DEVICE_ATTR_WO(s3_debug);
3948
3949#endif
3950
3951static int dm_early_init(void *handle)
3952{
3953 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3954
4562236b 3955 switch (adev->asic_type) {
55e56389
MR
3956#if defined(CONFIG_DRM_AMD_DC_SI)
3957 case CHIP_TAHITI:
3958 case CHIP_PITCAIRN:
3959 case CHIP_VERDE:
3960 adev->mode_info.num_crtc = 6;
3961 adev->mode_info.num_hpd = 6;
3962 adev->mode_info.num_dig = 6;
3963 break;
3964 case CHIP_OLAND:
3965 adev->mode_info.num_crtc = 2;
3966 adev->mode_info.num_hpd = 2;
3967 adev->mode_info.num_dig = 2;
3968 break;
3969#endif
4562236b
HW
3970 case CHIP_BONAIRE:
3971 case CHIP_HAWAII:
3972 adev->mode_info.num_crtc = 6;
3973 adev->mode_info.num_hpd = 6;
3974 adev->mode_info.num_dig = 6;
4562236b 3975 break;
cd4b356f
AD
3976 case CHIP_KAVERI:
3977 adev->mode_info.num_crtc = 4;
3978 adev->mode_info.num_hpd = 6;
3979 adev->mode_info.num_dig = 7;
cd4b356f
AD
3980 break;
3981 case CHIP_KABINI:
3982 case CHIP_MULLINS:
3983 adev->mode_info.num_crtc = 2;
3984 adev->mode_info.num_hpd = 6;
3985 adev->mode_info.num_dig = 6;
cd4b356f 3986 break;
4562236b
HW
3987 case CHIP_FIJI:
3988 case CHIP_TONGA:
3989 adev->mode_info.num_crtc = 6;
3990 adev->mode_info.num_hpd = 6;
3991 adev->mode_info.num_dig = 7;
4562236b
HW
3992 break;
3993 case CHIP_CARRIZO:
3994 adev->mode_info.num_crtc = 3;
3995 adev->mode_info.num_hpd = 6;
3996 adev->mode_info.num_dig = 9;
4562236b
HW
3997 break;
3998 case CHIP_STONEY:
3999 adev->mode_info.num_crtc = 2;
4000 adev->mode_info.num_hpd = 6;
4001 adev->mode_info.num_dig = 9;
4562236b
HW
4002 break;
4003 case CHIP_POLARIS11:
b264d345 4004 case CHIP_POLARIS12:
4562236b
HW
4005 adev->mode_info.num_crtc = 5;
4006 adev->mode_info.num_hpd = 5;
4007 adev->mode_info.num_dig = 5;
4562236b
HW
4008 break;
4009 case CHIP_POLARIS10:
7737de91 4010 case CHIP_VEGAM:
4562236b
HW
4011 adev->mode_info.num_crtc = 6;
4012 adev->mode_info.num_hpd = 6;
4013 adev->mode_info.num_dig = 6;
4562236b 4014 break;
2c8ad2d5 4015 case CHIP_VEGA10:
2325ff30 4016 case CHIP_VEGA12:
1fe6bf2f 4017 case CHIP_VEGA20:
2c8ad2d5
AD
4018 adev->mode_info.num_crtc = 6;
4019 adev->mode_info.num_hpd = 6;
4020 adev->mode_info.num_dig = 6;
4021 break;
b86a1aa3 4022#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992 4023 case CHIP_RAVEN:
20f2ffe5
AD
4024 case CHIP_RENOIR:
4025 case CHIP_VANGOGH:
ff5ef992
AD
4026 adev->mode_info.num_crtc = 4;
4027 adev->mode_info.num_hpd = 4;
4028 adev->mode_info.num_dig = 4;
ff5ef992 4029 break;
476e955d 4030 case CHIP_NAVI10:
fbd2afe5 4031 case CHIP_NAVI12:
79037324 4032 case CHIP_SIENNA_CICHLID:
a6c5308f 4033 case CHIP_NAVY_FLOUNDER:
476e955d
HW
4034 adev->mode_info.num_crtc = 6;
4035 adev->mode_info.num_hpd = 6;
4036 adev->mode_info.num_dig = 6;
4037 break;
fce651e3 4038 case CHIP_NAVI14:
2a411205 4039 case CHIP_DIMGREY_CAVEFISH:
fce651e3
BL
4040 adev->mode_info.num_crtc = 5;
4041 adev->mode_info.num_hpd = 5;
4042 adev->mode_info.num_dig = 5;
4043 break;
656fe9b6
AP
4044 case CHIP_BEIGE_GOBY:
4045 adev->mode_info.num_crtc = 2;
4046 adev->mode_info.num_hpd = 2;
4047 adev->mode_info.num_dig = 2;
4048 break;
20f2ffe5 4049#endif
4562236b 4050 default:
e63f8673 4051 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4562236b
HW
4052 return -EINVAL;
4053 }
4054
c8dd5715
MD
4055 amdgpu_dm_set_irq_funcs(adev);
4056
39cc5be2
AD
4057 if (adev->mode_info.funcs == NULL)
4058 adev->mode_info.funcs = &dm_display_funcs;
4059
1f6010a9
DF
4060 /*
4061 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 4062 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
4063 * amdgpu_device_init()
4064 */
4562236b
HW
4065#if defined(CONFIG_DEBUG_KERNEL_DC)
4066 device_create_file(
4a580877 4067 adev_to_drm(adev)->dev,
4562236b
HW
4068 &dev_attr_s3_debug);
4069#endif
4070
4071 return 0;
4072}
4073
9b690ef3 4074static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
4075 struct dc_stream_state *new_stream,
4076 struct dc_stream_state *old_stream)
9b690ef3 4077{
2afda735 4078 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4079}
4080
4081static bool modereset_required(struct drm_crtc_state *crtc_state)
4082{
2afda735 4083 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4084}
4085
7578ecda 4086static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
4087{
4088 drm_encoder_cleanup(encoder);
4089 kfree(encoder);
4090}
4091
4092static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4093 .destroy = amdgpu_dm_encoder_destroy,
4094};
4095
e7b07cee 4096
6300b3bd
MK
4097static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4098 struct drm_framebuffer *fb,
4099 int *min_downscale, int *max_upscale)
4100{
4101 struct amdgpu_device *adev = drm_to_adev(dev);
4102 struct dc *dc = adev->dm.dc;
4103 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4104 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4105
4106 switch (fb->format->format) {
4107 case DRM_FORMAT_P010:
4108 case DRM_FORMAT_NV12:
4109 case DRM_FORMAT_NV21:
4110 *max_upscale = plane_cap->max_upscale_factor.nv12;
4111 *min_downscale = plane_cap->max_downscale_factor.nv12;
4112 break;
4113
4114 case DRM_FORMAT_XRGB16161616F:
4115 case DRM_FORMAT_ARGB16161616F:
4116 case DRM_FORMAT_XBGR16161616F:
4117 case DRM_FORMAT_ABGR16161616F:
4118 *max_upscale = plane_cap->max_upscale_factor.fp16;
4119 *min_downscale = plane_cap->max_downscale_factor.fp16;
4120 break;
4121
4122 default:
4123 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4124 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4125 break;
4126 }
4127
4128 /*
4129 * A factor of 1 in the plane_cap means scaling is not allowed, i.e.
4130 * use a scaling factor of 1.0 == 1000 units.
4131 */
4132 if (*max_upscale == 1)
4133 *max_upscale = 1000;
4134
4135 if (*min_downscale == 1)
4136 *min_downscale = 1000;
4137}
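/*
 * The limits are in the 1/1000th units used by fill_dc_scaling_info()
 * below: a min_downscale of 250 allows shrinking to 25% of the source
 * size, a max_upscale of 16000 allows a 16x enlargement.
 */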
4138
4139
695af5f9
NK
4140static int fill_dc_scaling_info(const struct drm_plane_state *state,
4141 struct dc_scaling_info *scaling_info)
e7b07cee 4142{
6300b3bd 4143 int scale_w, scale_h, min_downscale, max_upscale;
e7b07cee 4144
695af5f9 4145 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 4146
695af5f9
NK
4147 /* Source is 16.16 fixed point; the fractional part is ignored for now... */
4148 scaling_info->src_rect.x = state->src_x >> 16;
4149 scaling_info->src_rect.y = state->src_y >> 16;
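	/* E.g. a source width of 1920 is encoded by DRM as 1920 << 16 == 125829120. */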
e7b07cee 4150
d89f6048
HW
4151 /*
4152 * For reasons we don't (yet) fully understand a non-zero
4153 * src_y coordinate into an NV12 buffer can cause a
4154 * system hang. To avoid hangs (and maybe be overly cautious)
4155 * let's reject both non-zero src_x and src_y.
4156 *
4157 * We currently know of only one use-case to reproduce a
4158 * scenario with non-zero src_x and src_y for NV12, which
4159 * is to gesture the YouTube Android app into full screen
4160 * on ChromeOS.
4161 */
4162 if (state->fb &&
4163 state->fb->format->format == DRM_FORMAT_NV12 &&
4164 (scaling_info->src_rect.x != 0 ||
4165 scaling_info->src_rect.y != 0))
4166 return -EINVAL;
4167
695af5f9
NK
4168 scaling_info->src_rect.width = state->src_w >> 16;
4169 if (scaling_info->src_rect.width == 0)
4170 return -EINVAL;
4171
4172 scaling_info->src_rect.height = state->src_h >> 16;
4173 if (scaling_info->src_rect.height == 0)
4174 return -EINVAL;
4175
4176 scaling_info->dst_rect.x = state->crtc_x;
4177 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
4178
4179 if (state->crtc_w == 0)
695af5f9 4180 return -EINVAL;
e7b07cee 4181
695af5f9 4182 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
4183
4184 if (state->crtc_h == 0)
695af5f9 4185 return -EINVAL;
e7b07cee 4186
695af5f9 4187 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 4188
695af5f9
NK
4189 /* DRM doesn't specify clipping on destination output. */
4190 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 4191
6300b3bd
MK
4192 /* Validate scaling per-format with DC plane caps */
4193 if (state->plane && state->plane->dev && state->fb) {
4194 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4195 &min_downscale, &max_upscale);
4196 } else {
4197 min_downscale = 250;
4198 max_upscale = 16000;
4199 }
4200
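	/*
	 * The checks below use the same 1/1000th units: scaling a
	 * 1920x1080 source onto a 960x540 plane gives
	 * scale_w == scale_h == 500, which passes the default
	 * min_downscale of 250.
	 */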
6491f0c0
NK
4201 scale_w = scaling_info->dst_rect.width * 1000 /
4202 scaling_info->src_rect.width;
e7b07cee 4203
6300b3bd 4204 if (scale_w < min_downscale || scale_w > max_upscale)
6491f0c0
NK
4205 return -EINVAL;
4206
4207 scale_h = scaling_info->dst_rect.height * 1000 /
4208 scaling_info->src_rect.height;
4209
6300b3bd 4210 if (scale_h < min_downscale || scale_h > max_upscale)
6491f0c0
NK
4211 return -EINVAL;
4212
695af5f9
NK
4213 /*
4214 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
4215 * assume reasonable defaults based on the format.
4216 */
e7b07cee 4217
695af5f9 4218 return 0;
4562236b 4219}
695af5f9 4220
a3241991
BN
4221static void
4222fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4223 uint64_t tiling_flags)
e7b07cee 4224{
a3241991
BN
4225 /* Fill GFX8 params */
4226 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4227 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 4228
a3241991
BN
4229 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4230 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4231 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4232 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4233 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 4234
a3241991
BN
4235 /* XXX fix me for VI */
4236 tiling_info->gfx8.num_banks = num_banks;
4237 tiling_info->gfx8.array_mode =
4238 DC_ARRAY_2D_TILED_THIN1;
4239 tiling_info->gfx8.tile_split = tile_split;
4240 tiling_info->gfx8.bank_width = bankw;
4241 tiling_info->gfx8.bank_height = bankh;
4242 tiling_info->gfx8.tile_aspect = mtaspect;
4243 tiling_info->gfx8.tile_mode =
4244 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4245 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4246 == DC_ARRAY_1D_TILED_THIN1) {
4247 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
4248 }
4249
a3241991
BN
4250 tiling_info->gfx8.pipe_config =
4251 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
4252}
4253
a3241991
BN
4254static void
4255fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4256 union dc_tiling_info *tiling_info)
4257{
4258 tiling_info->gfx9.num_pipes =
4259 adev->gfx.config.gb_addr_config_fields.num_pipes;
4260 tiling_info->gfx9.num_banks =
4261 adev->gfx.config.gb_addr_config_fields.num_banks;
4262 tiling_info->gfx9.pipe_interleave =
4263 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4264 tiling_info->gfx9.num_shader_engines =
4265 adev->gfx.config.gb_addr_config_fields.num_se;
4266 tiling_info->gfx9.max_compressed_frags =
4267 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4268 tiling_info->gfx9.num_rb_per_se =
4269 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4270 tiling_info->gfx9.shaderEnable = 1;
a3241991
BN
4271 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4272 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4273 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
656fe9b6 4274 adev->asic_type == CHIP_BEIGE_GOBY ||
a3241991
BN
4275 adev->asic_type == CHIP_VANGOGH)
4276 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
7df7e505
NK
4277}
4278
695af5f9 4279static int
a3241991
BN
4280validate_dcc(struct amdgpu_device *adev,
4281 const enum surface_pixel_format format,
4282 const enum dc_rotation_angle rotation,
4283 const union dc_tiling_info *tiling_info,
4284 const struct dc_plane_dcc_param *dcc,
4285 const struct dc_plane_address *address,
4286 const struct plane_size *plane_size)
7df7e505
NK
4287{
4288 struct dc *dc = adev->dm.dc;
8daa1218
NC
4289 struct dc_dcc_surface_param input;
4290 struct dc_surface_dcc_cap output;
7df7e505 4291
8daa1218
NC
4292 memset(&input, 0, sizeof(input));
4293 memset(&output, 0, sizeof(output));
4294
a3241991 4295 if (!dcc->enable)
87b7ebc2
RS
4296 return 0;
4297
a3241991
BN
4298 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4299 !dc->cap_funcs.get_dcc_compression_cap)
09e5665a 4300 return -EINVAL;
7df7e505 4301
695af5f9 4302 input.format = format;
12e2b2d4
DL
4303 input.surface_size.width = plane_size->surface_size.width;
4304 input.surface_size.height = plane_size->surface_size.height;
695af5f9 4305 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 4306
695af5f9 4307 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 4308 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 4309 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
4310 input.scan = SCAN_DIRECTION_VERTICAL;
4311
4312 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 4313 return -EINVAL;
7df7e505
NK
4314
4315 if (!output.capable)
09e5665a 4316 return -EINVAL;
7df7e505 4317
a3241991
BN
4318 if (dcc->independent_64b_blks == 0 &&
4319 output.grph.rgb.independent_64b_blks != 0)
09e5665a 4320 return -EINVAL;
7df7e505 4321
a3241991
BN
4322 return 0;
4323}
4324
37384b3f
BN
4325static bool
4326modifier_has_dcc(uint64_t modifier)
4327{
4328 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4329}
4330
4331static unsigned
4332modifier_gfx9_swizzle_mode(uint64_t modifier)
4333{
4334 if (modifier == DRM_FORMAT_MOD_LINEAR)
4335 return 0;
4336
4337 return AMD_FMT_MOD_GET(TILE, modifier);
4338}
4339
dfbbfe3c
BN
4340static const struct drm_format_info *
4341amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4342{
816853f9 4343 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
4344}
4345
37384b3f
BN
4346static void
4347fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4348 union dc_tiling_info *tiling_info,
4349 uint64_t modifier)
4350{
4351 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4352 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4353 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4354 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4355
4356 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4357
4358 if (!IS_AMD_FMT_MOD(modifier))
4359 return;
4360
4361 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4362 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4363
4364 if (adev->family >= AMDGPU_FAMILY_NV) {
4365 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4366 } else {
4367 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4368
4369 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4370 }
4371}
4372
faa37f54
BN
4373enum dm_micro_swizzle {
4374 MICRO_SWIZZLE_Z = 0,
4375 MICRO_SWIZZLE_S = 1,
4376 MICRO_SWIZZLE_D = 2,
4377 MICRO_SWIZZLE_R = 3
4378};
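/*
 * The micro-swizzle is the low 2 bits of the GFX9+ swizzle mode taken
 * from the modifier's TILE field; dm_plane_format_mod_supported()
 * below uses it to filter _D (display) micro tiling by bytes per
 * pixel.
 */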
4379
4380static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4381 uint32_t format,
4382 uint64_t modifier)
4383{
4384 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4385 const struct drm_format_info *info = drm_format_info(format);
fe180178 4386 int i;
faa37f54
BN
4387
4388 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4389
4390 if (!info)
4391 return false;
4392
4393 /*
fe180178
QZ
4394 * We always have to allow these modifiers:
4395 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4396 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
faa37f54 4397 */
fe180178
QZ
4398 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4399 modifier == DRM_FORMAT_MOD_INVALID) {
faa37f54 4400 return true;
fe180178 4401 }
faa37f54 4402
fe180178
QZ
4403 /* Check that the modifier is on the list of the plane's supported modifiers. */
4404 for (i = 0; i < plane->modifier_count; i++) {
4405 if (modifier == plane->modifiers[i])
4406 break;
4407 }
4408 if (i == plane->modifier_count)
faa37f54
BN
4409 return false;
4410
4411 /*
4412 * For D swizzle the canonical modifier depends on the bpp, so check
4413 * it here.
4414 */
4415 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4416 adev->family >= AMDGPU_FAMILY_NV) {
4417 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4418 return false;
4419 }
4420
4421 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4422 info->cpp[0] < 8)
4423 return false;
4424
4425 if (modifier_has_dcc(modifier)) {
4426 /* Per radeonsi comments 16/64 bpp are more complicated. */
4427 if (info->cpp[0] != 4)
4428 return false;
951796f2
SS
4429 /* We support multi-planar formats, but not when combined with
4430 * additional DCC metadata planes. */
4431 if (info->num_planes > 1)
4432 return false;
faa37f54
BN
4433 }
4434
4435 return true;
4436}
4437
4438static void
4439add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4440{
4441 if (!*mods)
4442 return;
4443
4444 if (*cap - *size < 1) {
4445 uint64_t new_cap = *cap * 2;
4446 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4447
4448 if (!new_mods) {
4449 kfree(*mods);
4450 *mods = NULL;
4451 return;
4452 }
4453
4454 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4455 kfree(*mods);
4456 *mods = new_mods;
4457 *cap = new_cap;
4458 }
4459
4460 (*mods)[*size] = mod;
4461 *size += 1;
4462}
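/*
 * add_modifier() grows the list geometrically: the initial capacity of
 * 128 (see get_plane_modifiers()) doubles whenever it fills. On
 * allocation failure *mods is freed and set to NULL, which later calls
 * treat as a no-op and get_plane_modifiers() reports as -ENOMEM.
 */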
4463
4464static void
4465add_gfx9_modifiers(const struct amdgpu_device *adev,
4466 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4467{
4468 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4469 int pipe_xor_bits = min(8, pipes +
4470 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4471 int bank_xor_bits = min(8 - pipe_xor_bits,
4472 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4473 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4474 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4475
4476
4477 if (adev->family == AMDGPU_FAMILY_RV) {
4478 /* Raven2 and later */
4479 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4480
4481 /*
4482 * No _D DCC swizzles yet because we only allow 32bpp, which
4483 * doesn't support _D on DCN
4484 */
4485
4486 if (has_constant_encode) {
4487 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4488 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4489 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4490 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4491 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4492 AMD_FMT_MOD_SET(DCC, 1) |
4493 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4494 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4495 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4496 }
4497
4498 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4499 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4500 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4501 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4502 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4503 AMD_FMT_MOD_SET(DCC, 1) |
4504 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4505 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4506 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4507
4508 if (has_constant_encode) {
4509 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4510 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4511 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4512 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4513 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4514 AMD_FMT_MOD_SET(DCC, 1) |
4515 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4516 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4517 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4518
4519 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4520 AMD_FMT_MOD_SET(RB, rb) |
4521 AMD_FMT_MOD_SET(PIPE, pipes));
4522 }
4523
4524 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4525 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4526 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4527 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4528 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4529 AMD_FMT_MOD_SET(DCC, 1) |
4530 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4531 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4532 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4533 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4534 AMD_FMT_MOD_SET(RB, rb) |
4535 AMD_FMT_MOD_SET(PIPE, pipes));
4536 }
4537
4538 /*
4539 * Only supported for 64bpp on Raven, will be filtered on format in
4540 * dm_plane_format_mod_supported.
4541 */
4542 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4543 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4544 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4545 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4546 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4547
4548 if (adev->family == AMDGPU_FAMILY_RV) {
4549 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4550 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4551 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4552 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4553 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4554 }
4555
4556 /*
4557 * Only supported for 64bpp on Raven, will be filtered on format in
4558 * dm_plane_format_mod_supported.
4559 */
4560 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4561 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4562 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4563
4564 if (adev->family == AMDGPU_FAMILY_RV) {
4565 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4566 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4567 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4568 }
4569}
4570
4571static void
4572add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4573 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4574{
4575 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4576
4577 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4578 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4579 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4580 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4581 AMD_FMT_MOD_SET(DCC, 1) |
4582 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4583 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4584 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4585
4586 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4587 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4588 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4589 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4590 AMD_FMT_MOD_SET(DCC, 1) |
4591 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4592 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4593 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4594 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4595
4596 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4597 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4598 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4599 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4600
4601 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4602 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4603 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4604 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4605
4606
4607 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4608 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4609 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4610 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4611
4612 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4613 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4614 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4615}
4616
4617static void
4618add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4619 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4620{
4621 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4622 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4623
4624 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4625 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4626 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4627 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4628 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4629 AMD_FMT_MOD_SET(DCC, 1) |
4630 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4631 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4632 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 4633 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54
BN
4634
4635 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4636 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4637 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4638 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4639 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4640 AMD_FMT_MOD_SET(DCC, 1) |
4641 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4642 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4643 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4644 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 4645 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54
BN
4646
4647 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4648 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4649 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4650 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4651 AMD_FMT_MOD_SET(PACKERS, pkrs));
4652
4653 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4654 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4655 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4656 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4657 AMD_FMT_MOD_SET(PACKERS, pkrs));
4658
4659 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4660 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4661 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4662 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4663
4664 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4665 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4666 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4667}
4668
4669static int
4670get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4671{
4672 uint64_t size = 0, capacity = 128;
4673 *mods = NULL;
4674
4675 /* We have not hooked up any pre-GFX9 modifiers. */
4676 if (adev->family < AMDGPU_FAMILY_AI)
4677 return 0;
4678
4679 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4680
4681 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4682 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4683 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4684 return *mods ? 0 : -ENOMEM;
4685 }
4686
4687 switch (adev->family) {
4688 case AMDGPU_FAMILY_AI:
4689 case AMDGPU_FAMILY_RV:
4690 add_gfx9_modifiers(adev, mods, &size, &capacity);
4691 break;
4692 case AMDGPU_FAMILY_NV:
4693 case AMDGPU_FAMILY_VGH:
4694 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4695 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4696 else
4697 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4698 break;
4699 }
4700
4701 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4702
4703 /* INVALID marks the end of the list. */
4704 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4705
4706 if (!*mods)
4707 return -ENOMEM;
4708
4709 return 0;
4710}
4711
37384b3f
BN
4712static int
4713fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4714 const struct amdgpu_framebuffer *afb,
4715 const enum surface_pixel_format format,
4716 const enum dc_rotation_angle rotation,
4717 const struct plane_size *plane_size,
4718 union dc_tiling_info *tiling_info,
4719 struct dc_plane_dcc_param *dcc,
4720 struct dc_plane_address *address,
4721 const bool force_disable_dcc)
4722{
4723 const uint64_t modifier = afb->base.modifier;
4724 int ret;
4725
4726 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4727 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4728
4729 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4730 uint64_t dcc_address = afb->address + afb->base.offsets[1];
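		/*
		 * The DCC metadata surface is carried as the second plane
		 * of the framebuffer, so offsets[1] and pitches[1]
		 * describe it.
		 */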
4731
4732 dcc->enable = 1;
4733 dcc->meta_pitch = afb->base.pitches[1];
4734 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4735
4736 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4737 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4738 }
4739
4740 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4741 if (ret)
4742 return ret;
7df7e505 4743
09e5665a
NK
4744 return 0;
4745}
4746
4747static int
320932bf 4748fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 4749 const struct amdgpu_framebuffer *afb,
695af5f9
NK
4750 const enum surface_pixel_format format,
4751 const enum dc_rotation_angle rotation,
4752 const uint64_t tiling_flags,
09e5665a 4753 union dc_tiling_info *tiling_info,
12e2b2d4 4754 struct plane_size *plane_size,
09e5665a 4755 struct dc_plane_dcc_param *dcc,
87b7ebc2 4756 struct dc_plane_address *address,
5888f07a 4757 bool tmz_surface,
87b7ebc2 4758 bool force_disable_dcc)
09e5665a 4759{
320932bf 4760 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
4761 int ret;
4762
4763 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 4764 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 4765 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
4766 memset(address, 0, sizeof(*address));
4767
5888f07a
HW
4768 address->tmz_surface = tmz_surface;
4769
695af5f9 4770 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
4771 uint64_t addr = afb->address + fb->offsets[0];
4772
12e2b2d4
DL
4773 plane_size->surface_size.x = 0;
4774 plane_size->surface_size.y = 0;
4775 plane_size->surface_size.width = fb->width;
4776 plane_size->surface_size.height = fb->height;
4777 plane_size->surface_pitch =
320932bf
NK
4778 fb->pitches[0] / fb->format->cpp[0];
4779
e0634e8d 4780 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
4781 address->grph.addr.low_part = lower_32_bits(addr);
4782 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 4783 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 4784 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 4785 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 4786
12e2b2d4
DL
4787 plane_size->surface_size.x = 0;
4788 plane_size->surface_size.y = 0;
4789 plane_size->surface_size.width = fb->width;
4790 plane_size->surface_size.height = fb->height;
4791 plane_size->surface_pitch =
320932bf
NK
4792 fb->pitches[0] / fb->format->cpp[0];
4793
12e2b2d4
DL
4794 plane_size->chroma_size.x = 0;
4795 plane_size->chroma_size.y = 0;
320932bf 4796 /* TODO: set these based on surface format */
12e2b2d4
DL
4797 plane_size->chroma_size.width = fb->width / 2;
4798 plane_size->chroma_size.height = fb->height / 2;
320932bf 4799
12e2b2d4 4800 plane_size->chroma_pitch =
320932bf
NK
4801 fb->pitches[1] / fb->format->cpp[1];
4802
e0634e8d
NK
4803 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4804 address->video_progressive.luma_addr.low_part =
be7b9b32 4805 lower_32_bits(luma_addr);
e0634e8d 4806 address->video_progressive.luma_addr.high_part =
be7b9b32 4807 upper_32_bits(luma_addr);
e0634e8d
NK
4808 address->video_progressive.chroma_addr.low_part =
4809 lower_32_bits(chroma_addr);
4810 address->video_progressive.chroma_addr.high_part =
4811 upper_32_bits(chroma_addr);
4812 }
09e5665a 4813
a3241991 4814 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
4815 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4816 rotation, plane_size,
4817 tiling_info, dcc,
4818 address,
4819 force_disable_dcc);
09e5665a
NK
4820 if (ret)
4821 return ret;
a3241991
BN
4822 } else {
4823 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
4824 }
4825
4826 return 0;
7df7e505
NK
4827}
4828
d74004b6 4829static void
695af5f9 4830fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
4831 bool *per_pixel_alpha, bool *global_alpha,
4832 int *global_alpha_value)
4833{
4834 *per_pixel_alpha = false;
4835 *global_alpha = false;
4836 *global_alpha_value = 0xff;
4837
4838 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4839 return;
4840
4841 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4842 static const uint32_t alpha_formats[] = {
4843 DRM_FORMAT_ARGB8888,
4844 DRM_FORMAT_RGBA8888,
4845 DRM_FORMAT_ABGR8888,
4846 };
4847 uint32_t format = plane_state->fb->format->format;
4848 unsigned int i;
4849
4850 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4851 if (format == alpha_formats[i]) {
4852 *per_pixel_alpha = true;
4853 break;
4854 }
4855 }
4856 }
4857
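	/*
	 * DRM's plane alpha is 16-bit (0xffff == fully opaque) while DC
	 * takes an 8-bit global alpha, hence the >> 8 truncation below.
	 */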
4858 if (plane_state->alpha < 0xffff) {
4859 *global_alpha = true;
4860 *global_alpha_value = plane_state->alpha >> 8;
4861 }
4862}
4863
004fefa3
NK
4864static int
4865fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 4866 const enum surface_pixel_format format,
004fefa3
NK
4867 enum dc_color_space *color_space)
4868{
4869 bool full_range;
4870
4871 *color_space = COLOR_SPACE_SRGB;
4872
4873 /* DRM color properties only affect non-RGB formats. */
695af5f9 4874 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
4875 return 0;
4876
4877 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4878
4879 switch (plane_state->color_encoding) {
4880 case DRM_COLOR_YCBCR_BT601:
4881 if (full_range)
4882 *color_space = COLOR_SPACE_YCBCR601;
4883 else
4884 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4885 break;
4886
4887 case DRM_COLOR_YCBCR_BT709:
4888 if (full_range)
4889 *color_space = COLOR_SPACE_YCBCR709;
4890 else
4891 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4892 break;
4893
4894 case DRM_COLOR_YCBCR_BT2020:
4895 if (full_range)
4896 *color_space = COLOR_SPACE_2020_YCBCR;
4897 else
4898 return -EINVAL;
4899 break;
4900
4901 default:
4902 return -EINVAL;
4903 }
4904
4905 return 0;
4906}
4907
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	case DRM_FORMAT_XRGB16161616:
	case DRM_FORMAT_ARGB16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
		break;
	case DRM_FORMAT_XBGR16161616:
	case DRM_FORMAT_ABGR16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %p4cc\n",
			&fb->format->format);
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

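/*
 * Note (illustrative): the alpha-less X variants (e.g. XRGB8888) map to
 * the same DC surface formats as their A counterparts above; whether the
 * alpha channel actually takes effect is decided separately by
 * fill_blending_from_plane_state().
 */
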
static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  afb->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  afb->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 */
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set the input transfer function, since plane state is
	 * refreshed every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
		      dst.x, dst.y, dst.width, dst.height);

}

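/*
 * Worked example (illustrative): scaling a 1920x1080 viewport onto a
 * 1680x1050 stream with RMX_ASPECT. Since 1920 * 1050 > 1080 * 1680,
 * the width is the limiting side, so dst.height = 1080 * 1680 / 1920
 * = 945 and the image is centered with dst.y = (1050 - 945) / 2 = 52.
 */
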
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing.
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

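/*
 * Worked example (illustrative): a sink advertising 12 bpc combined with
 * a hypothetical "max bpc" request of 11 yields min(12, 11) = 11, which
 * is rounded down to the even value 10 and mapped to COLOR_DEPTH_101010.
 */
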
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030khz is the separation point between HDTV and SDTV
		 * according to HDMI spec, we use YCbCr709 and YCbCr601
		 * respectively
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;
	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec, based on the colour depth. */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}

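/*
 * Worked example (illustrative): 4K@60 RGB/4:4:4 has a pixel clock of
 * 594 MHz (pix_clk_100hz = 5940000, so normalized_clk = 594000 kHz). On
 * a 600 MHz TMDS link, 12 bpc needs 594000 * 36 / 24 = 891000 kHz and
 * 10 bpc needs 742500 kHz, both too high; the loop settles on 8 bpc
 * (594000 kHz), which fits.
 */
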
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	if (is_freesync_video_mode(mode_in, aconnector)) {
		timing_out->h_addressable = mode_in->hdisplay;
		timing_out->h_total = mode_in->htotal;
		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
		timing_out->v_total = mode_in->vtotal;
		timing_out->v_addressable = mode_in->vdisplay;
		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
		timing_out->pix_clk_100hz = mode_in->clock * 10;
	} else {
		timing_out->h_addressable = mode_in->crtc_hdisplay;
		timing_out->h_total = mode_in->crtc_htotal;
		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
		timing_out->v_total = mode_in->crtc_vtotal;
		timing_out->v_addressable = mode_in->crtc_vdisplay;
		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	}

	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}

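/*
 * Note on the HDMI fallback above (illustrative): if no colour depth fits
 * the TMDS limit at the current encoding, and the mode is also advertised
 * as 4:2:0-capable, the encoding is switched to YCbCr 4:2:0 and the depth
 * search is rerun. Since 4:2:0 halves the normalized clock, a mode that
 * failed at 4:4:4 even at 8 bpc may then pass.
 */
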
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* neither scaling nor an amdgpu-inserted mode; nothing to patch */
	}
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	struct dc_stream_state *master = NULL;

	if (stream->triggered_crtc_reset.enabled) {
		master = stream->triggered_crtc_reset.event_source;
		stream->triggered_crtc_reset.event =
			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

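/*
 * Worked example (illustrative): for CEA 1080p timing, pix_clk_100hz =
 * 1485000 (148.5 MHz), h_total = 2200 and v_total = 1125, so
 * refresh_rate = 1485000 * 100 / (2200 * 1125) = 60. The stream with the
 * highest such rate is picked as the multisync master above.
 */
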
static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count ; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}

static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
			    struct dc_sink *sink, struct dc_stream_state *stream,
			    struct dsc_dec_dpcd_caps *dsc_caps)
{
	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      dsc_caps);
#endif
	}
}

static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
					struct dc_sink *sink, struct dc_stream_state *stream,
					struct dsc_dec_dpcd_caps *dsc_caps)
{
	struct drm_connector *drm_connector = &aconnector->base;
	uint32_t link_bandwidth_kbps;

	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
						     dc_link_get_link_cap(aconnector->dc_link));
#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* Set DSC policy according to dsc_clock_en */
	dc_dsc_policy_set_enable_dsc_when_not_needed(
		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {

		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
					  dsc_caps,
					  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
					  0,
					  link_bandwidth_kbps,
					  &stream->timing,
					  &stream->timing.dsc_cfg)) {
			stream->timing.flags.DSC = 1;
			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
		}
	}

	/* Overwrite the stream flag if DSC is enabled through debugfs */
	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
		stream->timing.flags.DSC = 1;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
#endif
}

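/*
 * Ordering note (illustrative): dc_dsc_compute_config() decides whether
 * DSC is needed for the timing to fit within link_bandwidth_kbps (or is
 * forced on by the policy set above), and only then is flags.DSC set.
 * The debugfs overrides for slice counts and bits-per-pixel are applied
 * afterwards, so they refine an already-enabled DSC configuration rather
 * than enabling it.
 */
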
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			      bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
		&aconnector->base.probed_modes :
		&aconnector->base.modes;

	if (aconnector->freesync_vid_base.clock != 0)
		return &aconnector->freesync_vid_base;

	/* Find the preferred mode */
	list_for_each_entry(m, list_head, head) {
		if (m->type & DRM_MODE_TYPE_PREFERRED) {
			m_pref = m;
			break;
		}
	}

	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
		m_pref = list_first_entry_or_null(
			&aconnector->base.modes, struct drm_display_mode, head);
		if (!m_pref) {
			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
			return NULL;
		}
	}

	highest_refresh = drm_mode_vrefresh(m_pref);

	/*
	 * Find the mode with the highest refresh rate at the same resolution.
	 * For some monitors, the preferred mode is not the mode with the
	 * highest supported refresh rate.
	 */
	list_for_each_entry(m, list_head, head) {
		current_refresh = drm_mode_vrefresh(m);

		if (m->hdisplay == m_pref->hdisplay &&
		    m->vdisplay == m_pref->vdisplay &&
		    highest_refresh < current_refresh) {
			highest_refresh = current_refresh;
			m_pref = m;
		}
	}

	aconnector->freesync_vid_base = *m_pref;
	return m_pref;
}

static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector)
{
	struct drm_display_mode *high_mode;
	int timing_diff;

	high_mode = get_highest_refresh_rate_mode(aconnector, false);
	if (!high_mode || !mode)
		return false;

	timing_diff = high_mode->vtotal - mode->vtotal;

	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
	    high_mode->hdisplay != mode->hdisplay ||
	    high_mode->vdisplay != mode->vdisplay ||
	    high_mode->hsync_start != mode->hsync_start ||
	    high_mode->hsync_end != mode->hsync_end ||
	    high_mode->htotal != mode->htotal ||
	    high_mode->hskew != mode->hskew ||
	    high_mode->vscan != mode->vscan ||
	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
	    high_mode->vsync_end - mode->vsync_end != timing_diff)
		return false;
	else
		return true;
}

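/*
 * Worked example (illustrative): with a 1080p base mode at a fixed
 * 148.5 MHz pixel clock, h_total = 2200 and vtotal = 1125 (60 Hz), a
 * 48 Hz freesync video variant keeps every horizontal parameter and the
 * clock identical and stretches only the vertical blank: vtotal grows to
 * roughly 148500000 / (2200 * 48) ~= 1406, with vsync_start/vsync_end
 * shifted by the same delta, which is exactly what the checks above
 * require.
 */
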
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	struct drm_display_mode saved_mode;
	struct drm_display_mode *freesync_mode = NULL;
	bool native_mode_found = false;
	bool recalculate_timing = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	struct dc_sink *sink = NULL;

	memset(&saved_mode, 0, sizeof(saved_mode));

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error: the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous
		 * mode, and the mode list may not be filled in yet.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		recalculate_timing = amdgpu_freesync_vid_mode &&
				 is_freesync_video_mode(&mode, aconnector);
		if (recalculate_timing) {
			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
			saved_mode = mode;
			mode = *freesync_mode;
		} else {
			decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode, scale);

			preferred_refresh = drm_mode_vrefresh(preferred_mode);
		}
	}

	if (recalculate_timing)
		drm_mode_set_crtcinfo(&saved_mode, 0);
	else if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, NULL,
			requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, old_stream,
			requested_bpc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* SST DSC determination policy */
	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
#endif

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC infopacket.
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	dc_sink_release(sink);

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;

	/* TODO: Duplicate the dc_stream once the stream object is flattened */

	return &state->base;
}

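/*
 * Reference-counting note (illustrative): the dc_stream_retain() in
 * dm_crtc_duplicate_state() pairs with the dc_stream_release() in
 * dm_crtc_destroy_state(), so each duplicated atomic CRTC state holds its
 * own reference to the stream it points at.
 */
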
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
	crtc_debugfs_init(crtc);

	return 0;
}
#endif

static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	int rc;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
		      acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}

static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct amdgpu_display_manager *dm = &adev->dm;
	unsigned long flags;
#endif
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;

	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
		return -EBUSY;

	if (amdgpu_in_reset(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_irqsave(&dm->vblank_lock, flags);
	dm->vblank_workqueue->dm = dm;
	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
	dm->vblank_workqueue->enable = enable;
	spin_unlock_irqrestore(&dm->vblank_lock, flags);
	schedule_work(&dm->vblank_workqueue->mall_work);
#endif

	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}

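/*
 * Note (illustrative): with VRR active, the vblank interrupt alone is not
 * used to track the frame, so dm_set_vblank() pairs it with the vupdate
 * interrupt; both are looked up relative to the CRTC's OTG instance
 * (IRQ_TYPE_VBLANK/IRQ_TYPE_VUPDATE + acrtc->otg_inst).
 */
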
/* Only the options currently available to the driver are implemented */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	.late_register = amdgpu_dm_crtc_late_register,
#endif
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl, which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	update_subconnector_property(aconnector);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	/*
	 * Call only if mst_mgr was initialized before, since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of a headless boot with force on for a DP managed connector,
	 * those settings have to be != 0 to get an initial modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");

		aconnector->force_yuv420_output = true;
		stream = create_validate_stream_for_sink(aconnector, drm_mode,
							 dm_state, old_stream);
		aconnector->force_yuv420_output = false;
	}

	return stream;
}

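/*
 * Worked example (illustrative): with max_requested_bpc = 10, the loop
 * above tries 10 bpc, then 8, then 6, dropping 2 bpc per failed DC
 * validation; if even that fails with DC_FAIL_ENC_VALIDATE, one more
 * attempt is made with YCbCr 4:2:0 forced.
 */
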
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
	    aconnector->base.force != DRM_FORCE_ON) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}

static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}

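/*
 * Packet layout note (illustrative): the HDR static metadata payload is a
 * fixed 26 bytes. Over HDMI it travels as an infoframe (type 0x87,
 * version 0x01, length 0x1A) with the checksum kept in sb[0]; over DP/eDP
 * the same 26 bytes are wrapped in an SDP header instead, which is why
 * the copy offset `i` differs between the two cases above.
 */
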
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_connector_atomic_check(new_con_state);

	if (!crtc)
		return 0;

	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}

e7b07cee
HW
6525static const struct drm_connector_helper_funcs
6526amdgpu_dm_connector_helper_funcs = {
6527 /*
1f6010a9 6528	 * If hotplugging a second, bigger display in FB console mode, higher-resolution
b830ebc9 6529	 * modes will be filtered out by drm_mode_validate_size(), and those modes
1f6010a9 6530	 * are missing after the user starts lightdm. So we need to renew the modes
b830ebc9
HW
6531	 * list in the get_modes callback, not just return the modes count.
6532 */
e7b07cee
HW
6533 .get_modes = get_modes,
6534 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 6535 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
6536};
6537
6538static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6539{
6540}
6541
d6ef9b41 6542static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
6543{
6544 struct drm_atomic_state *state = new_crtc_state->state;
6545 struct drm_plane *plane;
6546 int num_active = 0;
6547
6548 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6549 struct drm_plane_state *new_plane_state;
6550
6551 /* Cursor planes are "fake". */
6552 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6553 continue;
6554
6555 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6556
6557 if (!new_plane_state) {
6558 /*
6559			 * The plane is enabled on the CRTC and hasn't changed
6560 * state. This means that it previously passed
6561 * validation and is therefore enabled.
6562 */
6563 num_active += 1;
6564 continue;
6565 }
6566
6567 /* We need a framebuffer to be considered enabled. */
6568 num_active += (new_plane_state->fb != NULL);
6569 }
6570
d6ef9b41
NK
6571 return num_active;
6572}
6573
8fe684e9
NK
6574static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6575 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
6576{
6577 struct dm_crtc_state *dm_new_crtc_state =
6578 to_dm_crtc_state(new_crtc_state);
6579
6580 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
6581
6582 if (!dm_new_crtc_state->stream)
6583 return;
6584
6585 dm_new_crtc_state->active_planes =
6586 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
6587}
6588
3ee6b26b 6589static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 6590 struct drm_atomic_state *state)
e7b07cee 6591{
29b77ad7
MR
6592 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6593 crtc);
1348969a 6594 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 6595 struct dc *dc = adev->dm.dc;
29b77ad7 6596 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
6597 int ret = -EINVAL;
6598
5b8c5969 6599 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 6600
29b77ad7 6601 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 6602
9b690ef3 6603 if (unlikely(!dm_crtc_state->stream &&
29b77ad7 6604 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
e7b07cee
HW
6605 WARN_ON(1);
6606 return ret;
6607 }
6608
bc92c065 6609 /*
b836a274
MD
6610 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6611 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6612 * planes are disabled, which is not supported by the hardware. And there is legacy
6613 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 6614 */
29b77ad7 6615 if (crtc_state->enable &&
ea9522f5
SS
6616 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6617 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 6618 return -EINVAL;
ea9522f5 6619 }
c14a005c 6620
b836a274
MD
6621 /* In some use cases, like reset, no stream is attached */
6622 if (!dm_crtc_state->stream)
6623 return 0;
6624
62c933f9 6625 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
6626 return 0;
6627
ea9522f5 6628 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
6629 return ret;
6630}
6631
3ee6b26b
AD
6632static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6633 const struct drm_display_mode *mode,
6634 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
6635{
6636 return true;
6637}
6638
6639static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6640 .disable = dm_crtc_helper_disable,
6641 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
6642 .mode_fixup = dm_crtc_helper_mode_fixup,
6643 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
6644};
6645
6646static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6647{
6648
6649}
6650
3261e013
ML
6651static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6652{
6653 switch (display_color_depth) {
6654 case COLOR_DEPTH_666:
6655 return 6;
6656 case COLOR_DEPTH_888:
6657 return 8;
6658 case COLOR_DEPTH_101010:
6659 return 10;
6660 case COLOR_DEPTH_121212:
6661 return 12;
6662 case COLOR_DEPTH_141414:
6663 return 14;
6664 case COLOR_DEPTH_161616:
6665 return 16;
6666 default:
6667 break;
6668 }
6669 return 0;
6670}
6671
3ee6b26b
AD
6672static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6673 struct drm_crtc_state *crtc_state,
6674 struct drm_connector_state *conn_state)
e7b07cee 6675{
3261e013
ML
6676 struct drm_atomic_state *state = crtc_state->state;
6677 struct drm_connector *connector = conn_state->connector;
6678 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6679 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6680 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6681 struct drm_dp_mst_topology_mgr *mst_mgr;
6682 struct drm_dp_mst_port *mst_port;
6683 enum dc_color_depth color_depth;
6684 int clock, bpp = 0;
1bc22f20 6685 bool is_y420 = false;
3261e013
ML
6686
6687 if (!aconnector->port || !aconnector->dc_sink)
6688 return 0;
6689
6690 mst_port = aconnector->port;
6691 mst_mgr = &aconnector->mst_port->mst_mgr;
6692
6693 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6694 return 0;
6695
6696 if (!state->duplicated) {
cbd14ae7 6697 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
6698 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6699 aconnector->force_yuv420_output;
cbd14ae7
SW
6700 color_depth = convert_color_depth_from_display_info(connector,
6701 is_y420,
6702 max_bpc);
3261e013
ML
6703 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6704 clock = adjusted_mode->clock;
dc48529f 6705 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
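		/*
		 * Worked example (approximate): a 1080p60 mode (148500 kHz)
		 * at 8 bpc RGB is 24 bpp, i.e. 148500 * 24 / 8 = 445.5 MB/s
		 * of peak bandwidth. One PBN is 54/64 MB/s and
		 * drm_dp_calc_pbn_mode() adds a 0.6% margin, so this works
		 * out to roughly 530 PBN.
		 */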
3261e013
ML
6706 }
6707 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6708 mst_mgr,
6709 mst_port,
1c6c1cb5 6710 dm_new_connector_state->pbn,
03ca9600 6711 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
6712 if (dm_new_connector_state->vcpi_slots < 0) {
6713 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6714 return dm_new_connector_state->vcpi_slots;
6715 }
e7b07cee
HW
6716 return 0;
6717}
6718
6719const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6720 .disable = dm_encoder_helper_disable,
6721 .atomic_check = dm_encoder_helper_atomic_check
6722};
6723
d9fe1a4c 6724#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74
ML
6725static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6726 struct dc_state *dc_state)
6727{
6728 struct dc_stream_state *stream = NULL;
6729 struct drm_connector *connector;
5760dcb9 6730 struct drm_connector_state *new_con_state;
29b9ba74
ML
6731 struct amdgpu_dm_connector *aconnector;
6732 struct dm_connector_state *dm_conn_state;
6733 int i, j, clock, bpp;
6734 int vcpi, pbn_div, pbn = 0;
6735
5760dcb9 6736 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
6737
6738 aconnector = to_amdgpu_dm_connector(connector);
6739
6740 if (!aconnector->port)
6741 continue;
6742
6743 if (!new_con_state || !new_con_state->crtc)
6744 continue;
6745
6746 dm_conn_state = to_dm_connector_state(new_con_state);
6747
6748 for (j = 0; j < dc_state->stream_count; j++) {
6749 stream = dc_state->streams[j];
6750 if (!stream)
6751 continue;
6752
6753			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6754 break;
6755
6756 stream = NULL;
6757 }
6758
6759 if (!stream)
6760 continue;
6761
6762 if (stream->timing.flags.DSC != 1) {
6763 drm_dp_mst_atomic_enable_dsc(state,
6764 aconnector->port,
6765 dm_conn_state->pbn,
6766 0,
6767 false);
6768 continue;
6769 }
6770
6771 pbn_div = dm_mst_get_pbn_divider(stream->link);
6772 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6773 clock = stream->timing.pix_clk_100hz / 10;
6774 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
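		/*
		 * Note: dsc_cfg.bits_per_pixel is stored in units of 1/16 of
		 * a bit per pixel; passing dsc=true lets
		 * drm_dp_calc_pbn_mode() divide that back out, so e.g. a
		 * stored value of 128 describes an 8 bpp compressed stream.
		 */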
6775 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6776 aconnector->port,
6777 pbn, pbn_div,
6778 true);
6779 if (vcpi < 0)
6780 return vcpi;
6781
6782 dm_conn_state->pbn = pbn;
6783 dm_conn_state->vcpi_slots = vcpi;
6784 }
6785 return 0;
6786}
d9fe1a4c 6787#endif
29b9ba74 6788
e7b07cee
HW
6789static void dm_drm_plane_reset(struct drm_plane *plane)
6790{
6791 struct dm_plane_state *amdgpu_state = NULL;
6792
6793 if (plane->state)
6794 plane->funcs->atomic_destroy_state(plane, plane->state);
6795
6796 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 6797 WARN_ON(amdgpu_state == NULL);
1f6010a9 6798
7ddaef96
NK
6799 if (amdgpu_state)
6800 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
6801}
6802
6803static struct drm_plane_state *
6804dm_drm_plane_duplicate_state(struct drm_plane *plane)
6805{
6806 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6807
6808 old_dm_plane_state = to_dm_plane_state(plane->state);
6809 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6810 if (!dm_plane_state)
6811 return NULL;
6812
6813 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6814
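	/*
	 * The duplicate shares the dc_plane_state with the old state, so
	 * take an extra reference; each state drops its own reference again
	 * in dm_drm_plane_destroy_state().
	 */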
3be5262e
HW
6815 if (old_dm_plane_state->dc_state) {
6816 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6817 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
6818 }
6819
6820 return &dm_plane_state->base;
6821}
6822
dfd84d90 6823static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 6824 struct drm_plane_state *state)
e7b07cee
HW
6825{
6826 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6827
3be5262e
HW
6828 if (dm_plane_state->dc_state)
6829 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 6830
0627bbd3 6831 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
6832}
6833
6834static const struct drm_plane_funcs dm_plane_funcs = {
6835 .update_plane = drm_atomic_helper_update_plane,
6836 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 6837 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
6838 .reset = dm_drm_plane_reset,
6839 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6840 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 6841 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
6842};
6843
3ee6b26b
AD
6844static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6845 struct drm_plane_state *new_state)
e7b07cee
HW
6846{
6847 struct amdgpu_framebuffer *afb;
6848 struct drm_gem_object *obj;
5d43be0c 6849 struct amdgpu_device *adev;
e7b07cee 6850 struct amdgpu_bo *rbo;
e7b07cee 6851 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
6852 struct list_head list;
6853 struct ttm_validate_buffer tv;
6854 struct ww_acquire_ctx ticket;
5d43be0c
CK
6855 uint32_t domain;
6856 int r;
e7b07cee
HW
6857
6858 if (!new_state->fb) {
4711c033 6859 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
6860 return 0;
6861 }
6862
6863 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 6864 obj = new_state->fb->obj[0];
e7b07cee 6865 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 6866 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
6867 INIT_LIST_HEAD(&list);
6868
6869 tv.bo = &rbo->tbo;
6870 tv.num_shared = 1;
6871 list_add(&tv.head, &list);
6872
9165fb87 6873 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
6874 if (r) {
6875 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 6876 return r;
0f257b09 6877 }
e7b07cee 6878
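	/*
	 * Cursor buffers are always pinned to VRAM here; other plane types
	 * may also scan out from GTT where the ASIC supports it, which is
	 * what amdgpu_display_supported_domains() checks for.
	 */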
5d43be0c 6879 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 6880 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
6881 else
6882 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 6883
7b7c6c81 6884 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 6885 if (unlikely(r != 0)) {
30b7c614
HW
6886 if (r != -ERESTARTSYS)
6887 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 6888 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
6889 return r;
6890 }
6891
bb812f1e
JZ
6892 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6893 if (unlikely(r != 0)) {
6894 amdgpu_bo_unpin(rbo);
0f257b09 6895 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6896 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
6897 return r;
6898 }
7df7e505 6899
0f257b09 6900 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 6901
7b7c6c81 6902 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
6903
6904 amdgpu_bo_ref(rbo);
6905
cf322b49
NK
6906 /**
6907 * We don't do surface updates on planes that have been newly created,
6908 * but we also don't have the afb->address during atomic check.
6909 *
6910 * Fill in buffer attributes depending on the address here, but only on
6911 * newly created planes since they're not being used by DC yet and this
6912 * won't modify global state.
6913 */
6914 dm_plane_state_old = to_dm_plane_state(plane->state);
6915 dm_plane_state_new = to_dm_plane_state(new_state);
6916
3be5262e 6917 if (dm_plane_state_new->dc_state &&
cf322b49
NK
6918 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6919 struct dc_plane_state *plane_state =
6920 dm_plane_state_new->dc_state;
6921 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 6922
320932bf 6923 fill_plane_buffer_attributes(
695af5f9 6924 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 6925 afb->tiling_flags,
cf322b49
NK
6926 &plane_state->tiling_info, &plane_state->plane_size,
6927 &plane_state->dcc, &plane_state->address,
6eed95b0 6928 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
6929 }
6930
e7b07cee
HW
6931 return 0;
6932}
6933
3ee6b26b
AD
6934static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6935 struct drm_plane_state *old_state)
e7b07cee
HW
6936{
6937 struct amdgpu_bo *rbo;
e7b07cee
HW
6938 int r;
6939
6940 if (!old_state->fb)
6941 return;
6942
e68d14dd 6943 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
6944 r = amdgpu_bo_reserve(rbo, false);
6945 if (unlikely(r)) {
6946 DRM_ERROR("failed to reserve rbo before unpin\n");
6947 return;
b830ebc9
HW
6948 }
6949
6950 amdgpu_bo_unpin(rbo);
6951 amdgpu_bo_unreserve(rbo);
6952 amdgpu_bo_unref(&rbo);
e7b07cee
HW
6953}
6954
8c44515b
AP
6955static int dm_plane_helper_check_state(struct drm_plane_state *state,
6956 struct drm_crtc_state *new_crtc_state)
6957{
6300b3bd
MK
6958 struct drm_framebuffer *fb = state->fb;
6959 int min_downscale, max_upscale;
6960 int min_scale = 0;
6961 int max_scale = INT_MAX;
6962
40d916a2 6963 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 6964 if (fb && state->crtc) {
40d916a2
NC
6965 /* Validate viewport to cover the case when only the position changes */
6966 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6967 int viewport_width = state->crtc_w;
6968 int viewport_height = state->crtc_h;
6969
6970 if (state->crtc_x < 0)
6971 viewport_width += state->crtc_x;
6972 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6973 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6974
6975 if (state->crtc_y < 0)
6976 viewport_height += state->crtc_y;
6977 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6978 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6979
4abdb72b
NC
6980 if (viewport_width < 0 || viewport_height < 0) {
6981 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6982 return -EINVAL;
6983 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6984 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 6985 return -EINVAL;
4abdb72b
NC
6986 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6987 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 6988 return -EINVAL;
4abdb72b
NC
6989 }
6990
40d916a2
NC
6991 }
6992
6993 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
6994 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6995 &min_downscale, &max_upscale);
6996 /*
6997 * Convert to drm convention: 16.16 fixed point, instead of dc's
6998 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6999 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7000 */
7001 min_scale = (1000 << 16) / max_upscale;
7002 max_scale = (1000 << 16) / min_downscale;
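		/*
		 * Example: a max_upscale of 16000 (16x in DC's 1/1000 units)
		 * yields min_scale = 65536000 / 16000 = 4096, which is 1/16
		 * in drm's 16.16 fixed point.
		 */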
7003 }
8c44515b 7004
8c44515b 7005 return drm_atomic_helper_check_plane_state(
6300b3bd 7006 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
7007}
7008
7578ecda 7009static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 7010 struct drm_atomic_state *state)
cbd19488 7011{
7c11b99a
MR
7012 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7013 plane);
1348969a 7014 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 7015 struct dc *dc = adev->dm.dc;
78171832 7016 struct dm_plane_state *dm_plane_state;
695af5f9 7017 struct dc_scaling_info scaling_info;
8c44515b 7018 struct drm_crtc_state *new_crtc_state;
695af5f9 7019 int ret;
78171832 7020
ba5c1649 7021 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 7022
ba5c1649 7023 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 7024
3be5262e 7025 if (!dm_plane_state->dc_state)
9a3329b1 7026 return 0;
cbd19488 7027
8c44515b 7028 new_crtc_state =
dec92020 7029 drm_atomic_get_new_crtc_state(state,
ba5c1649 7030 new_plane_state->crtc);
8c44515b
AP
7031 if (!new_crtc_state)
7032 return -EINVAL;
7033
ba5c1649 7034 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
7035 if (ret)
7036 return ret;
7037
ba5c1649 7038 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
695af5f9
NK
7039 if (ret)
7040 return ret;
a05bcff1 7041
62c933f9 7042 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
7043 return 0;
7044
7045 return -EINVAL;
7046}
7047
674e78ac 7048static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7049 struct drm_atomic_state *state)
674e78ac
NK
7050{
7051 /* Only support async updates on cursor planes. */
7052 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7053 return -EINVAL;
7054
7055 return 0;
7056}
7057
7058static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7059 struct drm_atomic_state *state)
674e78ac 7060{
5ddb0bd4
MR
7061 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7062 plane);
674e78ac 7063 struct drm_plane_state *old_state =
5ddb0bd4 7064 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7065
e8a98235
RS
7066 trace_amdgpu_dm_atomic_update_cursor(new_state);
7067
332af874 7068 swap(plane->state->fb, new_state->fb);
674e78ac
NK
7069
7070 plane->state->src_x = new_state->src_x;
7071 plane->state->src_y = new_state->src_y;
7072 plane->state->src_w = new_state->src_w;
7073 plane->state->src_h = new_state->src_h;
7074 plane->state->crtc_x = new_state->crtc_x;
7075 plane->state->crtc_y = new_state->crtc_y;
7076 plane->state->crtc_w = new_state->crtc_w;
7077 plane->state->crtc_h = new_state->crtc_h;
7078
7079 handle_cursor_update(plane, old_state);
7080}
7081
e7b07cee
HW
7082static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7083 .prepare_fb = dm_plane_helper_prepare_fb,
7084 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7085 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
7086 .atomic_async_check = dm_plane_atomic_async_check,
7087 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
7088};
7089
7090/*
7091 * TODO: these are currently initialized to RGB formats only.
7092 * For future use cases we should either initialize them dynamically based on
7093 * plane capabilities, or initialize this array to all formats, so the internal
1f6010a9 7094 * drm check will succeed, and let DC implement the proper check.
e7b07cee 7095 */
d90371b0 7096static const uint32_t rgb_formats[] = {
e7b07cee
HW
7097 DRM_FORMAT_XRGB8888,
7098 DRM_FORMAT_ARGB8888,
7099 DRM_FORMAT_RGBA8888,
7100 DRM_FORMAT_XRGB2101010,
7101 DRM_FORMAT_XBGR2101010,
7102 DRM_FORMAT_ARGB2101010,
7103 DRM_FORMAT_ABGR2101010,
58020403
MK
7104 DRM_FORMAT_XRGB16161616,
7105 DRM_FORMAT_XBGR16161616,
7106 DRM_FORMAT_ARGB16161616,
7107 DRM_FORMAT_ABGR16161616,
bcd47f60
MR
7108 DRM_FORMAT_XBGR8888,
7109 DRM_FORMAT_ABGR8888,
46dd9ff7 7110 DRM_FORMAT_RGB565,
e7b07cee
HW
7111};
7112
0d579c7e
NK
7113static const uint32_t overlay_formats[] = {
7114 DRM_FORMAT_XRGB8888,
7115 DRM_FORMAT_ARGB8888,
7116 DRM_FORMAT_RGBA8888,
7117 DRM_FORMAT_XBGR8888,
7118 DRM_FORMAT_ABGR8888,
7267a1a9 7119 DRM_FORMAT_RGB565
e7b07cee
HW
7120};
7121
7122static const u32 cursor_formats[] = {
7123 DRM_FORMAT_ARGB8888
7124};
7125
37c6a93b
NK
7126static int get_plane_formats(const struct drm_plane *plane,
7127 const struct dc_plane_cap *plane_cap,
7128 uint32_t *formats, int max_formats)
e7b07cee 7129{
37c6a93b
NK
7130 int i, num_formats = 0;
7131
7132 /*
7133 * TODO: Query support for each group of formats directly from
7134 * DC plane caps. This will require adding more formats to the
7135 * caps list.
7136 */
e7b07cee 7137
f180b4bc 7138 switch (plane->type) {
e7b07cee 7139 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
7140 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7141 if (num_formats >= max_formats)
7142 break;
7143
7144 formats[num_formats++] = rgb_formats[i];
7145 }
7146
ea36ad34 7147 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 7148 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
7149 if (plane_cap && plane_cap->pixel_format_support.p010)
7150 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
7151 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7152 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7153 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
7154 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7155 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 7156 }
e7b07cee 7157 break;
37c6a93b 7158
e7b07cee 7159 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
7160 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7161 if (num_formats >= max_formats)
7162 break;
7163
7164 formats[num_formats++] = overlay_formats[i];
7165 }
e7b07cee 7166 break;
37c6a93b 7167
e7b07cee 7168 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
7169 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7170 if (num_formats >= max_formats)
7171 break;
7172
7173 formats[num_formats++] = cursor_formats[i];
7174 }
e7b07cee
HW
7175 break;
7176 }
7177
37c6a93b
NK
7178 return num_formats;
7179}
7180
7181static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7182 struct drm_plane *plane,
7183 unsigned long possible_crtcs,
7184 const struct dc_plane_cap *plane_cap)
7185{
7186 uint32_t formats[32];
7187 int num_formats;
7188 int res = -EPERM;
ecc874a6 7189 unsigned int supported_rotations;
faa37f54 7190 uint64_t *modifiers = NULL;
37c6a93b
NK
7191
7192 num_formats = get_plane_formats(plane, plane_cap, formats,
7193 ARRAY_SIZE(formats));
7194
faa37f54
BN
7195 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7196 if (res)
7197 return res;
7198
4a580877 7199 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7200 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
7201 modifiers, plane->type, NULL);
7202 kfree(modifiers);
37c6a93b
NK
7203 if (res)
7204 return res;
7205
cc1fec57
NK
7206 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7207 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
7208 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7209 BIT(DRM_MODE_BLEND_PREMULTI);
7210
7211 drm_plane_create_alpha_property(plane);
7212 drm_plane_create_blend_mode_property(plane, blend_caps);
7213 }
7214
fc8e5230 7215 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
7216 plane_cap &&
7217 (plane_cap->pixel_format_support.nv12 ||
7218 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
7219 /* This only affects YUV formats. */
7220 drm_plane_create_color_properties(
7221 plane,
7222 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
7223 BIT(DRM_COLOR_YCBCR_BT709) |
7224 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
7225 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7226 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7227 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7228 }
7229
ecc874a6
PLG
7230 supported_rotations =
7231 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7232 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7233
1347385f
SS
7234 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7235 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
7236 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7237 supported_rotations);
ecc874a6 7238
f180b4bc 7239 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7240
96719c54 7241 /* Create (reset) the plane state */
f180b4bc
HW
7242 if (plane->funcs->reset)
7243 plane->funcs->reset(plane);
96719c54 7244
37c6a93b 7245 return 0;
e7b07cee
HW
7246}
7247
7578ecda
AD
7248static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7249 struct drm_plane *plane,
7250 uint32_t crtc_index)
e7b07cee
HW
7251{
7252 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7253 struct drm_plane *cursor_plane;
e7b07cee
HW
7254
7255 int res = -ENOMEM;
7256
7257 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7258 if (!cursor_plane)
7259 goto fail;
7260
f180b4bc 7261 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7262 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
7263
7264 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7265 if (!acrtc)
7266 goto fail;
7267
7268 res = drm_crtc_init_with_planes(
7269 dm->ddev,
7270 &acrtc->base,
7271 plane,
f180b4bc 7272 cursor_plane,
e7b07cee
HW
7273 &amdgpu_dm_crtc_funcs, NULL);
7274
7275 if (res)
7276 goto fail;
7277
7278 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7279
96719c54
HW
7280 /* Create (reset) the plane state */
7281 if (acrtc->base.funcs->reset)
7282 acrtc->base.funcs->reset(&acrtc->base);
7283
e7b07cee
HW
7284 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7285 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7286
7287 acrtc->crtc_id = crtc_index;
7288 acrtc->base.enabled = false;
c37e2d29 7289 acrtc->otg_inst = -1;
e7b07cee
HW
7290
7291 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
7292 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7293 true, MAX_COLOR_LUT_ENTRIES);
086247a4 7294 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 7295
e7b07cee
HW
7296 return 0;
7297
7298fail:
b830ebc9
HW
7299 kfree(acrtc);
7300 kfree(cursor_plane);
e7b07cee
HW
7301 return res;
7302}
7303
7304
7305static int to_drm_connector_type(enum signal_type st)
7306{
7307 switch (st) {
7308 case SIGNAL_TYPE_HDMI_TYPE_A:
7309 return DRM_MODE_CONNECTOR_HDMIA;
7310 case SIGNAL_TYPE_EDP:
7311 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
7312 case SIGNAL_TYPE_LVDS:
7313 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
7314 case SIGNAL_TYPE_RGB:
7315 return DRM_MODE_CONNECTOR_VGA;
7316 case SIGNAL_TYPE_DISPLAY_PORT:
7317 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7318 return DRM_MODE_CONNECTOR_DisplayPort;
7319 case SIGNAL_TYPE_DVI_DUAL_LINK:
7320 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7321 return DRM_MODE_CONNECTOR_DVID;
7322 case SIGNAL_TYPE_VIRTUAL:
7323 return DRM_MODE_CONNECTOR_VIRTUAL;
7324
7325 default:
7326 return DRM_MODE_CONNECTOR_Unknown;
7327 }
7328}
7329
2b4c1c05
DV
7330static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7331{
62afb4ad
JRS
7332 struct drm_encoder *encoder;
7333
7334 /* There is only one encoder per connector */
7335 drm_connector_for_each_possible_encoder(connector, encoder)
7336 return encoder;
7337
7338 return NULL;
2b4c1c05
DV
7339}
7340
e7b07cee
HW
7341static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7342{
e7b07cee
HW
7343 struct drm_encoder *encoder;
7344 struct amdgpu_encoder *amdgpu_encoder;
7345
2b4c1c05 7346 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
7347
7348 if (encoder == NULL)
7349 return;
7350
7351 amdgpu_encoder = to_amdgpu_encoder(encoder);
7352
7353 amdgpu_encoder->native_mode.clock = 0;
7354
7355 if (!list_empty(&connector->probed_modes)) {
7356 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 7357
e7b07cee 7358 list_for_each_entry(preferred_mode,
b830ebc9
HW
7359 &connector->probed_modes,
7360 head) {
7361 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7362 amdgpu_encoder->native_mode = *preferred_mode;
7363
e7b07cee
HW
7364 break;
7365 }
7366
7367 }
7368}
7369
3ee6b26b
AD
7370static struct drm_display_mode *
7371amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7372 char *name,
7373 int hdisplay, int vdisplay)
e7b07cee
HW
7374{
7375 struct drm_device *dev = encoder->dev;
7376 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7377 struct drm_display_mode *mode = NULL;
7378 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7379
7380 mode = drm_mode_duplicate(dev, native_mode);
7381
b830ebc9 7382 if (mode == NULL)
e7b07cee
HW
7383 return NULL;
7384
7385 mode->hdisplay = hdisplay;
7386 mode->vdisplay = vdisplay;
7387 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 7388 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
7389
7390 return mode;
7391
7392}
7393
7394static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 7395 struct drm_connector *connector)
e7b07cee
HW
7396{
7397 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7398 struct drm_display_mode *mode = NULL;
7399 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
7400 struct amdgpu_dm_connector *amdgpu_dm_connector =
7401 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7402 int i;
7403 int n;
7404 struct mode_size {
7405 char name[DRM_DISPLAY_MODE_LEN];
7406 int w;
7407 int h;
b830ebc9 7408 } common_modes[] = {
e7b07cee
HW
7409 { "640x480", 640, 480},
7410 { "800x600", 800, 600},
7411 { "1024x768", 1024, 768},
7412 { "1280x720", 1280, 720},
7413 { "1280x800", 1280, 800},
7414 {"1280x1024", 1280, 1024},
7415 { "1440x900", 1440, 900},
7416 {"1680x1050", 1680, 1050},
7417 {"1600x1200", 1600, 1200},
7418 {"1920x1080", 1920, 1080},
7419 {"1920x1200", 1920, 1200}
7420 };
7421
b830ebc9 7422 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
7423
7424 for (i = 0; i < n; i++) {
7425 struct drm_display_mode *curmode = NULL;
7426 bool mode_existed = false;
7427
7428 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
7429 common_modes[i].h > native_mode->vdisplay ||
7430 (common_modes[i].w == native_mode->hdisplay &&
7431 common_modes[i].h == native_mode->vdisplay))
7432 continue;
e7b07cee
HW
7433
7434 list_for_each_entry(curmode, &connector->probed_modes, head) {
7435 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 7436 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
7437 mode_existed = true;
7438 break;
7439 }
7440 }
7441
7442 if (mode_existed)
7443 continue;
7444
7445 mode = amdgpu_dm_create_common_mode(encoder,
7446 common_modes[i].name, common_modes[i].w,
7447 common_modes[i].h);
7448 drm_mode_probed_add(connector, mode);
c84dec2f 7449 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
7450 }
7451}
7452
3ee6b26b
AD
7453static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7454 struct edid *edid)
e7b07cee 7455{
c84dec2f
HW
7456 struct amdgpu_dm_connector *amdgpu_dm_connector =
7457 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7458
7459 if (edid) {
7460 /* empty probed_modes */
7461 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 7462 amdgpu_dm_connector->num_modes =
e7b07cee
HW
7463 drm_add_edid_modes(connector, edid);
7464
f1e5e913
YMM
7465		/* Sort the probed modes before calling
7466		 * amdgpu_dm_get_native_mode(), since an EDID can have
7467		 * more than one preferred mode. Modes that appear
7468		 * later in the probed mode list could have a higher,
7469		 * preferred resolution. For example, a 3840x2160
7470		 * resolution in the base EDID preferred timing and a 4096x2160
7471		 * preferred resolution in a DID extension block later.
7472 */
7473 drm_mode_sort(&connector->probed_modes);
e7b07cee 7474 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
7475
7476 /* Freesync capabilities are reset by calling
7477 * drm_add_edid_modes() and need to be
7478 * restored here.
7479 */
7480 amdgpu_dm_update_freesync_caps(connector, edid);
a8d8d3dc 7481 } else {
c84dec2f 7482 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 7483 }
e7b07cee
HW
7484}
7485
a85ba005
NC
7486static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7487 struct drm_display_mode *mode)
7488{
7489 struct drm_display_mode *m;
7490
7491	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7492 if (drm_mode_equal(m, mode))
7493 return true;
7494 }
7495
7496 return false;
7497}
7498
7499static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7500{
7501 const struct drm_display_mode *m;
7502 struct drm_display_mode *new_mode;
7503 uint i;
7504 uint32_t new_modes_count = 0;
7505
7506 /* Standard FPS values
7507 *
7508 * 23.976 - TV/NTSC
7509 * 24 - Cinema
7510 * 25 - TV/PAL
7511 * 29.97 - TV/NTSC
7512 * 30 - TV/NTSC
7513 * 48 - Cinema HFR
7514 * 50 - TV/PAL
7515 * 60 - Commonly used
7516 * 48,72,96 - Multiples of 24
7517 */
7518 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7519 48000, 50000, 60000, 72000, 96000 };
7520
7521 /*
7522 * Find mode with highest refresh rate with the same resolution
7523 * as the preferred mode. Some monitors report a preferred mode
7524 * with lower resolution than the highest refresh rate supported.
7525 */
7526
7527 m = get_highest_refresh_rate_mode(aconnector, true);
7528 if (!m)
7529 return 0;
7530
7531 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7532 uint64_t target_vtotal, target_vtotal_diff;
7533 uint64_t num, den;
7534
7535 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7536 continue;
7537
7538 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7539 common_rates[i] > aconnector->max_vfreq * 1000)
7540 continue;
7541
7542 num = (unsigned long long)m->clock * 1000 * 1000;
7543 den = common_rates[i] * (unsigned long long)m->htotal;
7544 target_vtotal = div_u64(num, den);
7545 target_vtotal_diff = target_vtotal - m->vtotal;
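		/*
		 * Worked example (illustrative numbers): stretching a
		 * 1080p60 mode (clock 148500 kHz, htotal 2200, vtotal 1125)
		 * down to 48 Hz gives target_vtotal =
		 * 148500000000 / (48000 * 2200) = 1406, i.e. 281 extra
		 * lines of vertical blanking.
		 */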
7546
7547 /* Check for illegal modes */
7548 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7549 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7550 m->vtotal + target_vtotal_diff < m->vsync_end)
7551 continue;
7552
7553 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7554 if (!new_mode)
7555 goto out;
7556
7557 new_mode->vtotal += (u16)target_vtotal_diff;
7558 new_mode->vsync_start += (u16)target_vtotal_diff;
7559 new_mode->vsync_end += (u16)target_vtotal_diff;
7560 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7561 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7562
7563 if (!is_duplicate_mode(aconnector, new_mode)) {
7564 drm_mode_probed_add(&aconnector->base, new_mode);
7565 new_modes_count += 1;
7566 } else
7567 drm_mode_destroy(aconnector->base.dev, new_mode);
7568 }
7569 out:
7570 return new_modes_count;
7571}
7572
7573static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7574 struct edid *edid)
7575{
7576 struct amdgpu_dm_connector *amdgpu_dm_connector =
7577 to_amdgpu_dm_connector(connector);
7578
7579 if (!(amdgpu_freesync_vid_mode && edid))
7580 return;
fe8858bb 7581
a85ba005
NC
7582 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7583 amdgpu_dm_connector->num_modes +=
7584 add_fs_modes(amdgpu_dm_connector);
7585}
7586
7578ecda 7587static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 7588{
c84dec2f
HW
7589 struct amdgpu_dm_connector *amdgpu_dm_connector =
7590 to_amdgpu_dm_connector(connector);
e7b07cee 7591 struct drm_encoder *encoder;
c84dec2f 7592 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 7593
2b4c1c05 7594 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 7595
5c0e6840 7596 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
7597 amdgpu_dm_connector->num_modes =
7598 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
7599 } else {
7600 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7601 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 7602 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 7603 }
3e332d3a 7604 amdgpu_dm_fbc_init(connector);
5099114b 7605
c84dec2f 7606 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
7607}
7608
3ee6b26b
AD
7609void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7610 struct amdgpu_dm_connector *aconnector,
7611 int connector_type,
7612 struct dc_link *link,
7613 int link_index)
e7b07cee 7614{
1348969a 7615 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 7616
f04bee34
NK
7617 /*
7618 * Some of the properties below require access to state, like bpc.
7619 * Allocate some default initial connector state with our reset helper.
7620 */
7621 if (aconnector->base.funcs->reset)
7622 aconnector->base.funcs->reset(&aconnector->base);
7623
e7b07cee
HW
7624 aconnector->connector_id = link_index;
7625 aconnector->dc_link = link;
7626 aconnector->base.interlace_allowed = false;
7627 aconnector->base.doublescan_allowed = false;
7628 aconnector->base.stereo_allowed = false;
7629 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7630 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 7631 aconnector->audio_inst = -1;
e7b07cee
HW
7632 mutex_init(&aconnector->hpd_lock);
7633
1f6010a9
DF
7634 /*
7635	 * Configure HPD hot plug support: connector->polled defaults to 0,
b830ebc9
HW
7636	 * which means HPD hot plug is not supported.
7637 */
e7b07cee
HW
7638 switch (connector_type) {
7639 case DRM_MODE_CONNECTOR_HDMIA:
7640 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7641 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7642 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
7643 break;
7644 case DRM_MODE_CONNECTOR_DisplayPort:
7645 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 7646 aconnector->base.ycbcr_420_allowed =
9ea59d5a 7647 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
7648 break;
7649 case DRM_MODE_CONNECTOR_DVID:
7650 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7651 break;
7652 default:
7653 break;
7654 }
7655
7656 drm_object_attach_property(&aconnector->base.base,
7657 dm->ddev->mode_config.scaling_mode_property,
7658 DRM_MODE_SCALE_NONE);
7659
7660 drm_object_attach_property(&aconnector->base.base,
7661 adev->mode_info.underscan_property,
7662 UNDERSCAN_OFF);
7663 drm_object_attach_property(&aconnector->base.base,
7664 adev->mode_info.underscan_hborder_property,
7665 0);
7666 drm_object_attach_property(&aconnector->base.base,
7667 adev->mode_info.underscan_vborder_property,
7668 0);
1825fd34 7669
8c61b31e
JFZ
7670 if (!aconnector->mst_port)
7671 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 7672
4a8ca46b
RL
7673 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7674 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7675 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 7676
c1ee92f9 7677 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 7678 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
7679 drm_object_attach_property(&aconnector->base.base,
7680 adev->mode_info.abm_level_property, 0);
7681 }
bb47de73
NK
7682
7683 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
7684 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7685 connector_type == DRM_MODE_CONNECTOR_eDP) {
e057b52c 7686 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
88694af9 7687
8c61b31e
JFZ
7688 if (!aconnector->mst_port)
7689 drm_connector_attach_vrr_capable_property(&aconnector->base);
7690
0c8620d6 7691#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 7692 if (adev->dm.hdcp_workqueue)
53e108aa 7693 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 7694#endif
bb47de73 7695 }
e7b07cee
HW
7696}
7697
7578ecda
AD
7698static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7699 struct i2c_msg *msgs, int num)
e7b07cee
HW
7700{
7701 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7702 struct ddc_service *ddc_service = i2c->ddc_service;
7703 struct i2c_command cmd;
7704 int i;
7705 int result = -EIO;
7706
b830ebc9 7707 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
7708
7709 if (!cmd.payloads)
7710 return result;
7711
7712 cmd.number_of_payloads = num;
7713 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7714 cmd.speed = 100;
7715
7716 for (i = 0; i < num; i++) {
7717 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7718 cmd.payloads[i].address = msgs[i].addr;
7719 cmd.payloads[i].length = msgs[i].len;
7720 cmd.payloads[i].data = msgs[i].buf;
7721 }
7722
c85e6e54
DF
7723 if (dc_submit_i2c(
7724 ddc_service->ctx->dc,
7725 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
7726 &cmd))
7727 result = num;
7728
7729 kfree(cmd.payloads);
7730 return result;
7731}
7732
7578ecda 7733static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
7734{
7735 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7736}
7737
7738static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7739 .master_xfer = amdgpu_dm_i2c_xfer,
7740 .functionality = amdgpu_dm_i2c_func,
7741};
7742
3ee6b26b
AD
7743static struct amdgpu_i2c_adapter *
7744create_i2c(struct ddc_service *ddc_service,
7745 int link_index,
7746 int *res)
e7b07cee
HW
7747{
7748 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7749 struct amdgpu_i2c_adapter *i2c;
7750
b830ebc9 7751 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
7752 if (!i2c)
7753 return NULL;
e7b07cee
HW
7754 i2c->base.owner = THIS_MODULE;
7755 i2c->base.class = I2C_CLASS_DDC;
7756 i2c->base.dev.parent = &adev->pdev->dev;
7757 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 7758 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
7759 i2c_set_adapdata(&i2c->base, i2c);
7760 i2c->ddc_service = ddc_service;
c85e6e54 7761 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
7762
7763 return i2c;
7764}
7765
89fc8d4e 7766
1f6010a9
DF
7767/*
7768 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
7769 * dc_link which will be represented by this aconnector.
7770 */
7578ecda
AD
7771static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7772 struct amdgpu_dm_connector *aconnector,
7773 uint32_t link_index,
7774 struct amdgpu_encoder *aencoder)
e7b07cee
HW
7775{
7776 int res = 0;
7777 int connector_type;
7778 struct dc *dc = dm->dc;
7779 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7780 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
7781
7782 link->priv = aconnector;
e7b07cee 7783
f1ad2f5e 7784 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
7785
7786 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
7787 if (!i2c) {
7788 DRM_ERROR("Failed to create i2c adapter data\n");
7789 return -ENOMEM;
7790 }
7791
e7b07cee
HW
7792 aconnector->i2c = i2c;
7793 res = i2c_add_adapter(&i2c->base);
7794
7795 if (res) {
7796 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7797 goto out_free;
7798 }
7799
7800 connector_type = to_drm_connector_type(link->connector_signal);
7801
17165de2 7802 res = drm_connector_init_with_ddc(
e7b07cee
HW
7803 dm->ddev,
7804 &aconnector->base,
7805 &amdgpu_dm_connector_funcs,
17165de2
AP
7806 connector_type,
7807 &i2c->base);
e7b07cee
HW
7808
7809 if (res) {
7810 DRM_ERROR("connector_init failed\n");
7811 aconnector->connector_id = -1;
7812 goto out_free;
7813 }
7814
7815 drm_connector_helper_add(
7816 &aconnector->base,
7817 &amdgpu_dm_connector_helper_funcs);
7818
7819 amdgpu_dm_connector_init_helper(
7820 dm,
7821 aconnector,
7822 connector_type,
7823 link,
7824 link_index);
7825
cde4c44d 7826 drm_connector_attach_encoder(
e7b07cee
HW
7827 &aconnector->base, &aencoder->base);
7828
e7b07cee
HW
7829 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7830 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 7831 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 7832
e7b07cee
HW
7833out_free:
7834 if (res) {
7835 kfree(i2c);
7836 aconnector->i2c = NULL;
7837 }
7838 return res;
7839}
7840
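/*
 * Each encoder may be routed to any CRTC, so this simply builds the
 * bitmask (1 << num_crtc) - 1, capped at the maximum of 6 CRTCs the
 * hardware exposes.
 */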
7841int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7842{
7843 switch (adev->mode_info.num_crtc) {
7844 case 1:
7845 return 0x1;
7846 case 2:
7847 return 0x3;
7848 case 3:
7849 return 0x7;
7850 case 4:
7851 return 0xf;
7852 case 5:
7853 return 0x1f;
7854 case 6:
7855 default:
7856 return 0x3f;
7857 }
7858}
7859
7578ecda
AD
7860static int amdgpu_dm_encoder_init(struct drm_device *dev,
7861 struct amdgpu_encoder *aencoder,
7862 uint32_t link_index)
e7b07cee 7863{
1348969a 7864 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
7865
7866 int res = drm_encoder_init(dev,
7867 &aencoder->base,
7868 &amdgpu_dm_encoder_funcs,
7869 DRM_MODE_ENCODER_TMDS,
7870 NULL);
7871
7872 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7873
7874 if (!res)
7875 aencoder->encoder_id = link_index;
7876 else
7877 aencoder->encoder_id = -1;
7878
7879 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7880
7881 return res;
7882}
7883
3ee6b26b
AD
7884static void manage_dm_interrupts(struct amdgpu_device *adev,
7885 struct amdgpu_crtc *acrtc,
7886 bool enable)
e7b07cee
HW
7887{
7888 /*
8fe684e9
NK
7889 * We have no guarantee that the frontend index maps to the same
7890 * backend index - some even map to more than one.
7891 *
7892 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
7893 */
7894 int irq_type =
734dd01d 7895 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
7896 adev,
7897 acrtc->crtc_id);
7898
7899 if (enable) {
7900 drm_crtc_vblank_on(&acrtc->base);
7901 amdgpu_irq_get(
7902 adev,
7903 &adev->pageflip_irq,
7904 irq_type);
86bc2219
WL
7905#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7906 amdgpu_irq_get(
7907 adev,
7908 &adev->vline0_irq,
7909 irq_type);
7910#endif
e7b07cee 7911 } else {
86bc2219
WL
7912#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7913 amdgpu_irq_put(
7914 adev,
7915 &adev->vline0_irq,
7916 irq_type);
7917#endif
e7b07cee
HW
7918 amdgpu_irq_put(
7919 adev,
7920 &adev->pageflip_irq,
7921 irq_type);
7922 drm_crtc_vblank_off(&acrtc->base);
7923 }
7924}
7925
8fe684e9
NK
7926static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7927 struct amdgpu_crtc *acrtc)
7928{
7929 int irq_type =
7930 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7931
7932 /**
7933	 * This reads the current state for the IRQ and forcibly reapplies
7934 * the setting to hardware.
7935 */
7936 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7937}
7938
3ee6b26b
AD
7939static bool
7940is_scaling_state_different(const struct dm_connector_state *dm_state,
7941 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
7942{
7943 if (dm_state->scaling != old_dm_state->scaling)
7944 return true;
7945 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7946 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7947 return true;
7948 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7949 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7950 return true;
b830ebc9
HW
7951 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7952 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7953 return true;
e7b07cee
HW
7954 return false;
7955}
7956
0c8620d6
BL
7957#ifdef CONFIG_DRM_AMD_DC_HDCP
7958static bool is_content_protection_different(struct drm_connector_state *state,
7959 const struct drm_connector_state *old_state,
7960 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7961{
7962 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 7963 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 7964
31c0ed90 7965 /* Handle: Type0/1 change */
53e108aa
BL
7966 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7967 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7968 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7969 return true;
7970 }
7971
31c0ed90
BL
7972	/* CP is being re-enabled, ignore this
7973 *
7974 * Handles: ENABLED -> DESIRED
7975 */
0c8620d6
BL
7976 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7977 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7978 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7979 return false;
7980 }
7981
31c0ed90
BL
7982	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7983 *
7984 * Handles: UNDESIRED -> ENABLED
7985 */
0c8620d6
BL
7986 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7987 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7988 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7989
7990	/* Check if something is connected/enabled; otherwise we would start HDCP
7991	 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
31c0ed90
BL
7992 *
7993 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 7994 */
97f6c917
BL
7995 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7996 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7997 dm_con_state->update_hdcp = false;
0c8620d6 7998 return true;
97f6c917 7999 }
0c8620d6 8000
31c0ed90
BL
8001 /*
8002 * Handles: UNDESIRED -> UNDESIRED
8003 * DESIRED -> DESIRED
8004 * ENABLED -> ENABLED
8005 */
0c8620d6
BL
8006 if (old_state->content_protection == state->content_protection)
8007 return false;
8008
31c0ed90
BL
8009 /*
8010 * Handles: UNDESIRED -> DESIRED
8011 * DESIRED -> UNDESIRED
8012 * ENABLED -> UNDESIRED
8013 */
97f6c917 8014 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
8015 return true;
8016
31c0ed90
BL
8017 /*
8018 * Handles: DESIRED -> ENABLED
8019 */
0c8620d6
BL
8020 return false;
8021}
8022
0c8620d6 8023#endif
3ee6b26b
AD
8024static void remove_stream(struct amdgpu_device *adev,
8025 struct amdgpu_crtc *acrtc,
8026 struct dc_stream_state *stream)
e7b07cee
HW
8027{
8028 /* this is the update mode case */
e7b07cee
HW
8029
8030 acrtc->otg_inst = -1;
8031 acrtc->enabled = false;
8032}
8033
7578ecda
AD
8034static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8035 struct dc_cursor_position *position)
2a8f6ccb 8036{
f4c2cc43 8037 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
8038 int x, y;
8039 int xorigin = 0, yorigin = 0;
8040
e371e19c 8041 if (!crtc || !plane->state->fb)
2a8f6ccb 8042 return 0;
2a8f6ccb
HW
8043
8044 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8045 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8046 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8047 __func__,
8048 plane->state->crtc_w,
8049 plane->state->crtc_h);
8050 return -EINVAL;
8051 }
8052
8053 x = plane->state->crtc_x;
8054 y = plane->state->crtc_y;
c14a005c 8055
e371e19c
NK
8056 if (x <= -amdgpu_crtc->max_cursor_width ||
8057 y <= -amdgpu_crtc->max_cursor_height)
8058 return 0;
8059
2a8f6ccb
HW
8060 if (x < 0) {
8061 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8062 x = 0;
8063 }
8064 if (y < 0) {
8065 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8066 y = 0;
8067 }
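	/*
	 * Example: a cursor at x = -10 becomes x = 0 with xorigin = 10, so
	 * the hotspot shifts into the image and the cursor appears clipped
	 * at the left edge instead of being rejected or wrapping around.
	 */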
8068 position->enable = true;
d243b6ff 8069 position->translate_by_source = true;
2a8f6ccb
HW
8070 position->x = x;
8071 position->y = y;
8072 position->x_hotspot = xorigin;
8073 position->y_hotspot = yorigin;
8074
8075 return 0;
8076}
8077
3ee6b26b
AD
8078static void handle_cursor_update(struct drm_plane *plane,
8079 struct drm_plane_state *old_plane_state)
e7b07cee 8080{
1348969a 8081 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
8082 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8083 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8084 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8085 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8086 uint64_t address = afb ? afb->address : 0;
6a30a929 8087 struct dc_cursor_position position = {0};
2a8f6ccb
HW
8088 struct dc_cursor_attributes attributes;
8089 int ret;
8090
e7b07cee
HW
8091 if (!plane->state->fb && !old_plane_state->fb)
8092 return;
8093
cb2318b7 8094 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
4711c033
LT
8095 __func__,
8096 amdgpu_crtc->crtc_id,
8097 plane->state->crtc_w,
8098 plane->state->crtc_h);
2a8f6ccb
HW
8099
8100 ret = get_cursor_position(plane, crtc, &position);
8101 if (ret)
8102 return;
8103
8104 if (!position.enable) {
8105 /* turn off cursor */
674e78ac
NK
8106 if (crtc_state && crtc_state->stream) {
8107 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
8108 dc_stream_set_cursor_position(crtc_state->stream,
8109 &position);
674e78ac
NK
8110 mutex_unlock(&adev->dm.dc_lock);
8111 }
2a8f6ccb 8112 return;
e7b07cee 8113 }
e7b07cee 8114
2a8f6ccb
HW
8115 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8116 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8117
c1cefe11 8118 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
8119 attributes.address.high_part = upper_32_bits(address);
8120 attributes.address.low_part = lower_32_bits(address);
8121 attributes.width = plane->state->crtc_w;
8122 attributes.height = plane->state->crtc_h;
8123 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8124 attributes.rotation_angle = 0;
8125 attributes.attribute_flags.value = 0;
8126
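	/*
	 * DRM stores pitches[] in bytes, but DC expects the cursor pitch in
	 * pixels, hence the divide by the format's bytes-per-pixel (cpp).
	 */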
03a66367 8127 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 8128
886daac9 8129 if (crtc_state->stream) {
674e78ac 8130 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
8131 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8132 &attributes))
8133 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 8134
2a8f6ccb
HW
8135 if (!dc_stream_set_cursor_position(crtc_state->stream,
8136 &position))
8137 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 8138 mutex_unlock(&adev->dm.dc_lock);
886daac9 8139 }
2a8f6ccb 8140}
e7b07cee
HW
8141
8142static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8143{
8144
8145 assert_spin_locked(&acrtc->base.dev->event_lock);
8146 WARN_ON(acrtc->event);
8147
8148 acrtc->event = acrtc->base.state->event;
8149
8150 /* Set the flip status */
8151 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8152
8153 /* Mark this event as consumed */
8154 acrtc->base.state->event = NULL;
8155
cb2318b7
VL
8156 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8157 acrtc->crtc_id);
e7b07cee
HW
8158}
8159
bb47de73
NK
8160static void update_freesync_state_on_stream(
8161 struct amdgpu_display_manager *dm,
8162 struct dm_crtc_state *new_crtc_state,
180db303
NK
8163 struct dc_stream_state *new_stream,
8164 struct dc_plane_state *surface,
8165 u32 flip_timestamp_in_us)
bb47de73 8166{
09aef2c4 8167 struct mod_vrr_params vrr_params;
bb47de73 8168 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 8169 struct amdgpu_device *adev = dm->adev;
585d450c 8170 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8171 unsigned long flags;
4cda3243 8172 bool pack_sdp_v1_3 = false;
bb47de73
NK
8173
8174 if (!new_stream)
8175 return;
8176
8177 /*
8178 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8179 * For now it's sufficient to just guard against these conditions.
8180 */
8181
8182 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8183 return;
8184
4a580877 8185 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8186 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8187
180db303
NK
8188 if (surface) {
8189 mod_freesync_handle_preflip(
8190 dm->freesync_module,
8191 surface,
8192 new_stream,
8193 flip_timestamp_in_us,
8194 &vrr_params);
09aef2c4
MK
8195
8196 if (adev->family < AMDGPU_FAMILY_AI &&
8197 amdgpu_dm_vrr_active(new_crtc_state)) {
8198 mod_freesync_handle_v_update(dm->freesync_module,
8199 new_stream, &vrr_params);
e63e2491
EB
8200
8201 /* Need to call this before the frame ends. */
8202 dc_stream_adjust_vmin_vmax(dm->dc,
8203 new_crtc_state->stream,
8204 &vrr_params.adjust);
09aef2c4 8205 }
180db303 8206 }
bb47de73
NK
8207
8208 mod_freesync_build_vrr_infopacket(
8209 dm->freesync_module,
8210 new_stream,
180db303 8211 &vrr_params,
ecd0136b
HT
8212 PACKET_TYPE_VRR,
8213 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
8214 &vrr_infopacket,
8215 pack_sdp_v1_3);
bb47de73 8216
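	/*
	 * Record whether the timing adjustment or the VRR infopacket actually
	 * changed, so the commit path only programs stream updates when needed.
	 */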
8a48b44c 8217 new_crtc_state->freesync_timing_changed |=
585d450c 8218 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
8219 &vrr_params.adjust,
8220 sizeof(vrr_params.adjust)) != 0);
bb47de73 8221
8a48b44c 8222 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
8223 (memcmp(&new_crtc_state->vrr_infopacket,
8224 &vrr_infopacket,
8225 sizeof(vrr_infopacket)) != 0);
8226
585d450c 8227 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
8228 new_crtc_state->vrr_infopacket = vrr_infopacket;
8229
585d450c 8230 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
8231 new_stream->vrr_infopacket = vrr_infopacket;
8232
8233 if (new_crtc_state->freesync_vrr_info_changed)
8234 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8235 new_crtc_state->base.crtc->base.id,
8236 (int)new_crtc_state->base.vrr_enabled,
180db303 8237 (int)vrr_params.state);
09aef2c4 8238
4a580877 8239 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
8240}
8241
585d450c 8242static void update_stream_irq_parameters(
e854194c
MK
8243 struct amdgpu_display_manager *dm,
8244 struct dm_crtc_state *new_crtc_state)
8245{
8246 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 8247 struct mod_vrr_params vrr_params;
e854194c 8248 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 8249 struct amdgpu_device *adev = dm->adev;
585d450c 8250 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8251 unsigned long flags;
e854194c
MK
8252
8253 if (!new_stream)
8254 return;
8255
8256 /*
8257 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8258 * For now it's sufficient to just guard against these conditions.
8259 */
8260 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8261 return;
8262
4a580877 8263 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8264 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8265
e854194c
MK
8266 if (new_crtc_state->vrr_supported &&
8267 config.min_refresh_in_uhz &&
8268 config.max_refresh_in_uhz) {
a85ba005
NC
8269 /*
8270 * if freesync compatible mode was set, config.state will be set
8271 * in atomic check
8272 */
8273 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8274 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8275 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8276 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8277 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8278 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8279 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8280 } else {
8281 config.state = new_crtc_state->base.vrr_enabled ?
8282 VRR_STATE_ACTIVE_VARIABLE :
8283 VRR_STATE_INACTIVE;
8284 }
e854194c
MK
8285 } else {
8286 config.state = VRR_STATE_UNSUPPORTED;
8287 }
8288
8289 mod_freesync_build_vrr_params(dm->freesync_module,
8290 new_stream,
8291 &config, &vrr_params);
8292
8293 new_crtc_state->freesync_timing_changed |=
585d450c
AP
8294 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8295 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 8296
585d450c
AP
8297 new_crtc_state->freesync_config = config;
8298 /* Copy state for access from DM IRQ handler */
8299 acrtc->dm_irq_params.freesync_config = config;
8300 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8301 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 8302 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
8303}
8304
66b0c973
MK
8305static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8306 struct dm_crtc_state *new_state)
8307{
8308 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8309 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8310
8311 if (!old_vrr_active && new_vrr_active) {
8312 /* Transition VRR inactive -> active:
8314		 * While VRR is active, we must not disable the vblank irq, as a
8315		 * re-enable after a disable would compute bogus vblank/pflip
8316		 * timestamps if it happened inside the display front porch.
d2574c33
MK
8316 *
8317 * We also need vupdate irq for the actual core vblank handling
8318 * at end of vblank.
66b0c973 8319 */
d2574c33 8320 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
8321 drm_crtc_vblank_get(new_state->base.crtc);
8322 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8323 __func__, new_state->base.crtc->base.id);
8324 } else if (old_vrr_active && !new_vrr_active) {
8325 /* Transition VRR active -> inactive:
8326 * Allow vblank irq disable again for fixed refresh rate.
8327 */
d2574c33 8328 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
8329 drm_crtc_vblank_put(new_state->base.crtc);
8330 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8331 __func__, new_state->base.crtc->base.id);
8332 }
8333}
8334
8ad27806
NK
8335static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8336{
8337 struct drm_plane *plane;
5760dcb9 8338 struct drm_plane_state *old_plane_state;
8ad27806
NK
8339 int i;
8340
8341 /*
8342 * TODO: Make this per-stream so we don't issue redundant updates for
8343 * commits with multiple streams.
8344 */
5760dcb9 8345 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8ad27806
NK
8346 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8347 handle_cursor_update(plane, old_plane_state);
8348}
8349
3be5262e 8350static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 8351 struct dc_state *dc_state,
3ee6b26b
AD
8352 struct drm_device *dev,
8353 struct amdgpu_display_manager *dm,
8354 struct drm_crtc *pcrtc,
420cd472 8355 bool wait_for_vblank)
e7b07cee 8356{
efc8278e 8357 uint32_t i;
8a48b44c 8358 uint64_t timestamp_ns;
e7b07cee 8359 struct drm_plane *plane;
0bc9706d 8360 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 8361 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
8362 struct drm_crtc_state *new_pcrtc_state =
8363 drm_atomic_get_new_crtc_state(state, pcrtc);
8364 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
8365 struct dm_crtc_state *dm_old_crtc_state =
8366 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 8367 int planes_count = 0, vpos, hpos;
570c91d5 8368 long r;
e7b07cee 8369 unsigned long flags;
8a48b44c 8370 struct amdgpu_bo *abo;
fdd1fe57
MK
8371 uint32_t target_vblank, last_flip_vblank;
8372 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 8373 bool pflip_present = false;
bc7f670e
DF
8374 struct {
8375 struct dc_surface_update surface_updates[MAX_SURFACES];
8376 struct dc_plane_info plane_infos[MAX_SURFACES];
8377 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 8378 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 8379 struct dc_stream_update stream_update;
74aa7bd4 8380 } *bundle;
bc7f670e 8381
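	/*
	 * The update bundle is heap-allocated: with MAX_SURFACES entries of
	 * surface, plane, scaling and flip state it is too large to sit
	 * safely on the kernel stack.
	 */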
74aa7bd4 8382 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 8383
74aa7bd4
DF
8384 if (!bundle) {
8385 dm_error("Failed to allocate update bundle\n");
4b510503
NK
8386 goto cleanup;
8387 }
e7b07cee 8388
8ad27806
NK
8389 /*
8390 * Disable the cursor first if we're disabling all the planes.
8391 * It'll remain on the screen after the planes are re-enabled
8392 * if we don't.
8393 */
8394 if (acrtc_state->active_planes == 0)
8395 amdgpu_dm_commit_cursors(state);
8396
e7b07cee 8397 /* update planes when needed */
efc8278e 8398 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 8399 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 8400 struct drm_crtc_state *new_crtc_state;
0bc9706d 8401 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 8402 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 8403 bool plane_needs_flip;
c7af5f77 8404 struct dc_plane_state *dc_plane;
54d76575 8405 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 8406
80c218d5
NK
8407 /* Cursor plane is handled after stream updates */
8408 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 8409 continue;
e7b07cee 8410
f5ba60fe
DD
8411 if (!fb || !crtc || pcrtc != crtc)
8412 continue;
8413
8414 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8415 if (!new_crtc_state->active)
e7b07cee
HW
8416 continue;
8417
bc7f670e 8418 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 8419
74aa7bd4 8420 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 8421 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
8422 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8423 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 8424 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 8425 }
8a48b44c 8426
695af5f9
NK
8427 fill_dc_scaling_info(new_plane_state,
8428 &bundle->scaling_infos[planes_count]);
8a48b44c 8429
695af5f9
NK
8430 bundle->surface_updates[planes_count].scaling_info =
8431 &bundle->scaling_infos[planes_count];
8a48b44c 8432
f5031000 8433 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 8434
f5031000 8435 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 8436
f5031000
DF
8437 if (!plane_needs_flip) {
8438 planes_count += 1;
8439 continue;
8440 }
8a48b44c 8441
2fac0f53
CK
8442 abo = gem_to_amdgpu_bo(fb->obj[0]);
8443
f8308898
AG
8444 /*
8445 * Wait for all fences on this FB. Do limited wait to avoid
8446 * deadlock during GPU reset when this fence will not signal
8447 * but we hold reservation lock for the BO.
8448 */
52791eee 8449 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
2fac0f53 8450 false,
f8308898
AG
8451 msecs_to_jiffies(5000));
8452 if (unlikely(r <= 0))
ed8a5fb2 8453 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 8454
695af5f9 8455 fill_dc_plane_info_and_addr(
8ce5d842 8456 dm->adev, new_plane_state,
6eed95b0 8457 afb->tiling_flags,
695af5f9 8458 &bundle->plane_infos[planes_count],
87b7ebc2 8459 &bundle->flip_addrs[planes_count].address,
6eed95b0 8460 afb->tmz_surface, false);
87b7ebc2 8461
4711c033 8462 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
8463 new_plane_state->plane->index,
8464 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
8465
8466 bundle->surface_updates[planes_count].plane_info =
8467 &bundle->plane_infos[planes_count];
8a48b44c 8468
caff0e66
NK
8469 /*
8470 * Only allow immediate flips for fast updates that don't
8471		 * change FB pitch, DCC state, rotation or mirroring.
8472 */
f5031000 8473 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 8474 crtc->state->async_flip &&
caff0e66 8475 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 8476
f5031000
DF
8477 timestamp_ns = ktime_get_ns();
8478 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8479 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8480 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 8481
f5031000
DF
8482 if (!bundle->surface_updates[planes_count].surface) {
8483 DRM_ERROR("No surface for CRTC: id=%d\n",
8484 acrtc_attach->crtc_id);
8485 continue;
bc7f670e
DF
8486 }
8487
f5031000
DF
8488 if (plane == pcrtc->primary)
8489 update_freesync_state_on_stream(
8490 dm,
8491 acrtc_state,
8492 acrtc_state->stream,
8493 dc_plane,
8494 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 8495
4711c033 8496 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
8497 __func__,
8498 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8499 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
8500
8501 planes_count += 1;
8502
8a48b44c
DF
8503 }
8504
74aa7bd4 8505 if (pflip_present) {
634092b1
MK
8506 if (!vrr_active) {
8507 /* Use old throttling in non-vrr fixed refresh rate mode
8508 * to keep flip scheduling based on target vblank counts
8509 * working in a backwards compatible way, e.g., for
8510 * clients using the GLX_OML_sync_control extension or
8511 * DRI3/Present extension with defined target_msc.
8512 */
e3eff4b5 8513 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
8514		} else {
8516 /* For variable refresh rate mode only:
8517 * Get vblank of last completed flip to avoid > 1 vrr
8518 * flips per video frame by use of throttling, but allow
8519 * flip programming anywhere in the possibly large
8520 * variable vrr vblank interval for fine-grained flip
8521 * timing control and more opportunity to avoid stutter
8522 * on late submission of flips.
8523 */
8524 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 8525 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
8526 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8527 }
8528
fdd1fe57 8529 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
8530
8531 /*
8532 * Wait until we're out of the vertical blank period before the one
8533 * targeted by the flip
8534 */
8535 while ((acrtc_attach->enabled &&
8536 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8537 0, &vpos, &hpos, NULL,
8538 NULL, &pcrtc->hwmode)
8539 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8540 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8541 (int)(target_vblank -
e3eff4b5 8542 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
8543 usleep_range(1000, 1100);
8544 }
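		/*
		 * The signed cast in the loop condition above keeps the target
		 * comparison correct across 32-bit vblank counter wraparound.
		 */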
8545
8fe684e9
NK
8546 /**
8547 * Prepare the flip event for the pageflip interrupt to handle.
8548 *
8549 * This only works in the case where we've already turned on the
8550 * appropriate hardware blocks (eg. HUBP) so in the transition case
8551 * from 0 -> n planes we have to skip a hardware generated event
8552 * and rely on sending it from software.
8553 */
8554 if (acrtc_attach->base.state->event &&
8555 acrtc_state->active_planes > 0) {
8a48b44c
DF
8556 drm_crtc_vblank_get(pcrtc);
8557
8558 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8559
8560 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8561 prepare_flip_isr(acrtc_attach);
8562
8563 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8564 }
8565
8566 if (acrtc_state->stream) {
8a48b44c 8567 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 8568 bundle->stream_update.vrr_infopacket =
8a48b44c 8569 &acrtc_state->stream->vrr_infopacket;
e7b07cee 8570 }
e7b07cee
HW
8571 }
8572
bc92c065 8573 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
8574 if ((planes_count || acrtc_state->active_planes == 0) &&
8575 acrtc_state->stream) {
b6e881c9 8576 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 8577 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
8578 bundle->stream_update.src = acrtc_state->stream->src;
8579 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
8580 }
8581
cf020d49
NK
8582 if (new_pcrtc_state->color_mgmt_changed) {
8583 /*
8584 * TODO: This isn't fully correct since we've actually
8585 * already modified the stream in place.
8586 */
8587 bundle->stream_update.gamut_remap =
8588 &acrtc_state->stream->gamut_remap_matrix;
8589 bundle->stream_update.output_csc_transform =
8590 &acrtc_state->stream->csc_color_matrix;
8591 bundle->stream_update.out_transfer_func =
8592 acrtc_state->stream->out_transfer_func;
8593 }
bc7f670e 8594
8a48b44c 8595 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 8596 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 8597 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 8598
e63e2491
EB
8599 /*
8600 * If FreeSync state on the stream has changed then we need to
8601 * re-adjust the min/max bounds now that DC doesn't handle this
8602 * as part of commit.
8603 */
a85ba005 8604 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
8605 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8606 dc_stream_adjust_vmin_vmax(
8607 dm->dc, acrtc_state->stream,
585d450c 8608 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
8609 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8610 }
bc7f670e 8611 mutex_lock(&dm->dc_lock);
8c322309 8612 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 8613 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
8614 amdgpu_dm_psr_disable(acrtc_state->stream);
8615
bc7f670e 8616 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 8617 bundle->surface_updates,
bc7f670e
DF
8618 planes_count,
8619 acrtc_state->stream,
efc8278e
AJ
8620 &bundle->stream_update,
8621 dc_state);
8c322309 8622
8fe684e9
NK
8623 /**
8624 * Enable or disable the interrupts on the backend.
8625 *
8626 * Most pipes are put into power gating when unused.
8627 *
8628		 * When a pipe is power gated, its interrupt enablement state
8629		 * is lost and is not restored when the gating is later lifted.
8630 *
8631 * So we need to update the IRQ control state in hardware
8632 * whenever the pipe turns on (since it could be previously
8633 * power gated) or off (since some pipes can't be power gated
8634 * on some ASICs).
8635 */
8636 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
8637 dm_update_pflip_irq_state(drm_to_adev(dev),
8638 acrtc_attach);
8fe684e9 8639
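		/*
		 * PSR handling sketch: after a non-fast update, links that
		 * report a supported PSR version and are not yet set up get
		 * PSR configured; PSR entry itself is only requested on FAST
		 * (flip-only) updates while not already active. Heavier
		 * updates disabled PSR above, before
		 * dc_commit_updates_for_stream().
		 */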
8c322309 8640 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 8641 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 8642 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309
RL
8643 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8644 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
d1ebfdd8
WW
8645 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8646 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8c322309
RL
8647 amdgpu_dm_psr_enable(acrtc_state->stream);
8648 }
8649
bc7f670e 8650 mutex_unlock(&dm->dc_lock);
e7b07cee 8651 }
4b510503 8652
8ad27806
NK
8653 /*
8654 * Update cursor state *after* programming all the planes.
8655 * This avoids redundant programming in the case where we're going
8656 * to be disabling a single plane - those pipes are being disabled.
8657 */
8658 if (acrtc_state->active_planes)
8659 amdgpu_dm_commit_cursors(state);
80c218d5 8660
4b510503 8661cleanup:
74aa7bd4 8662 kfree(bundle);
e7b07cee
HW
8663}
8664
6ce8f316
NK
8665static void amdgpu_dm_commit_audio(struct drm_device *dev,
8666 struct drm_atomic_state *state)
8667{
1348969a 8668 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
8669 struct amdgpu_dm_connector *aconnector;
8670 struct drm_connector *connector;
8671 struct drm_connector_state *old_con_state, *new_con_state;
8672 struct drm_crtc_state *new_crtc_state;
8673 struct dm_crtc_state *new_dm_crtc_state;
8674 const struct dc_stream_status *status;
8675 int i, inst;
8676
8677 /* Notify device removals. */
8678 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8679 if (old_con_state->crtc != new_con_state->crtc) {
8680 /* CRTC changes require notification. */
8681 goto notify;
8682 }
8683
8684 if (!new_con_state->crtc)
8685 continue;
8686
8687 new_crtc_state = drm_atomic_get_new_crtc_state(
8688 state, new_con_state->crtc);
8689
8690 if (!new_crtc_state)
8691 continue;
8692
8693 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8694 continue;
8695
8696 notify:
8697 aconnector = to_amdgpu_dm_connector(connector);
8698
8699 mutex_lock(&adev->dm.audio_lock);
8700 inst = aconnector->audio_inst;
8701 aconnector->audio_inst = -1;
8702 mutex_unlock(&adev->dm.audio_lock);
8703
8704 amdgpu_dm_audio_eld_notify(adev, inst);
8705 }
8706
8707 /* Notify audio device additions. */
8708 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8709 if (!new_con_state->crtc)
8710 continue;
8711
8712 new_crtc_state = drm_atomic_get_new_crtc_state(
8713 state, new_con_state->crtc);
8714
8715 if (!new_crtc_state)
8716 continue;
8717
8718 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8719 continue;
8720
8721 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8722 if (!new_dm_crtc_state->stream)
8723 continue;
8724
8725 status = dc_stream_get_status(new_dm_crtc_state->stream);
8726 if (!status)
8727 continue;
8728
8729 aconnector = to_amdgpu_dm_connector(connector);
8730
8731 mutex_lock(&adev->dm.audio_lock);
8732 inst = status->audio_inst;
8733 aconnector->audio_inst = inst;
8734 mutex_unlock(&adev->dm.audio_lock);
8735
8736 amdgpu_dm_audio_eld_notify(adev, inst);
8737 }
8738}
8739
1f6010a9 8740/*
27b3f4fc
LSL
8741 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8742 * @crtc_state: the DRM CRTC state
8743 * @stream_state: the DC stream state.
8744 *
8745 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8746 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8747 */
8748static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8749 struct dc_stream_state *stream_state)
8750{
b9952f93 8751 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 8752}
e7b07cee 8753
b8592b48
LL
8754/**
8755 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8756 * @state: The atomic state to commit
8757 *
8758 * This will tell DC to commit the constructed DC state from atomic_check,
8759 * programming the hardware. Any failure here implies a hardware failure, since
8760 * atomic check should have filtered anything non-kosher.
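 *
 * Rough ordering below: disable interrupts and release streams for CRTCs
 * that are turning off or modesetting, commit the global DC state, apply
 * per-stream and per-plane updates, re-enable interrupts, send remaining
 * vblank events, and drop runtime PM references for newly disabled CRTCs.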
8761 */
7578ecda 8762static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
8763{
8764 struct drm_device *dev = state->dev;
1348969a 8765 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8766 struct amdgpu_display_manager *dm = &adev->dm;
8767 struct dm_atomic_state *dm_state;
eb3dc897 8768 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 8769 uint32_t i, j;
5cc6dcbd 8770 struct drm_crtc *crtc;
0bc9706d 8771 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
8772 unsigned long flags;
8773 bool wait_for_vblank = true;
8774 struct drm_connector *connector;
c2cea706 8775 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 8776 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 8777 int crtc_disable_count = 0;
6ee90e88 8778 bool mode_set_reset_required = false;
e7b07cee 8779
e8a98235
RS
8780 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8781
e7b07cee
HW
8782 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8783
eb3dc897
NK
8784 dm_state = dm_atomic_get_new_state(state);
8785 if (dm_state && dm_state->context) {
8786 dc_state = dm_state->context;
8787 } else {
8788 /* No state changes, retain current state. */
813d20dc 8789 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
8790 ASSERT(dc_state_temp);
8791 dc_state = dc_state_temp;
8792 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8793 }
e7b07cee 8794
6d90a208
AP
8795 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8796 new_crtc_state, i) {
8797 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8798
8799 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8800
8801 if (old_crtc_state->active &&
8802 (!new_crtc_state->active ||
8803 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8804 manage_dm_interrupts(adev, acrtc, false);
8805 dc_stream_release(dm_old_crtc_state->stream);
8806 }
8807 }
8808
8976f73b
RS
8809 drm_atomic_helper_calc_timestamping_constants(state);
8810
e7b07cee 8811 /* update changed items */
0bc9706d 8812 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 8813 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8814
54d76575
LSL
8815 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8816 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 8817
4711c033 8818 DRM_DEBUG_ATOMIC(
e7b07cee
HW
8819 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8820 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8821 "connectors_changed:%d\n",
8822 acrtc->crtc_id,
0bc9706d
LSL
8823 new_crtc_state->enable,
8824 new_crtc_state->active,
8825 new_crtc_state->planes_changed,
8826 new_crtc_state->mode_changed,
8827 new_crtc_state->active_changed,
8828 new_crtc_state->connectors_changed);
e7b07cee 8829
5c68c652
VL
8830 /* Disable cursor if disabling crtc */
8831 if (old_crtc_state->active && !new_crtc_state->active) {
8832 struct dc_cursor_position position;
8833
8834 memset(&position, 0, sizeof(position));
8835 mutex_lock(&dm->dc_lock);
8836 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8837 mutex_unlock(&dm->dc_lock);
8838 }
8839
27b3f4fc
LSL
8840 /* Copy all transient state flags into dc state */
8841 if (dm_new_crtc_state->stream) {
8842 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8843 dm_new_crtc_state->stream);
8844 }
8845
e7b07cee
HW
8846 /* handles headless hotplug case, updating new_state and
8847 * aconnector as needed
8848 */
8849
54d76575 8850 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 8851
4711c033 8852 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8853
54d76575 8854 if (!dm_new_crtc_state->stream) {
e7b07cee 8855 /*
b830ebc9
HW
8856				 * this could happen because of issues with
8857				 * userspace notification delivery.
8858				 * In this case userspace tries to set a mode on
1f6010a9
DF
8859				 * a display which is in fact disconnected.
8860				 * dc_sink is NULL on the aconnector in this case.
b830ebc9
HW
8861				 * We expect a mode reset to come soon.
8862				 *
8863				 * This can also happen when an unplug occurs
8864				 * during the resume sequence.
8865 *
8866 * In this case, we want to pretend we still
8867 * have a sink to keep the pipe running so that
8868 * hw state is consistent with the sw state
8869 */
f1ad2f5e 8870 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
8871 __func__, acrtc->base.base.id);
8872 continue;
8873 }
8874
54d76575
LSL
8875 if (dm_old_crtc_state->stream)
8876 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 8877
97028037
LP
8878 pm_runtime_get_noresume(dev->dev);
8879
e7b07cee 8880 acrtc->enabled = true;
0bc9706d
LSL
8881 acrtc->hw_mode = new_crtc_state->mode;
8882 crtc->hwmode = new_crtc_state->mode;
6ee90e88 8883 mode_set_reset_required = true;
0bc9706d 8884 } else if (modereset_required(new_crtc_state)) {
4711c033 8885 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 8886 /* i.e. reset mode */
6ee90e88 8887 if (dm_old_crtc_state->stream)
54d76575 8888 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 8889
6ee90e88 8890 mode_set_reset_required = true;
e7b07cee
HW
8891 }
8892 } /* for_each_crtc_in_state() */
8893
eb3dc897 8894 if (dc_state) {
6ee90e88 8895		/* if there is a mode set or reset, disable eDP PSR */
8896 if (mode_set_reset_required)
8897 amdgpu_dm_psr_disable_all(dm);
8898
eb3dc897 8899 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 8900 mutex_lock(&dm->dc_lock);
eb3dc897 8901 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5af50b0b
BR
8902#if defined(CONFIG_DRM_AMD_DC_DCN)
8903 /* Allow idle optimization when vblank count is 0 for display off */
8904 if (dm->active_vblank_irq_count == 0)
8905		if (dm->active_vblank_irq_count == 0)
8906			dc_allow_idle_optimizations(dm->dc, true);
8906#endif
674e78ac 8907 mutex_unlock(&dm->dc_lock);
fa2123db 8908 }
fe8858bb 8909
0bc9706d 8910 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 8911 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 8912
54d76575 8913 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 8914
54d76575 8915 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 8916 const struct dc_stream_status *status =
54d76575 8917 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 8918
eb3dc897 8919 if (!status)
09f609c3
LL
8920 status = dc_stream_get_status_from_state(dc_state,
8921 dm_new_crtc_state->stream);
e7b07cee 8922 if (!status)
54d76575 8923 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
8924 else
8925 acrtc->otg_inst = status->primary_otg_inst;
8926 }
8927 }
0c8620d6
BL
8928#ifdef CONFIG_DRM_AMD_DC_HDCP
8929 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8930 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8931 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8932 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8933
8934 new_crtc_state = NULL;
8935
8936 if (acrtc)
8937 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8938
8939 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8940
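		/*
		 * If the stream is gone but content protection is still
		 * ENABLED, reset HDCP for the link and fall back to DESIRED so
		 * protection is renegotiated when the display returns.
		 */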
8941 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8942 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8943 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8944 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 8945 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
8946 continue;
8947 }
8948
8949 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
8950 hdcp_update_display(
8951 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 8952 new_con_state->hdcp_content_type,
0e86d3d4 8953 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
0c8620d6
BL
8954 }
8955#endif
e7b07cee 8956
02d6a6fc 8957 /* Handle connector state changes */
c2cea706 8958 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
8959 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8960 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8961 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 8962 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 8963 struct dc_stream_update stream_update;
b232d4ed 8964 struct dc_info_packet hdr_packet;
e7b07cee 8965 struct dc_stream_status *status = NULL;
b232d4ed 8966 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 8967
efc8278e 8968 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
8969 memset(&stream_update, 0, sizeof(stream_update));
8970
44d09c6a 8971 if (acrtc) {
0bc9706d 8972 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
8973 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8974 }
0bc9706d 8975
e7b07cee 8976 /* Skip any modesets/resets */
0bc9706d 8977 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
8978 continue;
8979
54d76575 8980 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
8981 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8982
b232d4ed
NK
8983 scaling_changed = is_scaling_state_different(dm_new_con_state,
8984 dm_old_con_state);
8985
8986 abm_changed = dm_new_crtc_state->abm_level !=
8987 dm_old_crtc_state->abm_level;
8988
8989 hdr_changed =
72921cdf 8990 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
b232d4ed
NK
8991
8992 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 8993 continue;
e7b07cee 8994
b6e881c9 8995 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 8996 if (scaling_changed) {
02d6a6fc 8997 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 8998 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 8999
02d6a6fc
DF
9000 stream_update.src = dm_new_crtc_state->stream->src;
9001 stream_update.dst = dm_new_crtc_state->stream->dst;
9002 }
9003
b232d4ed 9004 if (abm_changed) {
02d6a6fc
DF
9005 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9006
9007 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9008 }
70e8ffc5 9009
b232d4ed
NK
9010 if (hdr_changed) {
9011 fill_hdr_info_packet(new_con_state, &hdr_packet);
9012 stream_update.hdr_static_metadata = &hdr_packet;
9013 }
9014
54d76575 9015 status = dc_stream_get_status(dm_new_crtc_state->stream);
57738ae4
ND
9016
9017 if (WARN_ON(!status))
9018 continue;
9019
3be5262e 9020 WARN_ON(!status->plane_count);
e7b07cee 9021
02d6a6fc
DF
9022 /*
9023 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9024 * Here we create an empty update on each plane.
9025 * To fix this, DC should permit updating only stream properties.
9026 */
9027 for (j = 0; j < status->plane_count; j++)
efc8278e 9028 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
9029
9030
9031 mutex_lock(&dm->dc_lock);
9032 dc_commit_updates_for_stream(dm->dc,
efc8278e 9033 dummy_updates,
02d6a6fc
DF
9034 status->plane_count,
9035 dm_new_crtc_state->stream,
efc8278e
AJ
9036 &stream_update,
9037 dc_state);
02d6a6fc 9038 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
9039 }
9040
b5e83f6f 9041 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 9042 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 9043 new_crtc_state, i) {
fe2a1965
LP
9044 if (old_crtc_state->active && !new_crtc_state->active)
9045 crtc_disable_count++;
9046
54d76575 9047 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 9048 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 9049
585d450c
AP
9050 /* For freesync config update on crtc state and params for irq */
9051 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 9052
66b0c973
MK
9053 /* Handle vrr on->off / off->on transitions */
9054 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9055 dm_new_crtc_state);
e7b07cee
HW
9056 }
9057
8fe684e9
NK
9058 /**
9059 * Enable interrupts for CRTCs that are newly enabled or went through
9060 * a modeset. It was intentionally deferred until after the front end
9061 * state was modified to wait until the OTG was on and so the IRQ
9062 * handlers didn't access stale or invalid state.
9063 */
9064 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9065 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 9066#ifdef CONFIG_DEBUG_FS
86bc2219 9067 bool configure_crc = false;
8e7b6fee 9068 enum amdgpu_dm_pipe_crc_source cur_crc_src;
d98af272
WL
9069#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9070 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9071#endif
9072 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9073 cur_crc_src = acrtc->dm_irq_params.crc_src;
9074 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 9075#endif
585d450c
AP
9076 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9077
8fe684e9
NK
9078 if (new_crtc_state->active &&
9079 (!old_crtc_state->active ||
9080 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
9081 dc_stream_retain(dm_new_crtc_state->stream);
9082 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 9083 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 9084
24eb9374 9085#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
9086 /**
9087 * Frontend may have changed so reapply the CRC capture
9088 * settings for the stream.
9089 */
9090 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 9091
8e7b6fee 9092 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
9093 configure_crc = true;
9094#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
9095 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9096 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9097 acrtc->dm_irq_params.crc_window.update_win = true;
9098 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9099 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9100 crc_rd_wrk->crtc = crtc;
9101 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9102 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9103 }
86bc2219 9104#endif
e2881d6d 9105 }
c920888c 9106
86bc2219 9107 if (configure_crc)
bbc49fc0
WL
9108 if (amdgpu_dm_crtc_configure_crc_source(
9109 crtc, dm_new_crtc_state, cur_crc_src))
9110					DRM_DEBUG_DRIVER("Failed to configure crc source\n");
24eb9374 9111#endif
8fe684e9
NK
9112 }
9113 }
e7b07cee 9114
420cd472 9115 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 9116 if (new_crtc_state->async_flip)
420cd472
DF
9117 wait_for_vblank = false;
9118
e7b07cee 9119 /* update planes when needed per crtc*/
5cc6dcbd 9120 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 9121 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9122
54d76575 9123 if (dm_new_crtc_state->stream)
eb3dc897 9124 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 9125 dm, crtc, wait_for_vblank);
e7b07cee
HW
9126 }
9127
6ce8f316
NK
9128 /* Update audio instances for each connector. */
9129 amdgpu_dm_commit_audio(dev, state);
9130
7230362c
AD
9131#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9132 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9133 /* restore the backlight level */
9134 if (dm->backlight_dev)
9135 amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9136#endif
e7b07cee
HW
9137 /*
9138 * send vblank event on all events not handled in flip and
9139 * mark consumed event for drm_atomic_helper_commit_hw_done
9140 */
4a580877 9141 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 9142 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9143
0bc9706d
LSL
9144 if (new_crtc_state->event)
9145 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 9146
0bc9706d 9147 new_crtc_state->event = NULL;
e7b07cee 9148 }
4a580877 9149 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 9150
29c8f234
LL
9151 /* Signal HW programming completion */
9152 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
9153
9154 if (wait_for_vblank)
320a1274 9155 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
9156
9157 drm_atomic_helper_cleanup_planes(dev, state);
97028037 9158
5f6fab24
AD
9159 /* return the stolen vga memory back to VRAM */
9160 if (!adev->mman.keep_stolen_vga_memory)
9161 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9162 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9163
1f6010a9
DF
9164 /*
9165 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
9166 * so we can put the GPU into runtime suspend if we're not driving any
9167 * displays anymore
9168 */
fe2a1965
LP
9169 for (i = 0; i < crtc_disable_count; i++)
9170 pm_runtime_put_autosuspend(dev->dev);
97028037 9171 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
9172
9173 if (dc_state_temp)
9174 dc_release_state(dc_state_temp);
e7b07cee
HW
9175}
9176
9177
9178static int dm_force_atomic_commit(struct drm_connector *connector)
9179{
9180 int ret = 0;
9181 struct drm_device *ddev = connector->dev;
9182 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9183 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9184 struct drm_plane *plane = disconnected_acrtc->base.primary;
9185 struct drm_connector_state *conn_state;
9186 struct drm_crtc_state *crtc_state;
9187 struct drm_plane_state *plane_state;
9188
9189 if (!state)
9190 return -ENOMEM;
9191
9192 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9193
9194 /* Construct an atomic state to restore previous display setting */
9195
9196 /*
9197 * Attach connectors to drm_atomic_state
9198 */
9199 conn_state = drm_atomic_get_connector_state(state, connector);
9200
9201 ret = PTR_ERR_OR_ZERO(conn_state);
9202 if (ret)
2dc39051 9203 goto out;
e7b07cee
HW
9204
9205 /* Attach crtc to drm_atomic_state*/
9206 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9207
9208 ret = PTR_ERR_OR_ZERO(crtc_state);
9209 if (ret)
2dc39051 9210 goto out;
e7b07cee
HW
9211
9212 /* force a restore */
9213 crtc_state->mode_changed = true;
9214
9215 /* Attach plane to drm_atomic_state */
9216 plane_state = drm_atomic_get_plane_state(state, plane);
9217
9218 ret = PTR_ERR_OR_ZERO(plane_state);
9219 if (ret)
2dc39051 9220 goto out;
e7b07cee
HW
9221
9222 /* Call commit internally with the state we just constructed */
9223 ret = drm_atomic_commit(state);
e7b07cee 9224
2dc39051 9225out:
e7b07cee 9226 drm_atomic_state_put(state);
2dc39051
VL
9227 if (ret)
9228 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
9229
9230 return ret;
9231}
9232
9233/*
1f6010a9
DF
9234 * This function handles all cases where a set mode does not come upon hotplug.
9235 * This includes when a display is unplugged then plugged back into the
9236 * same port, and when running without usermode desktop manager support.
e7b07cee 9237 */
3ee6b26b
AD
9238void dm_restore_drm_connector_state(struct drm_device *dev,
9239 struct drm_connector *connector)
e7b07cee 9240{
c84dec2f 9241 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
9242 struct amdgpu_crtc *disconnected_acrtc;
9243 struct dm_crtc_state *acrtc_state;
9244
9245 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9246 return;
9247
9248 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
9249 if (!disconnected_acrtc)
9250 return;
e7b07cee 9251
70e8ffc5
HW
9252 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9253 if (!acrtc_state->stream)
e7b07cee
HW
9254 return;
9255
9256 /*
9257 * If the previous sink is not released and different from the current,
9258 * we deduce we are in a state where we can not rely on usermode call
9259 * to turn on the display, so we do it here
9260 */
9261 if (acrtc_state->stream->sink != aconnector->dc_sink)
9262 dm_force_atomic_commit(&aconnector->base);
9263}
9264
1f6010a9 9265/*
e7b07cee
HW
9266 * Grabs all modesetting locks to serialize against any blocking commits,
9267 * and waits for completion of all non-blocking commits.
9268 */
3ee6b26b
AD
9269static int do_aquire_global_lock(struct drm_device *dev,
9270 struct drm_atomic_state *state)
e7b07cee
HW
9271{
9272 struct drm_crtc *crtc;
9273 struct drm_crtc_commit *commit;
9274 long ret;
9275
1f6010a9
DF
9276 /*
9277	 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
9278	 * ensure that when the framework releases it, the
9279	 * extra locks we take here will get released too.
9280 */
9281 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9282 if (ret)
9283 return ret;
9284
9285 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9286 spin_lock(&crtc->commit_lock);
9287 commit = list_first_entry_or_null(&crtc->commit_list,
9288 struct drm_crtc_commit, commit_entry);
9289 if (commit)
9290 drm_crtc_commit_get(commit);
9291 spin_unlock(&crtc->commit_lock);
9292
9293 if (!commit)
9294 continue;
9295
1f6010a9
DF
9296 /*
9297		 * Make sure all pending HW programming has completed and
e7b07cee
HW
9298		 * all page flips are done
9299 */
9300 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9301
9302 if (ret > 0)
9303 ret = wait_for_completion_interruptible_timeout(
9304 &commit->flip_done, 10*HZ);
9305
9306 if (ret == 0)
9307 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 9308 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
9309
9310 drm_crtc_commit_put(commit);
9311 }
9312
9313 return ret < 0 ? ret : 0;
9314}
9315
bb47de73
NK
9316static void get_freesync_config_for_crtc(
9317 struct dm_crtc_state *new_crtc_state,
9318 struct dm_connector_state *new_con_state)
98e6436d
AK
9319{
9320 struct mod_freesync_config config = {0};
98e6436d
AK
9321 struct amdgpu_dm_connector *aconnector =
9322 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 9323 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 9324 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 9325 bool fs_vid_mode = false;
98e6436d 9326
a057ec46 9327 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
9328 vrefresh >= aconnector->min_vfreq &&
9329 vrefresh <= aconnector->max_vfreq;
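	/*
	 * VRR is only usable when the nominal refresh of the requested mode
	 * falls inside the panel's reported range; e.g. a 120 Hz mode on a
	 * 40-60 Hz FreeSync panel leaves vrr_supported false.
	 */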
bb47de73 9330
a057ec46
IB
9331 if (new_crtc_state->vrr_supported) {
9332 new_crtc_state->stream->ignore_msa_timing_param = true;
a85ba005
NC
9333 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9334
9335 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9336 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 9337 config.vsif_supported = true;
180db303 9338 config.btr = true;
98e6436d 9339
a85ba005
NC
9340 if (fs_vid_mode) {
9341 config.state = VRR_STATE_ACTIVE_FIXED;
9342 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9343 goto out;
9344 } else if (new_crtc_state->base.vrr_enabled) {
9345 config.state = VRR_STATE_ACTIVE_VARIABLE;
9346 } else {
9347 config.state = VRR_STATE_INACTIVE;
9348 }
9349 }
9350out:
bb47de73
NK
9351 new_crtc_state->freesync_config = config;
9352}
98e6436d 9353
bb47de73
NK
9354static void reset_freesync_config_for_crtc(
9355 struct dm_crtc_state *new_crtc_state)
9356{
9357 new_crtc_state->vrr_supported = false;
98e6436d 9358
bb47de73
NK
9359 memset(&new_crtc_state->vrr_infopacket, 0,
9360 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
9361}
9362
a85ba005
NC
9363static bool
9364is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9365 struct drm_crtc_state *new_crtc_state)
9366{
9367 struct drm_display_mode old_mode, new_mode;
9368
9369 if (!old_crtc_state || !new_crtc_state)
9370 return false;
9371
9372 old_mode = old_crtc_state->mode;
9373 new_mode = new_crtc_state->mode;
9374
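	/*
	 * A freesync video mode may differ from the base mode only in its
	 * vertical front porch: vtotal and the vsync position move, while
	 * the vsync pulse width and all horizontal fields stay identical.
	 */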
9375 if (old_mode.clock == new_mode.clock &&
9376 old_mode.hdisplay == new_mode.hdisplay &&
9377 old_mode.vdisplay == new_mode.vdisplay &&
9378 old_mode.htotal == new_mode.htotal &&
9379 old_mode.vtotal != new_mode.vtotal &&
9380 old_mode.hsync_start == new_mode.hsync_start &&
9381 old_mode.vsync_start != new_mode.vsync_start &&
9382 old_mode.hsync_end == new_mode.hsync_end &&
9383 old_mode.vsync_end != new_mode.vsync_end &&
9384 old_mode.hskew == new_mode.hskew &&
9385 old_mode.vscan == new_mode.vscan &&
9386 (old_mode.vsync_end - old_mode.vsync_start) ==
9387 (new_mode.vsync_end - new_mode.vsync_start))
9388 return true;
9389
9390 return false;
9391}
9392
9393static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9394 uint64_t num, den, res;
9395 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9396
9397 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9398
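	/*
	 * Fixed refresh in uHz is the pixel clock (mode.clock is in kHz,
	 * hence * 1000 for Hz and * 1000000 for uHz) divided by the pixels
	 * per frame. E.g. 148500 kHz with htotal = 2200 and vtotal = 1125:
	 * 148500000 * 1000000 / 2475000 = 60000000 uHz, i.e. 60 Hz.
	 */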
9399 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9400 den = (unsigned long long)new_crtc_state->mode.htotal *
9401 (unsigned long long)new_crtc_state->mode.vtotal;
9402
9403 res = div_u64(num, den);
9404 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9405}
9406
4b9674e5
LL
9407static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9408 struct drm_atomic_state *state,
9409 struct drm_crtc *crtc,
9410 struct drm_crtc_state *old_crtc_state,
9411 struct drm_crtc_state *new_crtc_state,
9412 bool enable,
9413 bool *lock_and_validation_needed)
e7b07cee 9414{
eb3dc897 9415 struct dm_atomic_state *dm_state = NULL;
54d76575 9416 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 9417 struct dc_stream_state *new_stream;
62f55537 9418 int ret = 0;
d4d4a645 9419
1f6010a9
DF
9420 /*
9421 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9422 * update changed items
9423 */
4b9674e5
LL
9424 struct amdgpu_crtc *acrtc = NULL;
9425 struct amdgpu_dm_connector *aconnector = NULL;
9426 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9427 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 9428
4b9674e5 9429 new_stream = NULL;
9635b754 9430
4b9674e5
LL
9431 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9432 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9433 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 9434 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 9435
4b9674e5
LL
9436 /* TODO This hack should go away */
9437 if (aconnector && enable) {
9438 /* Make sure fake sink is created in plug-in scenario */
9439 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9440 &aconnector->base);
9441 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9442 &aconnector->base);
19f89e23 9443
4b9674e5
LL
9444 if (IS_ERR(drm_new_conn_state)) {
9445 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9446 goto fail;
9447 }
19f89e23 9448
4b9674e5
LL
9449 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9450 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 9451
02d35a67
JFZ
9452 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9453 goto skip_modeset;
9454
cbd14ae7
SW
9455 new_stream = create_validate_stream_for_sink(aconnector,
9456 &new_crtc_state->mode,
9457 dm_new_conn_state,
9458 dm_old_crtc_state->stream);
19f89e23 9459
4b9674e5
LL
9460 /*
9461		 * We can have no stream on ACTION_SET if a display
9462		 * was disconnected during S3. In this case it is not an
9463		 * error: the OS will be updated after detection and
9464		 * will do the right thing on the next atomic commit.
9465 */
19f89e23 9466
4b9674e5
LL
9467 if (!new_stream) {
9468 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9469 __func__, acrtc->base.base.id);
9470 ret = -ENOMEM;
9471 goto fail;
9472 }
e7b07cee 9473
3d4e52d0
VL
9474 /*
9475 * TODO: Check VSDB bits to decide whether this should
9476 * be enabled or not.
9477 */
9478 new_stream->triggered_crtc_reset.enabled =
9479 dm->force_timing_sync;
9480
4b9674e5 9481 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 9482
88694af9
NK
9483 ret = fill_hdr_info_packet(drm_new_conn_state,
9484 &new_stream->hdr_static_metadata);
9485 if (ret)
9486 goto fail;
9487
7e930949
NK
9488 /*
9489 * If we already removed the old stream from the context
9490 * (and set the new stream to NULL) then we can't reuse
9491 * the old stream even if the stream and scaling are unchanged.
9492 * We'll hit the BUG_ON and black screen.
9493 *
9494 * TODO: Refactor this function to allow this check to work
9495 * in all conditions.
9496 */
a85ba005
NC
9497 if (amdgpu_freesync_vid_mode &&
9498 dm_new_crtc_state->stream &&
9499 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9500 goto skip_modeset;
9501
7e930949
NK
9502 if (dm_new_crtc_state->stream &&
9503 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
9504 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9505 new_crtc_state->mode_changed = false;
9506 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9507 new_crtc_state->mode_changed);
62f55537 9508 }
4b9674e5 9509 }
b830ebc9 9510
02d35a67 9511 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
9512 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9513 goto skip_modeset;
e7b07cee 9514
4711c033 9515 DRM_DEBUG_ATOMIC(
4b9674e5
LL
9516 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9517 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9518 "connectors_changed:%d\n",
9519 acrtc->crtc_id,
9520 new_crtc_state->enable,
9521 new_crtc_state->active,
9522 new_crtc_state->planes_changed,
9523 new_crtc_state->mode_changed,
9524 new_crtc_state->active_changed,
9525 new_crtc_state->connectors_changed);
62f55537 9526
4b9674e5
LL
9527 /* Remove stream for any changed/disabled CRTC */
9528 if (!enable) {
62f55537 9529
4b9674e5
LL
9530 if (!dm_old_crtc_state->stream)
9531 goto skip_modeset;
eb3dc897 9532
a85ba005
NC
9533 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9534 is_timing_unchanged_for_freesync(new_crtc_state,
9535 old_crtc_state)) {
9536 new_crtc_state->mode_changed = false;
9537 DRM_DEBUG_DRIVER(
9538 "Mode change not required for front porch change, "
9539 "setting mode_changed to %d",
9540 new_crtc_state->mode_changed);
9541
9542 set_freesync_fixed_config(dm_new_crtc_state);
9543
9544 goto skip_modeset;
9545 } else if (amdgpu_freesync_vid_mode && aconnector &&
9546 is_freesync_video_mode(&new_crtc_state->mode,
9547 aconnector)) {
9548 set_freesync_fixed_config(dm_new_crtc_state);
9549 }
9550
4b9674e5
LL
9551 ret = dm_atomic_get_state(state, &dm_state);
9552 if (ret)
9553 goto fail;
e7b07cee 9554
4b9674e5
LL
9555 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9556 crtc->base.id);
62f55537 9557
4b9674e5
LL
9558 /* i.e. reset mode */
9559 if (dc_remove_stream_from_ctx(
9560 dm->dc,
9561 dm_state->context,
9562 dm_old_crtc_state->stream) != DC_OK) {
9563 ret = -EINVAL;
9564 goto fail;
9565 }
62f55537 9566
4b9674e5
LL
9567 dc_stream_release(dm_old_crtc_state->stream);
9568 dm_new_crtc_state->stream = NULL;
bb47de73 9569
4b9674e5 9570 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 9571
4b9674e5 9572 *lock_and_validation_needed = true;
62f55537 9573
4b9674e5
LL
9574	} else { /* Add stream for any updated/enabled CRTC */
9575 /*
9576		 * Quick fix to prevent a NULL pointer dereference on new_stream
9577		 * when newly added MST connectors are not found in the existing
9578		 * crtc_state in chained mode. TODO: dig out the root cause of this.
9579 */
9580 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9581 goto skip_modeset;
62f55537 9582
4b9674e5
LL
9583 if (modereset_required(new_crtc_state))
9584 goto skip_modeset;
62f55537 9585
4b9674e5
LL
9586 if (modeset_required(new_crtc_state, new_stream,
9587 dm_old_crtc_state->stream)) {
62f55537 9588
4b9674e5 9589 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 9590
4b9674e5
LL
9591 ret = dm_atomic_get_state(state, &dm_state);
9592 if (ret)
9593 goto fail;
27b3f4fc 9594
4b9674e5 9595 dm_new_crtc_state->stream = new_stream;
62f55537 9596
4b9674e5 9597 dc_stream_retain(new_stream);
1dc90497 9598
4711c033
LT
9599 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9600 crtc->base.id);
1dc90497 9601
4b9674e5
LL
9602 if (dc_add_stream_to_ctx(
9603 dm->dc,
9604 dm_state->context,
9605 dm_new_crtc_state->stream) != DC_OK) {
9606 ret = -EINVAL;
9607 goto fail;
9b690ef3
BL
9608 }
9609
4b9674e5
LL
9610 *lock_and_validation_needed = true;
9611 }
9612 }
e277adc5 9613
4b9674e5
LL
9614skip_modeset:
9615 /* Release extra reference */
9616 if (new_stream)
9617 dc_stream_release(new_stream);
e277adc5 9618
4b9674e5
LL
9619 /*
9620 * We want to do dc stream updates that do not require a
9621 * full modeset below.
9622 */
2afda735 9623 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
9624 return 0;
9625 /*
9626 * Given above conditions, the dc state cannot be NULL because:
9627	 * 1. We're in the process of enabling CRTCs (the stream has just
9628	 *    been added to the dc context, or is already on the context),
9629	 * 2. the CRTC has a valid connector attached, and
9630	 * 3. the CRTC is currently active and enabled.
9631 * => The dc stream state currently exists.
9632 */
9633 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 9634
4b9674e5
LL
9635 /* Scaling or underscan settings */
9636 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9637 update_stream_scaling_settings(
9638 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 9639
b05e2c5e
DF
9640 /* ABM settings */
9641 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9642
4b9674e5
LL
9643 /*
9644 * Color management settings. We also update color properties
9645 * when a modeset is needed, to ensure it gets reprogrammed.
9646 */
9647 if (dm_new_crtc_state->base.color_mgmt_changed ||
9648 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 9649 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
9650 if (ret)
9651 goto fail;
62f55537 9652 }
e7b07cee 9653
4b9674e5
LL
9654 /* Update Freesync settings. */
9655 get_freesync_config_for_crtc(dm_new_crtc_state,
9656 dm_new_conn_state);
9657
62f55537 9658 return ret;
9635b754
DS
9659
9660fail:
9661 if (new_stream)
9662 dc_stream_release(new_stream);
9663 return ret;
62f55537 9664}
9b690ef3 9665
f6ff2a08
NK
9666static bool should_reset_plane(struct drm_atomic_state *state,
9667 struct drm_plane *plane,
9668 struct drm_plane_state *old_plane_state,
9669 struct drm_plane_state *new_plane_state)
9670{
9671 struct drm_plane *other;
9672 struct drm_plane_state *old_other_state, *new_other_state;
9673 struct drm_crtc_state *new_crtc_state;
9674 int i;
9675
70a1efac
NK
9676 /*
9677 * TODO: Remove this hack once the checks below are sufficient
9678 * to determine when we need to reset all the planes on
9679 * the stream.
9680 */
9681 if (state->allow_modeset)
9682 return true;
9683
f6ff2a08
NK
9684 /* Exit early if we know that we're adding or removing the plane. */
9685 if (old_plane_state->crtc != new_plane_state->crtc)
9686 return true;
9687
9688 /* old crtc == new_crtc == NULL, plane not in context. */
9689 if (!new_plane_state->crtc)
9690 return false;
9691
9692 new_crtc_state =
9693 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9694
9695 if (!new_crtc_state)
9696 return true;
9697
7316c4ad
NK
9698 /* CRTC Degamma changes currently require us to recreate planes. */
9699 if (new_crtc_state->color_mgmt_changed)
9700 return true;
9701
f6ff2a08
NK
9702 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9703 return true;
9704
9705 /*
9706 * If there are any new primary or overlay planes being added or
9707 * removed then the z-order can potentially change. To ensure
9708 * correct z-order and pipe acquisition the current DC architecture
9709 * requires us to remove and recreate all existing planes.
9710 *
9711 * TODO: Come up with a more elegant solution for this.
9712 */
9713 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 9714 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
9715 if (other->type == DRM_PLANE_TYPE_CURSOR)
9716 continue;
9717
9718 if (old_other_state->crtc != new_plane_state->crtc &&
9719 new_other_state->crtc != new_plane_state->crtc)
9720 continue;
9721
9722 if (old_other_state->crtc != new_other_state->crtc)
9723 return true;
9724
dc4cb30d
NK
9725 /* Src/dst size and scaling updates. */
9726 if (old_other_state->src_w != new_other_state->src_w ||
9727 old_other_state->src_h != new_other_state->src_h ||
9728 old_other_state->crtc_w != new_other_state->crtc_w ||
9729 old_other_state->crtc_h != new_other_state->crtc_h)
9730 return true;
9731
9732 /* Rotation / mirroring updates. */
9733 if (old_other_state->rotation != new_other_state->rotation)
9734 return true;
9735
9736 /* Blending updates. */
9737 if (old_other_state->pixel_blend_mode !=
9738 new_other_state->pixel_blend_mode)
9739 return true;
9740
9741 /* Alpha updates. */
9742 if (old_other_state->alpha != new_other_state->alpha)
9743 return true;
9744
9745 /* Colorspace changes. */
9746 if (old_other_state->color_range != new_other_state->color_range ||
9747 old_other_state->color_encoding != new_other_state->color_encoding)
9748 return true;
9749
9a81cc60
NK
9750 /* Framebuffer checks come last. */
9751 if (!old_other_state->fb || !new_other_state->fb)
9752 continue;
9753
9754 /* Pixel format changes can require bandwidth updates. */
9755 if (old_other_state->fb->format != new_other_state->fb->format)
9756 return true;
9757
6eed95b0
BN
9758 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9759 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
9760
9761 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
9762 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9763 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
9764 return true;
9765 }
9766
9767 return false;
9768}
9769
b0455fda
SS
9770static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9771 struct drm_plane_state *new_plane_state,
9772 struct drm_framebuffer *fb)
9773{
e72868c4
SS
9774 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9775 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 9776 unsigned int pitch;
e72868c4 9777 bool linear;
b0455fda
SS
9778
9779 if (fb->width > new_acrtc->max_cursor_width ||
9780 fb->height > new_acrtc->max_cursor_height) {
9781 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9782 new_plane_state->fb->width,
9783 new_plane_state->fb->height);
9784 return -EINVAL;
9785 }
9786 if (new_plane_state->src_w != fb->width << 16 ||
9787 new_plane_state->src_h != fb->height << 16) {
9788 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9789 return -EINVAL;
9790 }
9791
9792 /* Pitch in pixels */
9793 pitch = fb->pitches[0] / fb->format->cpp[0];
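	/*
	 * Illustrative example: a 64x64 ARGB8888 cursor FB has
	 * pitches[0] = 256 bytes and cpp[0] = 4, giving a pitch of
	 * 64 pixels, which matches fb->width and the 64px case below.
	 */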
9794
9795 if (fb->width != pitch) {
9796 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9797 fb->width, pitch);
9798 return -EINVAL;
9799 }
9800
9801 switch (pitch) {
9802 case 64:
9803 case 128:
9804 case 256:
9805 /* FB pitch is supported by cursor plane */
9806 break;
9807 default:
9808 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9809 return -EINVAL;
9810 }
9811
e72868c4
SS
9812 /* Core DRM takes care of checking FB modifiers, so we only need to
9813 * check tiling flags when the FB doesn't have a modifier. */
9814 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9815 if (adev->family < AMDGPU_FAMILY_AI) {
9816 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9817 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9818 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9819 } else {
9820 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9821 }
9822 if (!linear) {
9823 DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9824 return -EINVAL;
9825 }
9826 }
9827
b0455fda
SS
9828 return 0;
9829}
9830
9e869063
LL
9831static int dm_update_plane_state(struct dc *dc,
9832 struct drm_atomic_state *state,
9833 struct drm_plane *plane,
9834 struct drm_plane_state *old_plane_state,
9835 struct drm_plane_state *new_plane_state,
9836 bool enable,
9837 bool *lock_and_validation_needed)
62f55537 9838{
eb3dc897
NK
9839
9840 struct dm_atomic_state *dm_state = NULL;
62f55537 9841 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 9842 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 9843 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 9844 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 9845 struct amdgpu_crtc *new_acrtc;
f6ff2a08 9846 bool needs_reset;
62f55537 9847 int ret = 0;
e7b07cee 9848
9b690ef3 9849
9e869063
LL
9850 new_plane_crtc = new_plane_state->crtc;
9851 old_plane_crtc = old_plane_state->crtc;
9852 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9853 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 9854
626bf90f
SS
9855 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9856 if (!enable || !new_plane_crtc ||
9857 drm_atomic_plane_disabling(plane->state, new_plane_state))
9858 return 0;
9859
9860 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9861
5f581248
SS
9862 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9863 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9864 return -EINVAL;
9865 }
9866
24f99d2b 9867 if (new_plane_state->fb) {
b0455fda
SS
9868 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9869 new_plane_state->fb);
9870 if (ret)
9871 return ret;
24f99d2b
SS
9872 }
9873
9e869063 9874 return 0;
626bf90f 9875 }
9b690ef3 9876
f6ff2a08
NK
9877 needs_reset = should_reset_plane(state, plane, old_plane_state,
9878 new_plane_state);
9879
9e869063
LL
9880 /* Remove any changed/removed planes */
9881 if (!enable) {
f6ff2a08 9882 if (!needs_reset)
9e869063 9883 return 0;
a7b06724 9884
9e869063
LL
9885 if (!old_plane_crtc)
9886 return 0;
62f55537 9887
9e869063
LL
9888 old_crtc_state = drm_atomic_get_old_crtc_state(
9889 state, old_plane_crtc);
9890 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 9891
9e869063
LL
9892 if (!dm_old_crtc_state->stream)
9893 return 0;
62f55537 9894
9e869063
LL
9895 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9896 plane->base.id, old_plane_crtc->base.id);
9b690ef3 9897
9e869063
LL
9898 ret = dm_atomic_get_state(state, &dm_state);
9899 if (ret)
9900 return ret;
eb3dc897 9901
9e869063
LL
9902 if (!dc_remove_plane_from_context(
9903 dc,
9904 dm_old_crtc_state->stream,
9905 dm_old_plane_state->dc_state,
9906 dm_state->context)) {
62f55537 9907
c3537613 9908 return -EINVAL;
9e869063 9909 }
e7b07cee 9910
9b690ef3 9911
9e869063
LL
9912 dc_plane_state_release(dm_old_plane_state->dc_state);
9913 dm_new_plane_state->dc_state = NULL;
1dc90497 9914
9e869063 9915 *lock_and_validation_needed = true;
1dc90497 9916
9e869063
LL
9917 } else { /* Add new planes */
9918 struct dc_plane_state *dc_new_plane_state;
1dc90497 9919
9e869063
LL
9920 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9921 return 0;
e7b07cee 9922
9e869063
LL
9923 if (!new_plane_crtc)
9924 return 0;
e7b07cee 9925
9e869063
LL
9926 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9927 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 9928
9e869063
LL
9929 if (!dm_new_crtc_state->stream)
9930 return 0;
62f55537 9931
f6ff2a08 9932 if (!needs_reset)
9e869063 9933 return 0;
62f55537 9934
8c44515b
AP
9935 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9936 if (ret)
9937 return ret;
9938
9e869063 9939 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 9940
9e869063
LL
9941 dc_new_plane_state = dc_create_plane_state(dc);
9942 if (!dc_new_plane_state)
9943 return -ENOMEM;
62f55537 9944
4711c033
LT
9945 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9946 plane->base.id, new_plane_crtc->base.id);
8c45c5db 9947
695af5f9 9948 ret = fill_dc_plane_attributes(
1348969a 9949 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
9950 dc_new_plane_state,
9951 new_plane_state,
9952 new_crtc_state);
9953 if (ret) {
9954 dc_plane_state_release(dc_new_plane_state);
9955 return ret;
9956 }
62f55537 9957
9e869063
LL
9958 ret = dm_atomic_get_state(state, &dm_state);
9959 if (ret) {
9960 dc_plane_state_release(dc_new_plane_state);
9961 return ret;
9962 }
eb3dc897 9963
9e869063
LL
9964 /*
9965 * Any atomic check errors that occur after this will
9966 * not need a release. The plane state will be attached
9967 * to the stream, and therefore part of the atomic
9968 * state. It'll be released when the atomic state is
9969 * cleaned.
9970 */
9971 if (!dc_add_plane_to_context(
9972 dc,
9973 dm_new_crtc_state->stream,
9974 dc_new_plane_state,
9975 dm_state->context)) {
62f55537 9976
9e869063
LL
9977 dc_plane_state_release(dc_new_plane_state);
9978 return -EINVAL;
9979 }
8c45c5db 9980
9e869063 9981 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 9982
9e869063
LL
9983 /* Tell DC to do a full surface update every time there
9984 * is a plane change. Inefficient, but works for now.
9985 */
9986 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9987
9988 *lock_and_validation_needed = true;
62f55537 9989 }
e7b07cee
HW
9990
9991
62f55537
AG
9992 return ret;
9993}
a87fa993 9994
12f4849a
SS
9995static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9996 struct drm_crtc *crtc,
9997 struct drm_crtc_state *new_crtc_state)
9998{
9999 struct drm_plane_state *new_cursor_state, *new_primary_state;
10000 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10001
10002 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10003 * cursor per pipe but it's going to inherit the scaling and
10004 * positioning from the underlying pipe. Check that the cursor plane's
10005 * scaling matches the primary plane's. */
10006
10007 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10008 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
7df4ceb6
SE
10009 if (!new_cursor_state || !new_primary_state ||
10010 !new_cursor_state->fb || !new_primary_state->fb) {
12f4849a
SS
10011 return 0;
10012 }
10013
10014 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10015 (new_cursor_state->src_w >> 16);
10016 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10017 (new_cursor_state->src_h >> 16);
10018
10019 primary_scale_w = new_primary_state->crtc_w * 1000 /
10020 (new_primary_state->src_w >> 16);
10021 primary_scale_h = new_primary_state->crtc_h * 1000 /
10022 (new_primary_state->src_h >> 16);
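	/*
	 * Scales are in thousandths, derived from 16.16 fixed-point sources.
	 * Illustrative example: crtc_w = 1920 with src_w = (960 << 16) gives
	 * 1920 * 1000 / 960 = 2000, i.e. a 2.0x horizontal scale that the
	 * cursor plane must match for the check below to pass.
	 */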
10023
10024 if (cursor_scale_w != primary_scale_w ||
10025 cursor_scale_h != primary_scale_h) {
10026 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
10027 return -EINVAL;
10028 }
10029
10030 return 0;
10031}
10032
e10517b3 10033#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
10034static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10035{
10036 struct drm_connector *connector;
10037 struct drm_connector_state *conn_state;
10038 struct amdgpu_dm_connector *aconnector = NULL;
10039 int i;
10040 for_each_new_connector_in_state(state, connector, conn_state, i) {
10041 if (conn_state->crtc != crtc)
10042 continue;
10043
10044 aconnector = to_amdgpu_dm_connector(connector);
10045 if (!aconnector->port || !aconnector->mst_port)
10046 aconnector = NULL;
10047 else
10048 break;
10049 }
10050
10051 if (!aconnector)
10052 return 0;
10053
10054 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10055}
e10517b3 10056#endif
44be939f 10057
16e9b3e5
RS
10058static int validate_overlay(struct drm_atomic_state *state)
10059{
10060 int i;
10061 struct drm_plane *plane;
10062 struct drm_plane_state *old_plane_state, *new_plane_state;
e8ce3d47 10063 struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
16e9b3e5
RS
10064
10065 /* Check if primary plane is contained inside overlay */
10066 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10067 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10068 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10069 return 0;
10070
10071 overlay_state = new_plane_state;
10072 continue;
10073 }
10074 }
10075
10076 /* check if we're making changes to the overlay plane */
10077 if (!overlay_state)
10078 return 0;
10079
10080 /* check if overlay plane is enabled */
10081 if (!overlay_state->crtc)
10082 return 0;
10083
10084 /* find the primary plane for the CRTC that the overlay is enabled on */
10085 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10086 if (IS_ERR(primary_state))
10087 return PTR_ERR(primary_state);
10088
10089 /* check if primary plane is enabled */
10090 if (!primary_state->crtc)
10091 return 0;
10092
e8ce3d47
RS
10093 /* check if cursor plane is enabled */
10094 cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
10095 if (IS_ERR(cursor_state))
10096 return PTR_ERR(cursor_state);
10097
10098 if (drm_atomic_plane_disabling(overlay_state->crtc->cursor->state, cursor_state))
10099 return 0;
10100
16e9b3e5
RS
10101 /* Perform the bounds check to ensure the overlay plane covers the primary */
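	/*
	 * Illustrative example: a 1920x1080 primary at (0, 0) passes only if
	 * the overlay also starts at or before (0, 0) and extends to at least
	 * (1920, 1080).
	 */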
10102 if (primary_state->crtc_x < overlay_state->crtc_x ||
10103 primary_state->crtc_y < overlay_state->crtc_y ||
10104 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10105 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10106 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10107 return -EINVAL;
10108 }
10109
10110 return 0;
10111}
10112
b8592b48
LL
10113/**
10114 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10115 * @dev: The DRM device
10116 * @state: The atomic state to commit
10117 *
10118 * Validate that the given atomic state is programmable by DC into hardware.
10119 * This involves constructing a &struct dc_state reflecting the new hardware
10120 * state we wish to commit, then querying DC to see if it is programmable. It's
10121 * important not to modify the existing DC state. Otherwise, atomic_check
10122 * may unexpectedly commit hardware changes.
10123 *
10124 * When validating the DC state, it's important that the right locks are
10125 * acquired. For full updates case which removes/adds/updates streams on one
10126 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10127 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 10128 * flip using DRMs synchronization events.
b8592b48
LL
10129 *
10130 * Note that DM adds the affected connectors for all CRTCs in state, when that
10131 * might not seem necessary. This is because DC stream creation requires the
10132 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10133 * be possible but non-trivial - a possible TODO item.
10134 *
10135 * Return: 0 on success, or a negative error code if validation failed.
10136 */
7578ecda
AD
10137static int amdgpu_dm_atomic_check(struct drm_device *dev,
10138 struct drm_atomic_state *state)
62f55537 10139{
1348969a 10140 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 10141 struct dm_atomic_state *dm_state = NULL;
62f55537 10142 struct dc *dc = adev->dm.dc;
62f55537 10143 struct drm_connector *connector;
c2cea706 10144 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 10145 struct drm_crtc *crtc;
fc9e9920 10146 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
10147 struct drm_plane *plane;
10148 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 10149 enum dc_status status;
1e88ad0a 10150 int ret, i;
62f55537 10151 bool lock_and_validation_needed = false;
886876ec 10152 struct dm_crtc_state *dm_old_crtc_state;
62f55537 10153
e8a98235 10154 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 10155
62f55537 10156 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
10157 if (ret)
10158 goto fail;
62f55537 10159
c5892a10
SW
10160 /* Check connector changes */
10161 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10162 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10163 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10164
10165 /* Skip connectors that are disabled or part of modeset already. */
10166 if (!old_con_state->crtc && !new_con_state->crtc)
10167 continue;
10168
10169 if (!new_con_state->crtc)
10170 continue;
10171
10172 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10173 if (IS_ERR(new_crtc_state)) {
10174 ret = PTR_ERR(new_crtc_state);
10175 goto fail;
10176 }
10177
10178 if (dm_old_con_state->abm_level !=
10179 dm_new_con_state->abm_level)
10180 new_crtc_state->connectors_changed = true;
10181 }
10182
e10517b3 10183#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 10184 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
10185 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10186 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10187 ret = add_affected_mst_dsc_crtcs(state, crtc);
10188 if (ret)
10189 goto fail;
10190 }
10191 }
10192 }
e10517b3 10193#endif
1e88ad0a 10194 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
10195 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10196
1e88ad0a 10197 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 10198 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
10199 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10200 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 10201 continue;
7bef1af3 10202
1e88ad0a
S
10203 if (!new_crtc_state->enable)
10204 continue;
fc9e9920 10205
1e88ad0a
S
10206 ret = drm_atomic_add_affected_connectors(state, crtc);
10207 if (ret)
10208 goto fail;
fc9e9920 10209
1e88ad0a
S
10210 ret = drm_atomic_add_affected_planes(state, crtc);
10211 if (ret)
10212 goto fail;
115a385c 10213
cbac53f7 10214 if (dm_old_crtc_state->dsc_force_changed)
115a385c 10215 new_crtc_state->mode_changed = true;
e7b07cee
HW
10216 }
10217
2d9e6431
NK
10218 /*
10219 * Add all primary and overlay planes on the CRTC to the state
10220 * whenever a plane is enabled to maintain correct z-ordering
10221 * and to enable fast surface updates.
10222 */
10223 drm_for_each_crtc(crtc, dev) {
10224 bool modified = false;
10225
10226 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10227 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10228 continue;
10229
10230 if (new_plane_state->crtc == crtc ||
10231 old_plane_state->crtc == crtc) {
10232 modified = true;
10233 break;
10234 }
10235 }
10236
10237 if (!modified)
10238 continue;
10239
10240 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10241 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10242 continue;
10243
10244 new_plane_state =
10245 drm_atomic_get_plane_state(state, plane);
10246
10247 if (IS_ERR(new_plane_state)) {
10248 ret = PTR_ERR(new_plane_state);
10249 goto fail;
10250 }
10251 }
10252 }
10253
62f55537 10254 /* Remove existing planes if they are modified */
9e869063
LL
10255 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10256 ret = dm_update_plane_state(dc, state, plane,
10257 old_plane_state,
10258 new_plane_state,
10259 false,
10260 &lock_and_validation_needed);
10261 if (ret)
10262 goto fail;
62f55537
AG
10263 }
10264
10265 /* Disable all CRTCs which require disabling */
4b9674e5
LL
10266 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10267 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10268 old_crtc_state,
10269 new_crtc_state,
10270 false,
10271 &lock_and_validation_needed);
10272 if (ret)
10273 goto fail;
62f55537
AG
10274 }
10275
10276 /* Enable all CRTCs which require enabling */
4b9674e5
LL
10277 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10278 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10279 old_crtc_state,
10280 new_crtc_state,
10281 true,
10282 &lock_and_validation_needed);
10283 if (ret)
10284 goto fail;
62f55537
AG
10285 }
10286
16e9b3e5
RS
10287 ret = validate_overlay(state);
10288 if (ret)
10289 goto fail;
10290
62f55537 10291 /* Add new/modified planes */
9e869063
LL
10292 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10293 ret = dm_update_plane_state(dc, state, plane,
10294 old_plane_state,
10295 new_plane_state,
10296 true,
10297 &lock_and_validation_needed);
10298 if (ret)
10299 goto fail;
62f55537
AG
10300 }
10301
b349f76e
ES
10302 /* Run this here since we want to validate the streams we created */
10303 ret = drm_atomic_helper_check_planes(dev, state);
10304 if (ret)
10305 goto fail;
62f55537 10306
12f4849a
SS
10307 /* Check cursor planes scaling */
10308 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10309 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10310 if (ret)
10311 goto fail;
10312 }
10313
43d10d30
NK
10314 if (state->legacy_cursor_update) {
10315 /*
10316 * This is a fast cursor update coming from the plane update
10317 * helper, check if it can be done asynchronously for better
10318 * performance.
10319 */
10320 state->async_update =
10321 !drm_atomic_helper_async_check(dev, state);
10322
10323 /*
10324 * Skip the remaining global validation if this is an async
10325 * update. Cursor updates can be done without affecting
10326 * state or bandwidth calcs and this avoids the performance
10327 * penalty of locking the private state object and
10328 * allocating a new dc_state.
10329 */
10330 if (state->async_update)
10331 return 0;
10332 }
10333
ebdd27e1 10334 /* Check scaling and underscan changes */
1f6010a9 10335 /* TODO: Removed scaling changes validation due to inability to commit
e7b07cee
HW
10336 * new stream into context w/o causing a full reset. Need to
10337 * decide how to handle.
10338 */
c2cea706 10339 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
10340 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10341 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10342 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
10343
10344 /* Skip any modesets/resets */
0bc9706d
LSL
10345 if (!acrtc || drm_atomic_crtc_needs_modeset(
10346 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
10347 continue;
10348
b830ebc9 10349 /* Skip anything that is not a scaling or underscan change */
54d76575 10350 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
10351 continue;
10352
10353 lock_and_validation_needed = true;
10354 }
10355
f6d7c7fa
NK
10356 /*
10357 * Streams and planes are reset when there are changes that affect
10358 * bandwidth. Anything that affects bandwidth needs to go through
10359 * DC global validation to ensure that the configuration can be applied
10360 * to hardware.
10361 *
10362 * We have to currently stall out here in atomic_check for outstanding
10363 * commits to finish in this case because our IRQ handlers reference
10364 * DRM state directly - we can end up disabling interrupts too early
10365 * if we don't.
10366 *
10367 * TODO: Remove this stall and drop DM state private objects.
a87fa993 10368 */
f6d7c7fa 10369 if (lock_and_validation_needed) {
eb3dc897
NK
10370 ret = dm_atomic_get_state(state, &dm_state);
10371 if (ret)
10372 goto fail;
e7b07cee
HW
10373
10374 ret = do_aquire_global_lock(dev, state);
10375 if (ret)
10376 goto fail;
1dc90497 10377
d9fe1a4c 10378#if defined(CONFIG_DRM_AMD_DC_DCN)
8c20a1ed
DF
10379 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10380 goto fail;
10381
29b9ba74
ML
10382 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10383 if (ret)
10384 goto fail;
d9fe1a4c 10385#endif
29b9ba74 10386
ded58c7b
ZL
10387 /*
10388 * Perform validation of MST topology in the state:
10389 * We need to perform MST atomic check before calling
10390 * dc_validate_global_state(), or there is a chance
10391 * to get stuck in an infinite loop and hang eventually.
10392 */
10393 ret = drm_dp_mst_atomic_check(state);
10394 if (ret)
10395 goto fail;
74a16675
RS
10396 status = dc_validate_global_state(dc, dm_state->context, false);
10397 if (status != DC_OK) {
10398 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10399 dc_status_to_str(status), status);
e7b07cee
HW
10400 ret = -EINVAL;
10401 goto fail;
10402 }
bd200d19 10403 } else {
674e78ac 10404 /*
bd200d19
NK
10405 * The commit is a fast update. Fast updates shouldn't change
10406 * the DC context, affect global validation, and can have their
10407 * commit work done in parallel with other commits not touching
10408 * the same resource. If we have a new DC context as part of
10409 * the DM atomic state from validation we need to free it and
10410 * retain the existing one instead.
fde9f39a
MR
10411 *
10412 * Furthermore, since the DM atomic state only contains the DC
10413 * context and can safely be annulled, we can free the state
10414 * and clear the associated private object now to free
10415 * some memory and avoid a possible use-after-free later.
674e78ac 10416 */
bd200d19 10417
fde9f39a
MR
10418 for (i = 0; i < state->num_private_objs; i++) {
10419 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 10420
fde9f39a
MR
10421 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10422 int j = state->num_private_objs-1;
bd200d19 10423
fde9f39a
MR
10424 dm_atomic_destroy_state(obj,
10425 state->private_objs[i].state);
10426
10427 /* If i is not at the end of the array then the
10428 * last element needs to be moved to where i was
10429 * before the array can safely be truncated.
10430 */
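				/*
				 * Illustrative example: with num_private_objs == 3
				 * and the DM object found at i == 0, slot 2 is copied
				 * into slot 0, slot 2 is cleared, and the count drops
				 * to 2.
				 */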
10431 if (i != j)
10432 state->private_objs[i] =
10433 state->private_objs[j];
bd200d19 10434
fde9f39a
MR
10435 state->private_objs[j].ptr = NULL;
10436 state->private_objs[j].state = NULL;
10437 state->private_objs[j].old_state = NULL;
10438 state->private_objs[j].new_state = NULL;
10439
10440 state->num_private_objs = j;
10441 break;
10442 }
bd200d19 10443 }
e7b07cee
HW
10444 }
10445
caff0e66
NK
10446 /* Store the overall update type for use later in atomic check. */
10447 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10448 struct dm_crtc_state *dm_new_crtc_state =
10449 to_dm_crtc_state(new_crtc_state);
10450
f6d7c7fa
NK
10451 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10452 UPDATE_TYPE_FULL :
10453 UPDATE_TYPE_FAST;
e7b07cee
HW
10454 }
10455
10456 /* Must be success */
10457 WARN_ON(ret);
e8a98235
RS
10458
10459 trace_amdgpu_dm_atomic_check_finish(state, ret);
10460
e7b07cee
HW
10461 return ret;
10462
10463fail:
10464 if (ret == -EDEADLK)
01e28f9c 10465 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 10466 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 10467 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 10468 else
01e28f9c 10469 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
e7b07cee 10470
e8a98235
RS
10471 trace_amdgpu_dm_atomic_check_finish(state, ret);
10472
e7b07cee
HW
10473 return ret;
10474}
10475
3ee6b26b
AD
10476static bool is_dp_capable_without_timing_msa(struct dc *dc,
10477 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
10478{
10479 uint8_t dpcd_data;
10480 bool capable = false;
10481
c84dec2f 10482 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
10483 dm_helpers_dp_read_dpcd(
10484 NULL,
c84dec2f 10485 amdgpu_dm_connector->dc_link,
e7b07cee
HW
10486 DP_DOWN_STREAM_PORT_COUNT,
10487 &dpcd_data,
10488 sizeof(dpcd_data))) {
10489 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10490 }
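	/*
	 * DP_MSA_TIMING_PAR_IGNORED is bit 6 of DP_DOWN_STREAM_PORT_COUNT
	 * (DPCD address 0x007); when set, the sink can ignore the MSA timing
	 * parameters, which is the prerequisite this helper reports for
	 * variable-refresh use.
	 */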
10491
10492 return capable;
10493}
f9b4f20c
SW
10494
10495static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10496 uint8_t *edid_ext, int len,
10497 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10498{
10499 int i;
10500 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10501 struct dc *dc = adev->dm.dc;
10502
10503 /* send extension block to DMCU for parsing */
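	/*
	 * Rough shape of the handshake, as implemented below: each 8-byte
	 * chunk is pushed to the parser and acknowledged via
	 * dc_edid_parser_recv_cea_ack(); after the final chunk, the parsed
	 * AMD VSDB result is read back with dc_edid_parser_recv_amd_vsdb().
	 */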
10504 for (i = 0; i < len; i += 8) {
10505 bool res;
10506 int offset;
10507
10508 /* send 8 bytes at a time */
10509 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10510 return false;
10511
10512 if (i + 8 == len) {
10513 /* EDID block send completed; expect the parse result */
10514 int version, min_rate, max_rate;
10515
10516 res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10517 if (res) {
10518 /* amd vsdb found */
10519 vsdb_info->freesync_supported = 1;
10520 vsdb_info->amd_vsdb_version = version;
10521 vsdb_info->min_refresh_rate_hz = min_rate;
10522 vsdb_info->max_refresh_rate_hz = max_rate;
10523 return true;
10524 }
10525 /* not amd vsdb */
10526 return false;
10527 }
10528
10529 /* check for ack */
10530 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10531 if (!res)
10532 return false;
10533 }
10534
10535 return false;
10536}
10537
7c7dd774 10538static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
f9b4f20c
SW
10539 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10540{
10541 uint8_t *edid_ext = NULL;
10542 int i;
10543 bool valid_vsdb_found = false;
10544
10545 /*----- drm_find_cea_extension() -----*/
10546 /* No EDID or EDID extensions */
10547 if (edid == NULL || edid->extensions == 0)
7c7dd774 10548 return -ENODEV;
f9b4f20c
SW
10549
10550 /* Find CEA extension */
10551 for (i = 0; i < edid->extensions; i++) {
10552 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10553 if (edid_ext[0] == CEA_EXT)
10554 break;
10555 }
10556
10557 if (i == edid->extensions)
7c7dd774 10558 return -ENODEV;
f9b4f20c
SW
10559
10560 /*----- cea_db_offsets() -----*/
10561 if (edid_ext[0] != CEA_EXT)
7c7dd774 10562 return -ENODEV;
f9b4f20c
SW
10563
10564 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
7c7dd774
AB
10565
10566 return valid_vsdb_found ? i : -ENODEV;
f9b4f20c
SW
10567}
10568
98e6436d
AK
10569void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10570 struct edid *edid)
e7b07cee 10571{
eb0709ba 10572 int i = 0;
e7b07cee
HW
10573 struct detailed_timing *timing;
10574 struct detailed_non_pixel *data;
10575 struct detailed_data_monitor_range *range;
c84dec2f
HW
10576 struct amdgpu_dm_connector *amdgpu_dm_connector =
10577 to_amdgpu_dm_connector(connector);
bb47de73 10578 struct dm_connector_state *dm_con_state = NULL;
e7b07cee
HW
10579
10580 struct drm_device *dev = connector->dev;
1348969a 10581 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 10582 bool freesync_capable = false;
f9b4f20c 10583 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
b830ebc9 10584
8218d7f1
HW
10585 if (!connector->state) {
10586 DRM_ERROR("%s - Connector has no state\n", __func__);
bb47de73 10587 goto update;
8218d7f1
HW
10588 }
10589
98e6436d
AK
10590 if (!edid) {
10591 dm_con_state = to_dm_connector_state(connector->state);
10592
10593 amdgpu_dm_connector->min_vfreq = 0;
10594 amdgpu_dm_connector->max_vfreq = 0;
10595 amdgpu_dm_connector->pixel_clock_mhz = 0;
10596
bb47de73 10597 goto update;
98e6436d
AK
10598 }
10599
8218d7f1
HW
10600 dm_con_state = to_dm_connector_state(connector->state);
10601
c84dec2f 10602 if (!amdgpu_dm_connector->dc_sink) {
e7b07cee 10603 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
bb47de73 10604 goto update;
e7b07cee
HW
10605 }
10606 if (!adev->dm.freesync_module)
bb47de73 10607 goto update;
f9b4f20c
SW
10608
10609
10610 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10611 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10612 bool edid_check_required = false;
10613
10614 if (edid) {
e7b07cee
HW
10615 edid_check_required = is_dp_capable_without_timing_msa(
10616 adev->dm.dc,
c84dec2f 10617 amdgpu_dm_connector);
e7b07cee 10618 }
e7b07cee 10619
f9b4f20c
SW
10620 if (edid_check_required && (edid->version > 1 ||
10621 (edid->version == 1 && edid->revision > 1))) {
10622 for (i = 0; i < 4; i++) {
e7b07cee 10623
f9b4f20c
SW
10624 timing = &edid->detailed_timings[i];
10625 data = &timing->data.other_data;
10626 range = &data->data.range;
10627 /*
10628 * Check if monitor has continuous frequency mode
10629 */
10630 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10631 continue;
10632 /*
10633 * Check for range-limits-only flags. If flags == 1 then
10634 * no additional timing information is provided.
10635 * Default GTF, GTF secondary curve and CVT are not
10636 * supported.
10637 */
10638 if (range->flags != 1)
10639 continue;
a0ffc3fd 10640
f9b4f20c
SW
10641 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10642 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10643 amdgpu_dm_connector->pixel_clock_mhz =
10644 range->pixel_clock_mhz * 10;
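				/*
				 * Assumption from the EDID range-limits descriptor
				 * layout: the max pixel clock field is stored in
				 * 10 MHz units, hence the *10 to express it in MHz.
				 */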
a0ffc3fd 10645
f9b4f20c
SW
10646 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10647 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
e7b07cee 10648
f9b4f20c
SW
10649 break;
10650 }
98e6436d 10651
f9b4f20c
SW
10652 if (amdgpu_dm_connector->max_vfreq -
10653 amdgpu_dm_connector->min_vfreq > 10) {
98e6436d 10654
f9b4f20c
SW
10655 freesync_capable = true;
10656 }
10657 }
10658 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
7c7dd774
AB
10659 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10660 if (i >= 0 && vsdb_info.freesync_supported) {
f9b4f20c
SW
10661 timing = &edid->detailed_timings[i];
10662 data = &timing->data.other_data;
10663
10664 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10665 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10666 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10667 freesync_capable = true;
10668
10669 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10670 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
e7b07cee
HW
10671 }
10672 }
bb47de73
NK
10673
10674update:
10675 if (dm_con_state)
10676 dm_con_state->freesync_capable = freesync_capable;
10677
10678 if (connector->vrr_capable_property)
10679 drm_connector_set_vrr_capable_property(connector,
10680 freesync_capable);
e7b07cee
HW
10681}
10682
8c322309
RL
10683static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10684{
10685 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10686
10687 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10688 return;
10689 if (link->type == dc_connection_none)
10690 return;
10691 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10692 dpcd_data, sizeof(dpcd_data))) {
d1ebfdd8
WW
10693 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10694
10695 if (dpcd_data[0] == 0) {
1cfbbdde 10696 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
d1ebfdd8
WW
10697 link->psr_settings.psr_feature_enabled = false;
10698 } else {
1cfbbdde 10699 link->psr_settings.psr_version = DC_PSR_VERSION_1;
d1ebfdd8
WW
10700 link->psr_settings.psr_feature_enabled = true;
10701 }
10702
10703 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
10704 }
10705}
10706
10707/*
10708 * amdgpu_dm_link_setup_psr() - configure psr link
10709 * @stream: stream state
10710 *
10711 * Return: true if success
10712 */
10713static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10714{
10715 struct dc_link *link = NULL;
10716 struct psr_config psr_config = {0};
10717 struct psr_context psr_context = {0};
8c322309
RL
10718 bool ret = false;
10719
10720 if (stream == NULL)
10721 return false;
10722
10723 link = stream->link;
8c322309 10724
d1ebfdd8 10725 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8c322309
RL
10726
10727 if (psr_config.psr_version > 0) {
10728 psr_config.psr_exit_link_training_required = 0x1;
10729 psr_config.psr_frame_capture_indication_req = 0;
10730 psr_config.psr_rfb_setup_time = 0x37;
10731 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10732 psr_config.allow_smu_optimizations = 0x0;
10733
10734 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10735
10736 }
d1ebfdd8 10737 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8c322309
RL
10738
10739 return ret;
10740}
10741
10742/*
10743 * amdgpu_dm_psr_enable() - enable psr f/w
10744 * @stream: stream state
10745 *
10746 * Return: true if success
10747 */
10748bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10749{
10750 struct dc_link *link = stream->link;
5b5abe95
AK
10751 unsigned int vsync_rate_hz = 0;
10752 struct dc_static_screen_params params = {0};
10753 /* Calculate number of static frames before generating interrupt to
10754 * enter PSR.
10755 */
5b5abe95
AK
10756 /* Init fail-safe of 2 static frames */
10757 unsigned int num_frames_static = 2;
8c322309
RL
10758
10759 DRM_DEBUG_DRIVER("Enabling psr...\n");
10760
5b5abe95
AK
10761 vsync_rate_hz = div64_u64(div64_u64((
10762 stream->timing.pix_clk_100hz * 100),
10763 stream->timing.v_total),
10764 stream->timing.h_total);
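	/*
	 * Illustrative example: a 1080p60 CEA mode has pix_clk_100hz =
	 * 1485000 (148.5 MHz), v_total = 1125 and h_total = 2200, so
	 * 148500000 / 1125 / 2200 = 60 Hz.
	 */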
10765
10766 /* Round up
10767 * Calculate number of frames such that at least 30 ms of time has
10768 * passed.
10769 */
7aa62404
RL
10770 if (vsync_rate_hz != 0) {
10771 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
5b5abe95 10772 num_frames_static = (30000 / frame_time_microsec) + 1;
7aa62404 10773 }
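	/*
	 * Illustrative example: at 60 Hz, frame_time_microsec = 16666 and
	 * num_frames_static = 30000 / 16666 + 1 = 2; at 144 Hz it is
	 * 30000 / 6944 + 1 = 5, keeping the static window at or above 30 ms.
	 */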
5b5abe95
AK
10774
10775 params.triggers.cursor_update = true;
10776 params.triggers.overlay_update = true;
10777 params.triggers.surface_update = true;
10778 params.num_frames = num_frames_static;
8c322309 10779
5b5abe95 10780 dc_stream_set_static_screen_params(link->ctx->dc,
8c322309 10781 &stream, 1,
5b5abe95 10782 &params);
8c322309 10783
1d496907 10784 return dc_link_set_psr_allow_active(link, true, false, false);
8c322309
RL
10785}
10786
10787/*
10788 * amdgpu_dm_psr_disable() - disable psr f/w
10789 * @stream: stream state
10790 *
10791 * Return: true if success
10792 */
10793static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10794{
10795
10796 DRM_DEBUG_DRIVER("Disabling psr...\n");
10797
1d496907 10798 return dc_link_set_psr_allow_active(stream->link, false, true, false);
8c322309 10799}
3d4e52d0 10800
6ee90e88 10801/*
10802 * amdgpu_dm_psr_disable_all() - disable psr f/w on all streams
10803 * where psr is currently enabled
10804 *
10805 * Return: true if success
10806 */
10807static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10808{
10809 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10810 return dc_set_psr_allow_active(dm->dc, false);
10811}
10812
3d4e52d0
VL
10813void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10814{
1348969a 10815 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
10816 struct dc *dc = adev->dm.dc;
10817 int i;
10818
10819 mutex_lock(&adev->dm.dc_lock);
10820 if (dc->current_state) {
10821 for (i = 0; i < dc->current_state->stream_count; ++i)
10822 dc->current_state->streams[i]
10823 ->triggered_crtc_reset.enabled =
10824 adev->dm.force_timing_sync;
10825
10826 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10827 dc_trigger_sync(dc, dc->current_state);
10828 }
10829 mutex_unlock(&adev->dm.dc_lock);
10830}
9d83722d
RS
10831
10832void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10833 uint32_t value, const char *func_name)
10834{
10835#ifdef DM_CHECK_ADDR_0
10836 if (address == 0) {
10837 DC_ERR("invalid register write. address = 0\n");
10838 return;
10839 }
10840#endif
10841 cgs_write_register(ctx->cgs_device, address, value);
10842 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10843}
10844
10845uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10846 const char *func_name)
10847{
10848 uint32_t value;
10849#ifdef DM_CHECK_ADDR_0
10850 if (address == 0) {
10851 DC_ERR("invalid register read; address = 0\n");
10852 return 0;
10853 }
10854#endif
10855
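	/*
	 * Hedged reading of the check below: while the DMUB register helper
	 * is gathering writes for offload (and not burst-writing), a plain
	 * register read cannot return meaningful data, so assert and bail
	 * out with 0.
	 */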
10856 if (ctx->dmub_srv &&
10857 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10858 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10859 ASSERT(false);
10860 return 0;
10861 }
10862
10863 value = cgs_read_register(ctx->cgs_device, address);
10864
10865 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10866
10867 return value;
10868}
81927e28
JS
10869
10870int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10871 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10872{
10873 struct amdgpu_device *adev = ctx->driver_context;
10874 int ret = 0;
10875
10876 dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10877 ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10878 if (ret == 0) {
10879 *operation_result = AUX_RET_ERROR_TIMEOUT;
10880 return -1;
10881 }
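	/*
	 * wait_for_completion_interruptible_timeout() returns the remaining
	 * jiffies (> 0) on completion, 0 on timeout, and -ERESTARTSYS if
	 * interrupted; only the timeout case is handled explicitly here.
	 */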
10882 *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10883
10884 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10885 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10886
10887 /* For the read case, copy data to the payload */
10888 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10889 (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10890 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10891 adev->dm.dmub_notify->aux_reply.length);
10892 }
10893
10894 return adev->dm.dmub_notify->aux_reply.length;
10895}