drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

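/*
 * Informal sketch of where DM sits in the stack, derived from the overview
 * above:
 *
 *   userspace (KMS/atomic ioctls)
 *           |
 *        DRM core
 *           |
 *   amdgpu_dm (this file)  -- translates DRM state into DC objects
 *           |
 *      DC (display core)
 *           |
 *        hardware
 */
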
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

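/*
 * Note: the two helpers above mirror the DP dongle type reported by DC into
 * DRM's standard "subconnector" property, so userspace can tell e.g. a
 * native DP output from a DP-to-HDMI dongle behind the same connector. The
 * property is only meaningful (and only updated) for DisplayPort connectors.
 */
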
/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

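/*
 * Look up the amdgpu_crtc driven by a given OTG (output timing generator)
 * instance. IRQ sources are enumerated per OTG, so the IRQ handlers below
 * recover their CRTC by subtracting the IRQ-type base from irq_src and
 * resolving the resulting instance here.
 */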
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

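/*
 * Note on the helpers above: amdgpu_dm_vrr_active_irq() reads the FreeSync
 * state cached in dm_irq_params, which is what the IRQ handlers below use,
 * while amdgpu_dm_vrr_active() reads it from a dm_crtc_state. VRR counts as
 * active both in the variable state and in the fixed state (a fixed refresh
 * rate programmed through the VRR mechanism, e.g. for fixed-rate video).
 */
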
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

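/*
 * Rough per-frame scanout timeline and where the two handlers above run
 * (informal sketch, summarizing the comments in the handlers):
 *
 *   active scanout --> front porch --> vsync/back porch --> active ...
 *                   ^                ^
 *                   |                +-- VUPDATE IRQ: dm_vupdate_high_irq()
 *                   +-- VBLANK/CRTC IRQ: dm_crtc_high_irq()
 *
 * With fixed refresh, drm_crtc_handle_vblank() runs from the CRTC IRQ at
 * the start of vblank; with VRR it is deferred to VUPDATE, after the
 * variable-length front porch has ended, so vblank timestamps stay valid.
 */
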
#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM, to be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev = adev->dm.ddev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	link_index = notify->link_index;

	link = adev->dm.dc->links[link_index];

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			handle_hpd_irq_helper(aconnector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
				   dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

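/*
 * Typical registration, as done from amdgpu_dm_init() below:
 *
 *   if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *                                      dmub_aux_setconfig_callback, false))
 *           DRM_ERROR("amdgpu: fail to register dmub aux callback");
 *
 * With dmub_int_thread_offload == true the callback is instead run from
 * dm_handle_hpd_work() on the delayed_hpd_wq worker rather than directly
 * in the outbox IRQ handler.
 */
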
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
						dmub_hpd_wrk->dmub_notify);
	}
	kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining pending DMUB notifications
 * and DMCUB trace buffer entries.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
		if (!dmub_hpd_wrk) {
			DRM_ERROR("Failed to allocate dmub_hpd_wrk");
			return;
		}
		INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);

		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
			do {
				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
				if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
					DRM_ERROR("DM: notify type %d invalid!", notify.type);
					continue;
				}
				if (dm->dmub_thread_offload[notify.type] == true) {
					dmub_hpd_wrk->dmub_notify = &notify;
					dmub_hpd_wrk->adev = adev;
					queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
				} else {
					dm->dmub_callback[notify.type](adev, &notify);
				}

			} while (notify.pending_notification);

		} else {
			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
		}
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

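/*
 * Note: the FBC buffer allocated above is sized for the largest mode in
 * the connector's mode list at 4 bytes per pixel (htotal * vtotal * 4),
 * and lives in GTT.
 */
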
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					  int pipe, bool *enabled,
					  unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
				       struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

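/*
 * The pieces above implement the DRM half of the audio component contract:
 * the HDA driver binds via amdgpu_dm_audio_component_bind_ops, fetches ELDs
 * through get_eld(), and amdgpu_dm_audio_eld_notify() pings it whenever a
 * pin's ELD changes.
 */
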
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

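	/*
	 * Layout of the DMUB firmware blob as consumed here (informal sketch
	 * implied by the offset arithmetic around this comment):
	 *
	 *   ucode_array_offset_bytes
	 *   +-- PSP_HEADER_BYTES   (PSP wrapper, skipped)
	 *   +-- fw_inst_const      (inst_const_bytes minus the PSP header
	 *   |                       and footer)
	 *   +-- PSP_FOOTER_BYTES   (PSP wrapper, skipped)
	 *   +-- fw_bss_data        (bss_data_bytes)
	 */
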
	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
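/*
 * Mirror the GPU's memory apertures into DC's physical address space
 * config. The shifts below reflect the register encodings this mirrors
 * (hedged summary): system aperture bounds in 256KB units (>> 18), the
 * AGP window in 16MB units (>> 24), and GART page table addresses as
 * 4KB page numbers (>> 12, with bits 47:44 carried in *.high_part).
 */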
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
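/*
 * vblank_control_worker() - deferred vblank enable/disable bookkeeping.
 *
 * Queued on dm->vblank_control_workqueue. Tracks how many CRTCs currently
 * have vblank interrupts enabled: idle optimizations (MALL) are only
 * allowed when that count drops to zero, and PSR is entered/exited based
 * on whether the OS still needs vblank events for the stream.
 */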
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

#endif

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}

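/*
 * One offload queue is created per DC link (dc->caps.max_links), each with
 * its own single-threaded workqueue, so deferred HPD RX work for one link
 * (link-loss recovery, automated test handling) cannot stall another link.
 */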
static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
	if (!adev->dm.hpd_rx_offload_wq) {
		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
		goto error;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: fail to register dmub aux callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
#endif
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;
	}
#endif

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
		destroy_workqueue(adev->dm.delayed_hpd_wq);
		adev->dm.delayed_hpd_wq = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	if (adev->dm.hpd_rx_offload_wq) {
		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
			if (adev->dm.hpd_rx_offload_wq[i].wq) {
				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
			}
		}

		kfree(adev->dm.hpd_rx_offload_wq);
		adev->dm.hpd_rx_offload_wq = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

1610
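/* Descriptive note (added for clarity): request, validate, and register the
 * DMCU firmware for ASICs that need it. ASICs that simply "return 0" below
 * either have no separate DMCU image or get this functionality through DMUB
 * instead (see dm_dmub_sw_init()), so there is nothing to load for them.
 */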
1611static int load_dmcu_fw(struct amdgpu_device *adev)
1612{
1613 const char *fw_name_dmcu = NULL;
1614 int r;
1615 const struct dmcu_firmware_header_v1_0 *hdr;
1616
1617 switch (adev->asic_type) {
1618#if defined(CONFIG_DRM_AMD_DC_SI)
1619 case CHIP_TAHITI:
1620 case CHIP_PITCAIRN:
1621 case CHIP_VERDE:
1622 case CHIP_OLAND:
1623#endif
1624 case CHIP_BONAIRE:
1625 case CHIP_HAWAII:
1626 case CHIP_KAVERI:
1627 case CHIP_KABINI:
1628 case CHIP_MULLINS:
1629 case CHIP_TONGA:
1630 case CHIP_FIJI:
1631 case CHIP_CARRIZO:
1632 case CHIP_STONEY:
1633 case CHIP_POLARIS11:
1634 case CHIP_POLARIS10:
1635 case CHIP_POLARIS12:
1636 case CHIP_VEGAM:
1637 case CHIP_VEGA10:
1638 case CHIP_VEGA12:
1639 case CHIP_VEGA20:
1640 case CHIP_NAVI10:
1641 case CHIP_NAVI14:
1642 case CHIP_RENOIR:
1643 case CHIP_SIENNA_CICHLID:
1644 case CHIP_NAVY_FLOUNDER:
1645 case CHIP_DIMGREY_CAVEFISH:
1646 case CHIP_BEIGE_GOBY:
1647 case CHIP_VANGOGH:
1648 case CHIP_YELLOW_CARP:
1649 return 0;
1650 case CHIP_NAVI12:
1651 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1652 break;
1653 case CHIP_RAVEN:
1654 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1655 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1656 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1657 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1658 else
1659 return 0;
1660 break;
1661 default:
1662 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1663 return -EINVAL;
1664 }
1665
1666 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1667 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1668 return 0;
1669 }
1670
1671 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1672 if (r == -ENOENT) {
1673 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1674 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1675 adev->dm.fw_dmcu = NULL;
1676 return 0;
1677 }
1678 if (r) {
1679 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1680 fw_name_dmcu);
1681 return r;
1682 }
1683
1684 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1685 if (r) {
1686 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1687 fw_name_dmcu);
1688 release_firmware(adev->dm.fw_dmcu);
1689 adev->dm.fw_dmcu = NULL;
1690 return r;
1691 }
1692
1693 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1694 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1695 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1696 adev->firmware.fw_size +=
1697 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1698
1699 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1700 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1701 adev->firmware.fw_size +=
1702 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1703
1704 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1705
1706 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1707
1708 return 0;
1709}
1710
1711static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1712{
1713 struct amdgpu_device *adev = ctx;
1714
1715 return dm_read_reg(adev->dm.dc->ctx, address);
1716}
1717
1718static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1719 uint32_t value)
1720{
1721 struct amdgpu_device *adev = ctx;
1722
1723 return dm_write_reg(adev->dm.dc->ctx, address, value);
1724}
1725
1726static int dm_dmub_sw_init(struct amdgpu_device *adev)
1727{
1728 struct dmub_srv_create_params create_params;
1729 struct dmub_srv_region_params region_params;
1730 struct dmub_srv_region_info region_info;
1731 struct dmub_srv_fb_params fb_params;
1732 struct dmub_srv_fb_info *fb_info;
1733 struct dmub_srv *dmub_srv;
1734 const struct dmcub_firmware_header_v1_0 *hdr;
1735 const char *fw_name_dmub;
1736 enum dmub_asic dmub_asic;
1737 enum dmub_status status;
1738 int r;
1739
1740 switch (adev->asic_type) {
1741 case CHIP_RENOIR:
1742 dmub_asic = DMUB_ASIC_DCN21;
1743 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1744 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1745 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1746 break;
1747 case CHIP_SIENNA_CICHLID:
1748 dmub_asic = DMUB_ASIC_DCN30;
1749 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1750 break;
1751 case CHIP_NAVY_FLOUNDER:
1752 dmub_asic = DMUB_ASIC_DCN30;
1753 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1754 break;
1755 case CHIP_VANGOGH:
1756 dmub_asic = DMUB_ASIC_DCN301;
1757 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1758 break;
1759 case CHIP_DIMGREY_CAVEFISH:
1760 dmub_asic = DMUB_ASIC_DCN302;
1761 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1762 break;
1763 case CHIP_BEIGE_GOBY:
1764 dmub_asic = DMUB_ASIC_DCN303;
1765 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1766 break;
1767 case CHIP_YELLOW_CARP:
1768 dmub_asic = DMUB_ASIC_DCN31;
1769 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1770 break;
1771
1772 default:
1773 /* ASIC doesn't support DMUB. */
1774 return 0;
1775 }
1776
1777 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1778 if (r) {
1779 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1780 return 0;
1781 }
1782
1783 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1784 if (r) {
1785 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1786 return 0;
1787 }
1788
1789 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1790 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1791
1792 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1793 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1794 AMDGPU_UCODE_ID_DMCUB;
1795 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1796 adev->dm.dmub_fw;
1797 adev->firmware.fw_size +=
1798 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1799
1800 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1801 adev->dm.dmcub_fw_version);
1802 }
1803
1804
1805 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1806 dmub_srv = adev->dm.dmub_srv;
1807
1808 if (!dmub_srv) {
1809 DRM_ERROR("Failed to allocate DMUB service!\n");
1810 return -ENOMEM;
1811 }
1812
1813 memset(&create_params, 0, sizeof(create_params));
1814 create_params.user_ctx = adev;
1815 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1816 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1817 create_params.asic = dmub_asic;
1818
1819 /* Create the DMUB service. */
1820 status = dmub_srv_create(dmub_srv, &create_params);
1821 if (status != DMUB_STATUS_OK) {
1822 DRM_ERROR("Error creating DMUB service: %d\n", status);
1823 return -EINVAL;
1824 }
1825
1826 /* Calculate the size of all the regions for the DMUB service. */
1827 memset(&region_params, 0, sizeof(region_params));
1828
1829 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1830 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1831 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1832 region_params.vbios_size = adev->bios_size;
1833 region_params.fw_bss_data = region_params.bss_data_size ?
1834 adev->dm.dmub_fw->data +
1835 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1836 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1837 region_params.fw_inst_const =
1838 adev->dm.dmub_fw->data +
1839 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1840 PSP_HEADER_BYTES;
1841
1842 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1843 &region_info);
1844
1845 if (status != DMUB_STATUS_OK) {
1846 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1847 return -EINVAL;
1848 }
1849
1850 /*
1851 * Allocate a framebuffer based on the total size of all the regions.
1852 * TODO: Move this into GART.
1853 */
1854 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1855 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1856 &adev->dm.dmub_bo_gpu_addr,
1857 &adev->dm.dmub_bo_cpu_addr);
1858 if (r)
1859 return r;
1860
1861 /* Rebase the regions on the framebuffer address. */
1862 memset(&fb_params, 0, sizeof(fb_params));
1863 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1864 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1865 fb_params.region_info = &region_info;
1866
1867 adev->dm.dmub_fb_info =
1868 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1869 fb_info = adev->dm.dmub_fb_info;
1870
1871 if (!fb_info) {
1872 DRM_ERROR(
1873 "Failed to allocate framebuffer info for DMUB service!\n");
1874 return -ENOMEM;
1875 }
1876
1877 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1878 if (status != DMUB_STATUS_OK) {
1879 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1880 return -EINVAL;
1881 }
1882
1883 return 0;
1884}
1885
1886static int dm_sw_init(void *handle)
1887{
1888 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1889 int r;
1890
1891 r = dm_dmub_sw_init(adev);
1892 if (r)
1893 return r;
1894
1895 return load_dmcu_fw(adev);
1896}
1897
1898static int dm_sw_fini(void *handle)
1899{
1900 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1901
1902 kfree(adev->dm.dmub_fb_info);
1903 adev->dm.dmub_fb_info = NULL;
1904
1905 if (adev->dm.dmub_srv) {
1906 dmub_srv_destroy(adev->dm.dmub_srv);
1907 adev->dm.dmub_srv = NULL;
1908 }
1909
1910 release_firmware(adev->dm.dmub_fw);
1911 adev->dm.dmub_fw = NULL;
1912
1913 release_firmware(adev->dm.fw_dmcu);
1914 adev->dm.fw_dmcu = NULL;
1915
1916 return 0;
1917}
1918
1919static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1920{
1921 struct amdgpu_dm_connector *aconnector;
1922 struct drm_connector *connector;
1923 struct drm_connector_list_iter iter;
1924 int ret = 0;
1925
1926 drm_connector_list_iter_begin(dev, &iter);
1927 drm_for_each_connector_iter(connector, &iter) {
1928 aconnector = to_amdgpu_dm_connector(connector);
1929 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1930 aconnector->mst_mgr.aux) {
1931 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1932 aconnector,
1933 aconnector->base.base.id);
1934
1935 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1936 if (ret < 0) {
1937 DRM_ERROR("DM_MST: Failed to start MST\n");
1938 aconnector->dc_link->type =
1939 dc_connection_single;
1940 break;
1941 }
1942 }
1943 }
1944 drm_connector_list_iter_end(&iter);
1945
1946 return ret;
1947}
1948
1949static int dm_late_init(void *handle)
1950{
1951 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1952
1953 struct dmcu_iram_parameters params;
1954 unsigned int linear_lut[16];
1955 int i;
1956 struct dmcu *dmcu = NULL;
1957
1958 dmcu = adev->dm.dc->res_pool->dmcu;
1959
1960 for (i = 0; i < 16; i++)
1961 linear_lut[i] = 0xFFFF * i / 15;
1962
1963 params.set = 0;
1964 params.backlight_ramping_start = 0xCCCC;
1965 params.backlight_ramping_reduction = 0xCCCCCCCC;
1966 params.backlight_lut_array_size = 16;
1967 params.backlight_lut_array = linear_lut;
1968
1969 /* Min backlight level after ABM reduction; don't allow below 1%:
1970 * 0xFFFF x 0.01 = 0x28F
1971 */
1972 params.min_abm_backlight = 0x28F;
1973 /* In the case where ABM is implemented on dmcub,
1974 * the dmcu object will be NULL.
1975 * ABM 2.4 and up are implemented on dmcub.
1976 */
1977 if (dmcu) {
1978 if (!dmcu_load_iram(dmcu, params))
1979 return -EINVAL;
1980 } else if (adev->dm.dc->ctx->dmub_srv) {
1981 struct dc_link *edp_links[MAX_NUM_EDP];
1982 int edp_num;
1983
1984 get_edp_links(adev->dm.dc, edp_links, &edp_num);
1985 for (i = 0; i < edp_num; i++) {
1986 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1987 return -EINVAL;
1988 }
1989 }
1990
1991 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1992}
1993
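/* Descriptive note (added for clarity): suspend or resume the MST topology
 * managers of all MST root connectors. On resume, a manager that fails to
 * come back is torn down and a hotplug event is sent so userspace re-probes.
 */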
1994static void s3_handle_mst(struct drm_device *dev, bool suspend)
1995{
1996 struct amdgpu_dm_connector *aconnector;
1997 struct drm_connector *connector;
1998 struct drm_connector_list_iter iter;
1999 struct drm_dp_mst_topology_mgr *mgr;
2000 int ret;
2001 bool need_hotplug = false;
2002
2003 drm_connector_list_iter_begin(dev, &iter);
2004 drm_for_each_connector_iter(connector, &iter) {
2005 aconnector = to_amdgpu_dm_connector(connector);
2006 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2007 aconnector->mst_port)
2008 continue;
2009
2010 mgr = &aconnector->mst_mgr;
2011
2012 if (suspend) {
2013 drm_dp_mst_topology_mgr_suspend(mgr);
2014 } else {
2015 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2016 if (ret < 0) {
2017 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2018 need_hotplug = true;
2019 }
2020 }
2021 }
2022 drm_connector_list_iter_end(&iter);
2023
2024 if (need_hotplug)
2025 drm_kms_helper_hotplug_event(dev);
2026}
2027
2028static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2029{
2030 struct smu_context *smu = &adev->smu;
2031 int ret = 0;
2032
2033 if (!is_support_sw_smu(adev))
2034 return 0;
2035
2036 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2037 * depends on the Windows driver dc implementation.
2038 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2039 * should be passed to smu during boot up and resume from s3.
2040 * boot up: dc calculates dcn watermark clock settings within dc_create,
2041 * dcn20_resource_construct,
2042 * then calls the pplib functions below to pass the settings to smu:
2043 * smu_set_watermarks_for_clock_ranges
2044 * smu_set_watermarks_table
2045 * navi10_set_watermarks_table
2046 * smu_write_watermarks_table
2047 *
2048 * For Renoir, clock settings of dcn watermarks are also fixed values.
2049 * dc has implemented a different flow for the Windows driver:
2050 * dc_hardware_init / dc_set_power_state
2051 * dcn10_init_hw
2052 * notify_wm_ranges
2053 * set_wm_ranges
2054 * -- Linux
2055 * smu_set_watermarks_for_clock_ranges
2056 * renoir_set_watermarks_table
2057 * smu_write_watermarks_table
2058 *
2059 * For Linux,
2060 * dc_hardware_init -> amdgpu_dm_init
2061 * dc_set_power_state --> dm_resume
2062 *
2063 * therefore, this function applies to navi10/12/14 but not Renoir
2064 *
2065 */
2066 switch (adev->asic_type) {
2067 case CHIP_NAVI10:
2068 case CHIP_NAVI14:
2069 case CHIP_NAVI12:
2070 break;
2071 default:
2072 return 0;
2073 }
2074
2075 ret = smu_write_watermarks_table(smu);
2076 if (ret) {
2077 DRM_ERROR("Failed to update WMTABLE!\n");
2078 return ret;
2079 }
2080
2081 return 0;
2082}
2083
2084/**
2085 * dm_hw_init() - Initialize DC device
2086 * @handle: The base driver device containing the amdgpu_dm device.
2087 *
2088 * Initialize the &struct amdgpu_display_manager device. This involves calling
2089 * the initializers of each DM component, then populating the struct with them.
2090 *
2091 * Although the function implies hardware initialization, both hardware and
2092 * software are initialized here. Splitting them out to their relevant init
2093 * hooks is a future TODO item.
2094 *
2095 * Some notable things that are initialized here:
2096 *
2097 * - Display Core, both software and hardware
2098 * - DC modules that we need (freesync and color management)
2099 * - DRM software states
2100 * - Interrupt sources and handlers
2101 * - Vblank support
2102 * - Debug FS entries, if enabled
2103 */
2104static int dm_hw_init(void *handle)
2105{
2106 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2107 /* Create DAL display manager */
2108 amdgpu_dm_init(adev);
2109 amdgpu_dm_hpd_init(adev);
2110
2111 return 0;
2112}
2113
2114/**
2115 * dm_hw_fini() - Teardown DC device
2116 * @handle: The base driver device containing the amdgpu_dm device.
2117 *
2118 * Teardown components within &struct amdgpu_display_manager that require
2119 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2120 * were loaded. Also flush IRQ workqueues and disable them.
2121 */
2122static int dm_hw_fini(void *handle)
2123{
2124 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2125
2126 amdgpu_dm_hpd_fini(adev);
2127
2128 amdgpu_dm_irq_fini(adev);
2129 amdgpu_dm_fini(adev);
2130 return 0;
2131}
2132
2133
2134static int dm_enable_vblank(struct drm_crtc *crtc);
2135static void dm_disable_vblank(struct drm_crtc *crtc);
2136
2137static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2138 struct dc_state *state, bool enable)
2139{
2140 enum dc_irq_source irq_source;
2141 struct amdgpu_crtc *acrtc;
2142 int rc = -EBUSY;
2143 int i = 0;
2144
2145 for (i = 0; i < state->stream_count; i++) {
2146 acrtc = get_crtc_by_otg_inst(
2147 adev, state->stream_status[i].primary_otg_inst);
2148
2149 if (acrtc && state->stream_status[i].plane_count != 0) {
2150 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2151 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2152 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2153 acrtc->crtc_id, enable ? "en" : "dis", rc);
2154 if (rc)
2155 DRM_WARN("Failed to %s pflip interrupts\n",
2156 enable ? "enable" : "disable");
2157
2158 if (enable) {
2159 rc = dm_enable_vblank(&acrtc->base);
2160 if (rc)
2161 DRM_WARN("Failed to enable vblank interrupts\n");
2162 } else {
2163 dm_disable_vblank(&acrtc->base);
2164 }
2165
2166 }
2167 }
2168
2169}
2170
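/* Descriptive note (added for clarity): build a copy of the current DC state
 * with every stream and its planes removed, validate it, and commit it,
 * effectively blanking all pipes. Used on suspend during GPU reset.
 */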
2171static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2172{
2173 struct dc_state *context = NULL;
2174 enum dc_status res = DC_ERROR_UNEXPECTED;
2175 int i;
2176 struct dc_stream_state *del_streams[MAX_PIPES];
2177 int del_streams_count = 0;
2178
2179 memset(del_streams, 0, sizeof(del_streams));
2180
2181 context = dc_create_state(dc);
2182 if (context == NULL)
2183 goto context_alloc_fail;
2184
2185 dc_resource_state_copy_construct_current(dc, context);
2186
2187 /* First, remove all streams from the context */
2188 for (i = 0; i < context->stream_count; i++) {
2189 struct dc_stream_state *stream = context->streams[i];
2190
2191 del_streams[del_streams_count++] = stream;
2192 }
2193
2194 /* Remove all planes for removed streams and then remove the streams */
2195 for (i = 0; i < del_streams_count; i++) {
2196 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2197 res = DC_FAIL_DETACH_SURFACES;
2198 goto fail;
2199 }
2200
2201 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2202 if (res != DC_OK)
2203 goto fail;
2204 }
2205
2206
2207 res = dc_validate_global_state(dc, context, false);
2208
2209 if (res != DC_OK) {
2210 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
2211 goto fail;
2212 }
2213
2214 res = dc_commit_state(dc, context);
2215
2216fail:
2217 dc_release_state(context);
2218
2219context_alloc_fail:
2220 return res;
2221}
2222
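/* Descriptive note (added for clarity): flush the per-link HPD RX offload
 * workqueues so no deferred interrupt work is still running at suspend time.
 */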
2223static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2224{
2225 int i;
2226
2227 if (dm->hpd_rx_offload_wq) {
2228 for (i = 0; i < dm->dc->caps.max_links; i++)
2229 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2230 }
2231}
2232
2233static int dm_suspend(void *handle)
2234{
2235 struct amdgpu_device *adev = handle;
2236 struct amdgpu_display_manager *dm = &adev->dm;
2237 int ret = 0;
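 /* Two paths (note added for clarity): during GPU reset, cache the current
 * DC state and blank all streams so dm_resume() can replay them; otherwise
 * perform a regular atomic suspend and power DC down to D3.
 */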
2238
2239 if (amdgpu_in_reset(adev)) {
2240 mutex_lock(&dm->dc_lock);
2241
2242#if defined(CONFIG_DRM_AMD_DC_DCN)
2243 dc_allow_idle_optimizations(adev->dm.dc, false);
2244#endif
2245
2246 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2247
2248 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2249
2250 amdgpu_dm_commit_zero_streams(dm->dc);
2251
2252 amdgpu_dm_irq_suspend(adev);
2253
2254 hpd_rx_irq_work_suspend(dm);
2255
2256 return ret;
2257 }
2258
2259 WARN_ON(adev->dm.cached_state);
2260 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2261
2262 s3_handle_mst(adev_to_drm(adev), true);
2263
2264 amdgpu_dm_irq_suspend(adev);
2265
2266 hpd_rx_irq_work_suspend(dm);
2267
2268 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2269
2270 return 0;
2271}
2272
2273static struct amdgpu_dm_connector *
2274amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2275 struct drm_crtc *crtc)
2276{
2277 uint32_t i;
2278 struct drm_connector_state *new_con_state;
2279 struct drm_connector *connector;
2280 struct drm_crtc *crtc_from_state;
2281
2282 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2283 crtc_from_state = new_con_state->crtc;
2284
2285 if (crtc_from_state == crtc)
2286 return to_amdgpu_dm_connector(connector);
2287 }
2288
2289 return NULL;
2290}
2291
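/* Descriptive note (added for clarity): emulate link detection for forced
 * connectors by synthesizing a sink that matches the connector signal type
 * and reading the local EDID, without touching real link-detection hardware.
 */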
2292static void emulated_link_detect(struct dc_link *link)
2293{
2294 struct dc_sink_init_data sink_init_data = { 0 };
2295 struct display_sink_capability sink_caps = { 0 };
2296 enum dc_edid_status edid_status;
2297 struct dc_context *dc_ctx = link->ctx;
2298 struct dc_sink *sink = NULL;
2299 struct dc_sink *prev_sink = NULL;
2300
2301 link->type = dc_connection_none;
2302 prev_sink = link->local_sink;
2303
2304 if (prev_sink)
2305 dc_sink_release(prev_sink);
2306
2307 switch (link->connector_signal) {
2308 case SIGNAL_TYPE_HDMI_TYPE_A: {
2309 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2310 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2311 break;
2312 }
2313
2314 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2315 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2316 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2317 break;
2318 }
2319
2320 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2321 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2322 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2323 break;
2324 }
2325
2326 case SIGNAL_TYPE_LVDS: {
2327 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2328 sink_caps.signal = SIGNAL_TYPE_LVDS;
2329 break;
2330 }
2331
2332 case SIGNAL_TYPE_EDP: {
2333 sink_caps.transaction_type =
2334 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2335 sink_caps.signal = SIGNAL_TYPE_EDP;
2336 break;
2337 }
2338
2339 case SIGNAL_TYPE_DISPLAY_PORT: {
2340 sink_caps.transaction_type =
2341 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2342 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2343 break;
2344 }
2345
2346 default:
2347 DC_ERROR("Invalid connector type! signal:%d\n",
2348 link->connector_signal);
2349 return;
2350 }
2351
2352 sink_init_data.link = link;
2353 sink_init_data.sink_signal = sink_caps.signal;
2354
2355 sink = dc_sink_create(&sink_init_data);
2356 if (!sink) {
2357 DC_ERROR("Failed to create sink!\n");
2358 return;
2359 }
2360
2361 /* dc_sink_create returns a new reference */
2362 link->local_sink = sink;
2363
2364 edid_status = dm_helpers_read_local_edid(
2365 link->ctx,
2366 link,
2367 sink);
2368
2369 if (edid_status != EDID_OK)
2370 DC_ERROR("Failed to read EDID");
2371
2372}
2373
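/* Descriptive note (added for clarity): re-commit every cached stream with
 * force_full_update set on its planes, so the first commit after a GPU reset
 * reprograms the full hardware state.
 */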
2374static void dm_gpureset_commit_state(struct dc_state *dc_state,
2375 struct amdgpu_display_manager *dm)
2376{
2377 struct {
2378 struct dc_surface_update surface_updates[MAX_SURFACES];
2379 struct dc_plane_info plane_infos[MAX_SURFACES];
2380 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2381 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2382 struct dc_stream_update stream_update;
2383 } *bundle;
2384 int k, m;
2385
2386 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2387
2388 if (!bundle) {
2389 dm_error("Failed to allocate update bundle\n");
2390 goto cleanup;
2391 }
2392
2393 for (k = 0; k < dc_state->stream_count; k++) {
2394 bundle->stream_update.stream = dc_state->streams[k];
2395
2396 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2397 bundle->surface_updates[m].surface =
2398 dc_state->stream_status->plane_states[m];
2399 bundle->surface_updates[m].surface->force_full_update =
2400 true;
2401 }
2402 dc_commit_updates_for_stream(
2403 dm->dc, bundle->surface_updates,
2404 dc_state->stream_status->plane_count,
2405 dc_state->streams[k], &bundle->stream_update, dc_state);
2406 }
2407
2408cleanup:
2409 kfree(bundle);
2410
2411 return;
2412}
2413
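/* Descriptive note (added for clarity): send a dpms_off stream update to the
 * stream currently driven by the link; called from the HPD handler when a
 * connector reads back as disconnected while a stream is still active.
 */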
2414static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2415{
2416 struct dc_stream_state *stream_state;
2417 struct amdgpu_dm_connector *aconnector = link->priv;
2418 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2419 struct dc_stream_update stream_update;
2420 bool dpms_off = true;
2421
2422 memset(&stream_update, 0, sizeof(stream_update));
2423 stream_update.dpms_off = &dpms_off;
2424
2425 mutex_lock(&adev->dm.dc_lock);
2426 stream_state = dc_stream_find_from_link(link);
2427
2428 if (stream_state == NULL) {
2429 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2430 mutex_unlock(&adev->dm.dc_lock);
2431 return;
2432 }
2433
2434 stream_update.stream = stream_state;
2435 acrtc_state->force_dpms_off = true;
2436 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2437 stream_state, &stream_update,
2438 stream_state->ctx->dc->current_state);
2439 mutex_unlock(&adev->dm.dc_lock);
2440}
2441
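/* Descriptive note (added for clarity): dm_resume() mirrors dm_suspend().
 * After a GPU reset it replays the cached DC state; on a normal S3 resume it
 * rebuilds the DC state, re-detects connectors, and restores the cached
 * atomic state.
 */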
2442static int dm_resume(void *handle)
2443{
2444 struct amdgpu_device *adev = handle;
2445 struct drm_device *ddev = adev_to_drm(adev);
2446 struct amdgpu_display_manager *dm = &adev->dm;
2447 struct amdgpu_dm_connector *aconnector;
2448 struct drm_connector *connector;
2449 struct drm_connector_list_iter iter;
2450 struct drm_crtc *crtc;
2451 struct drm_crtc_state *new_crtc_state;
2452 struct dm_crtc_state *dm_new_crtc_state;
2453 struct drm_plane *plane;
2454 struct drm_plane_state *new_plane_state;
2455 struct dm_plane_state *dm_new_plane_state;
2456 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2457 enum dc_connection_type new_connection_type = dc_connection_none;
2458 struct dc_state *dc_state;
2459 int i, r, j;
2460
2461 if (amdgpu_in_reset(adev)) {
2462 dc_state = dm->cached_dc_state;
2463
2464 r = dm_dmub_hw_init(adev);
2465 if (r)
2466 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2467
2468 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2469 dc_resume(dm->dc);
2470
2471 amdgpu_dm_irq_resume_early(adev);
2472
2473 for (i = 0; i < dc_state->stream_count; i++) {
2474 dc_state->streams[i]->mode_changed = true;
2475 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2476 dc_state->stream_status->plane_states[j]->update_flags.raw
2477 = 0xffffffff;
2478 }
2479 }
2480#if defined(CONFIG_DRM_AMD_DC_DCN)
2481 /*
2482 * Resource allocation happens for link encoders for newer ASIC in
2483 * dc_validate_global_state, so we need to revalidate it.
2484 *
2485 * This shouldn't fail (it passed once before), so warn if it does.
2486 */
2487 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2488#endif
2489
2490 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2491
2492 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2493
2494 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2495
2496 dc_release_state(dm->cached_dc_state);
2497 dm->cached_dc_state = NULL;
2498
2499 amdgpu_dm_irq_resume_late(adev);
2500
2501 mutex_unlock(&dm->dc_lock);
2502
2503 return 0;
2504 }
2505 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2506 dc_release_state(dm_state->context);
2507 dm_state->context = dc_create_state(dm->dc);
2508 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2509 dc_resource_state_construct(dm->dc, dm_state->context);
2510
2511 /* Before powering on DC we need to re-initialize DMUB. */
2512 r = dm_dmub_hw_init(adev);
2513 if (r)
2514 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2515
2516 /* power on hardware */
2517 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2518
2519 /* program HPD filter */
2520 dc_resume(dm->dc);
2521
2522 /*
2523 * early enable HPD Rx IRQ, should be done before set mode as short
2524 * pulse interrupts are used for MST
2525 */
2526 amdgpu_dm_irq_resume_early(adev);
2527
2528 /* On resume we need to rewrite the MSTM control bits to enable MST */
2529 s3_handle_mst(ddev, false);
2530
2531 /* Do detection */
2532 drm_connector_list_iter_begin(ddev, &iter);
2533 drm_for_each_connector_iter(connector, &iter) {
2534 aconnector = to_amdgpu_dm_connector(connector);
2535
2536 /*
2537 * this is the case when traversing through already created
2538 * MST connectors, should be skipped
2539 */
2540 if (aconnector->mst_port)
2541 continue;
2542
2543 mutex_lock(&aconnector->hpd_lock);
2544 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2545 DRM_ERROR("KMS: Failed to detect connector\n");
2546
2547 if (aconnector->base.force && new_connection_type == dc_connection_none)
2548 emulated_link_detect(aconnector->dc_link);
2549 else
2550 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2551
2552 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2553 aconnector->fake_enable = false;
2554
2555 if (aconnector->dc_sink)
2556 dc_sink_release(aconnector->dc_sink);
2557 aconnector->dc_sink = NULL;
2558 amdgpu_dm_update_connector_after_detect(aconnector);
2559 mutex_unlock(&aconnector->hpd_lock);
2560 }
2561 drm_connector_list_iter_end(&iter);
2562
2563 /* Force mode set in atomic commit */
2564 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2565 new_crtc_state->active_changed = true;
2566
2567 /*
2568 * atomic_check is expected to create the dc states. We need to release
2569 * them here, since they were duplicated as part of the suspend
2570 * procedure.
2571 */
2572 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2573 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2574 if (dm_new_crtc_state->stream) {
2575 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2576 dc_stream_release(dm_new_crtc_state->stream);
2577 dm_new_crtc_state->stream = NULL;
2578 }
2579 }
2580
2581 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2582 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2583 if (dm_new_plane_state->dc_state) {
2584 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2585 dc_plane_state_release(dm_new_plane_state->dc_state);
2586 dm_new_plane_state->dc_state = NULL;
2587 }
2588 }
2589
2590 drm_atomic_helper_resume(ddev, dm->cached_state);
2591
2592 dm->cached_state = NULL;
2593
2594 amdgpu_dm_irq_resume_late(adev);
2595
2596 amdgpu_dm_smu_write_watermarks_table(adev);
2597
2598 return 0;
2599}
2600
2601/**
2602 * DOC: DM Lifecycle
2603 *
2604 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2605 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2606 * the base driver's device list to be initialized and torn down accordingly.
2607 *
2608 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2609 */
2610
2611static const struct amd_ip_funcs amdgpu_dm_funcs = {
2612 .name = "dm",
2613 .early_init = dm_early_init,
2614 .late_init = dm_late_init,
2615 .sw_init = dm_sw_init,
2616 .sw_fini = dm_sw_fini,
2617 .early_fini = amdgpu_dm_early_fini,
2618 .hw_init = dm_hw_init,
2619 .hw_fini = dm_hw_fini,
2620 .suspend = dm_suspend,
2621 .resume = dm_resume,
2622 .is_idle = dm_is_idle,
2623 .wait_for_idle = dm_wait_for_idle,
2624 .check_soft_reset = dm_check_soft_reset,
2625 .soft_reset = dm_soft_reset,
2626 .set_clockgating_state = dm_set_clockgating_state,
2627 .set_powergating_state = dm_set_powergating_state,
2628};
2629
2630const struct amdgpu_ip_block_version dm_ip_block =
2631{
2632 .type = AMD_IP_BLOCK_TYPE_DCE,
2633 .major = 1,
2634 .minor = 0,
2635 .rev = 0,
2636 .funcs = &amdgpu_dm_funcs,
2637};
2638
2639
2640/**
2641 * DOC: atomic
2642 *
2643 * *WIP*
2644 */
2645
2646static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2647 .fb_create = amdgpu_display_user_framebuffer_create,
2648 .get_format_info = amd_get_format_info,
2649 .output_poll_changed = drm_fb_helper_output_poll_changed,
2650 .atomic_check = amdgpu_dm_atomic_check,
2651 .atomic_commit = drm_atomic_helper_commit,
2652};
2653
2654static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2655 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2656};
2657
2658static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2659{
2660 u32 max_cll, min_cll, max, min, q, r;
2661 struct amdgpu_dm_backlight_caps *caps;
2662 struct amdgpu_display_manager *dm;
2663 struct drm_connector *conn_base;
2664 struct amdgpu_device *adev;
2665 struct dc_link *link = NULL;
2666 static const u8 pre_computed_values[] = {
2667 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2668 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2669 int i;
2670
2671 if (!aconnector || !aconnector->dc_link)
2672 return;
2673
2674 link = aconnector->dc_link;
2675 if (link->connector_signal != SIGNAL_TYPE_EDP)
2676 return;
2677
2678 conn_base = &aconnector->base;
2679 adev = drm_to_adev(conn_base->dev);
2680 dm = &adev->dm;
2681 for (i = 0; i < dm->num_of_edps; i++) {
2682 if (link == dm->backlight_link[i])
2683 break;
2684 }
2685 if (i >= dm->num_of_edps)
2686 return;
2687 caps = &dm->backlight_caps[i];
2688 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2689 caps->aux_support = false;
2690 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2691 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2692
2693 if (caps->ext_caps->bits.oled == 1 /*||
2694 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2695 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2696 caps->aux_support = true;
2697
2698 if (amdgpu_backlight == 0)
2699 caps->aux_support = false;
2700 else if (amdgpu_backlight == 1)
2701 caps->aux_support = true;
2702
2703 /* From the specification (CTA-861-G), for calculating the maximum
2704 * luminance we need to use:
2705 * Luminance = 50*2**(CV/32)
2706 * Where CV is a one-byte value.
2707 * For calculating this expression we may need floating-point precision;
2708 * to avoid this complexity level, we take advantage that CV is divided
2709 * by a constant. From Euclid's division algorithm, we know that CV
2710 * can be written as: CV = 32*q + r. Next, we replace CV in the
2711 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2712 * need to pre-compute the value of r/32. For pre-computing the values
2713 * we just used the following Ruby line:
2714 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2715 * The results of the above expressions can be verified at
2716 * pre_computed_values.
2717 */
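 /* Worked example (illustrative, not in the original source):
 * max_cll = 70 gives q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, matching
 * round(50 * 2**(70/32.0)) = 228 from the closed-form expression.
 */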
2718 q = max_cll >> 5;
2719 r = max_cll % 32;
2720 max = (1 << q) * pre_computed_values[r];
2721
2722 // min luminance: maxLum * (CV/255)^2 / 100
2723 q = DIV_ROUND_CLOSEST(min_cll, 255);
2724 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2725
2726 caps->aux_max_input_signal = max;
2727 caps->aux_min_input_signal = min;
2728}
2729
2730void amdgpu_dm_update_connector_after_detect(
2731 struct amdgpu_dm_connector *aconnector)
2732{
2733 struct drm_connector *connector = &aconnector->base;
2734 struct drm_device *dev = connector->dev;
2735 struct dc_sink *sink;
2736
2737 /* MST handled by drm_mst framework */
2738 if (aconnector->mst_mgr.mst_state == true)
2739 return;
2740
2741 sink = aconnector->dc_link->local_sink;
2742 if (sink)
2743 dc_sink_retain(sink);
2744
2745 /*
2746 * Edid mgmt connector gets first update only in mode_valid hook, and then
2747 * the connector sink is set to either a fake or a physical sink depending on link status.
2748 * Skip if already done during boot.
2749 */
2750 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2751 && aconnector->dc_em_sink) {
2752
2753 /*
2754 * For S3 resume with headless, use the em_sink to fake the stream
2755 * because on resume connector->sink is set to NULL
2756 */
2757 mutex_lock(&dev->mode_config.mutex);
2758
2759 if (sink) {
2760 if (aconnector->dc_sink) {
2761 amdgpu_dm_update_freesync_caps(connector, NULL);
2762 /*
2763 * retain and release below are used to
2764 * bump up the refcount for the sink because the link doesn't point
2765 * to it anymore after disconnect, so on the next crtc-to-connector
2766 * reshuffle by UMD we would get into an unwanted dc_sink release
2767 */
2768 dc_sink_release(aconnector->dc_sink);
2769 }
2770 aconnector->dc_sink = sink;
2771 dc_sink_retain(aconnector->dc_sink);
2772 amdgpu_dm_update_freesync_caps(connector,
2773 aconnector->edid);
2774 } else {
2775 amdgpu_dm_update_freesync_caps(connector, NULL);
2776 if (!aconnector->dc_sink) {
2777 aconnector->dc_sink = aconnector->dc_em_sink;
2778 dc_sink_retain(aconnector->dc_sink);
2779 }
2780 }
2781
2782 mutex_unlock(&dev->mode_config.mutex);
2783
2784 if (sink)
2785 dc_sink_release(sink);
2786 return;
2787 }
2788
2789 /*
2790 * TODO: temporary guard to look for a proper fix;
2791 * if this sink is an MST sink, we should not do anything
2792 */
2793 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2794 dc_sink_release(sink);
2795 return;
2796 }
2797
2798 if (aconnector->dc_sink == sink) {
2799 /*
2800 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2801 * Do nothing!!
2802 */
2803 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2804 aconnector->connector_id);
2805 if (sink)
2806 dc_sink_release(sink);
2807 return;
2808 }
2809
2810 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2811 aconnector->connector_id, aconnector->dc_sink, sink);
2812
2813 mutex_lock(&dev->mode_config.mutex);
2814
2815 /*
2816 * 1. Update status of the drm connector
2817 * 2. Send an event and let userspace tell us what to do
2818 */
2819 if (sink) {
2820 /*
2821 * TODO: check if we still need the S3 mode update workaround.
2822 * If yes, put it here.
2823 */
2824 if (aconnector->dc_sink) {
2825 amdgpu_dm_update_freesync_caps(connector, NULL);
2826 dc_sink_release(aconnector->dc_sink);
2827 }
2828
2829 aconnector->dc_sink = sink;
2830 dc_sink_retain(aconnector->dc_sink);
2831 if (sink->dc_edid.length == 0) {
2832 aconnector->edid = NULL;
2833 if (aconnector->dc_link->aux_mode) {
2834 drm_dp_cec_unset_edid(
2835 &aconnector->dm_dp_aux.aux);
2836 }
2837 } else {
2838 aconnector->edid =
2839 (struct edid *)sink->dc_edid.raw_edid;
2840
2841 drm_connector_update_edid_property(connector,
2842 aconnector->edid);
2843 if (aconnector->dc_link->aux_mode)
2844 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2845 aconnector->edid);
2846 }
2847
2848 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2849 update_connector_ext_caps(aconnector);
2850 } else {
2851 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2852 amdgpu_dm_update_freesync_caps(connector, NULL);
2853 drm_connector_update_edid_property(connector, NULL);
2854 aconnector->num_modes = 0;
2855 dc_sink_release(aconnector->dc_sink);
2856 aconnector->dc_sink = NULL;
2857 aconnector->edid = NULL;
2858#ifdef CONFIG_DRM_AMD_DC_HDCP
2859 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2860 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2861 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2862#endif
2863 }
2864
2865 mutex_unlock(&dev->mode_config.mutex);
2866
2867 update_subconnector_property(aconnector);
2868
2869 if (sink)
2870 dc_sink_release(sink);
2871}
2872
2873static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2874{
2875 struct drm_connector *connector = &aconnector->base;
2876 struct drm_device *dev = connector->dev;
2877 enum dc_connection_type new_connection_type = dc_connection_none;
2878 struct amdgpu_device *adev = drm_to_adev(dev);
2879 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2880 struct dm_crtc_state *dm_crtc_state = NULL;
2881
2882 if (adev->dm.disable_hpd_irq)
2883 return;
2884
2885 if (dm_con_state->base.state && dm_con_state->base.crtc)
2886 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2887 dm_con_state->base.state,
2888 dm_con_state->base.crtc));
2889 /*
2890 * In case of failure or MST no need to update connector status or notify the OS
2891 * since (for MST case) MST does this in its own context.
2892 */
2893 mutex_lock(&aconnector->hpd_lock);
2894
2895#ifdef CONFIG_DRM_AMD_DC_HDCP
2896 if (adev->dm.hdcp_workqueue) {
2897 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2898 dm_con_state->update_hdcp = true;
2899 }
2900#endif
2901 if (aconnector->fake_enable)
2902 aconnector->fake_enable = false;
2903
2904 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2905 DRM_ERROR("KMS: Failed to detect connector\n");
2906
2907 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2908 emulated_link_detect(aconnector->dc_link);
2909
2910
2911 drm_modeset_lock_all(dev);
2912 dm_restore_drm_connector_state(dev, connector);
2913 drm_modeset_unlock_all(dev);
2914
2915 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2916 drm_kms_helper_hotplug_event(dev);
2917
2918 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2919 if (new_connection_type == dc_connection_none &&
2920 aconnector->dc_link->type == dc_connection_none &&
2921 dm_crtc_state)
2922 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
2923
2924 amdgpu_dm_update_connector_after_detect(aconnector);
2925
2926 drm_modeset_lock_all(dev);
2927 dm_restore_drm_connector_state(dev, connector);
2928 drm_modeset_unlock_all(dev);
2929
2930 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2931 drm_kms_helper_hotplug_event(dev);
2932 }
2933 mutex_unlock(&aconnector->hpd_lock);
2934
2935}
2936
2937static void handle_hpd_irq(void *param)
2938{
2939 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2940
2941 handle_hpd_irq_helper(aconnector);
2942
2943}
2944
2945static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
2946{
2947 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2948 uint8_t dret;
2949 bool new_irq_handled = false;
2950 int dpcd_addr;
2951 int dpcd_bytes_to_read;
2952
2953 const int max_process_count = 30;
2954 int process_count = 0;
2955
2956 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2957
2958 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2959 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2960 /* DPCD 0x200 - 0x201 for downstream IRQ */
2961 dpcd_addr = DP_SINK_COUNT;
2962 } else {
2963 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2964 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2965 dpcd_addr = DP_SINK_COUNT_ESI;
2966 }
2967
2968 dret = drm_dp_dpcd_read(
2969 &aconnector->dm_dp_aux.aux,
2970 dpcd_addr,
2971 esi,
2972 dpcd_bytes_to_read);
2973
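 /* Descriptive note (added for clarity): poll the ESI (event status
 * indicator) registers and let the MST manager service each pending IRQ,
 * ACKing at DPCD, until no new IRQ is reported or max_process_count
 * iterations have run.
 */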
2974 while (dret == dpcd_bytes_to_read &&
2975 process_count < max_process_count) {
2976 uint8_t retry;
2977 dret = 0;
2978
2979 process_count++;
2980
2981 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2982 /* handle HPD short pulse irq */
2983 if (aconnector->mst_mgr.mst_state)
2984 drm_dp_mst_hpd_irq(
2985 &aconnector->mst_mgr,
2986 esi,
2987 &new_irq_handled);
2988
2989 if (new_irq_handled) {
2990 /* ACK at DPCD to notify downstream */
2991 const int ack_dpcd_bytes_to_write =
2992 dpcd_bytes_to_read - 1;
2993
2994 for (retry = 0; retry < 3; retry++) {
2995 uint8_t wret;
2996
2997 wret = drm_dp_dpcd_write(
2998 &aconnector->dm_dp_aux.aux,
2999 dpcd_addr + 1,
3000 &esi[1],
3001 ack_dpcd_bytes_to_write);
3002 if (wret == ack_dpcd_bytes_to_write)
3003 break;
3004 }
3005
3006 /* check if there is a new irq to be handled */
3007 dret = drm_dp_dpcd_read(
3008 &aconnector->dm_dp_aux.aux,
3009 dpcd_addr,
3010 esi,
3011 dpcd_bytes_to_read);
3012
3013 new_irq_handled = false;
3014 } else {
3015 break;
3016 }
3017 }
3018
3019 if (process_count == max_process_count)
3020 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3021}
3022
3023static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3024 union hpd_irq_data hpd_irq_data)
3025{
3026 struct hpd_rx_irq_offload_work *offload_work =
3027 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3028
3029 if (!offload_work) {
3030 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3031 return;
3032 }
3033
3034 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3035 offload_work->data = hpd_irq_data;
3036 offload_work->offload_wq = offload_wq;
3037
3038 queue_work(offload_wq->wq, &offload_work->work);
3039 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3040}
3041
3042static void handle_hpd_rx_irq(void *param)
3043{
3044 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3045 struct drm_connector *connector = &aconnector->base;
3046 struct drm_device *dev = connector->dev;
3047 struct dc_link *dc_link = aconnector->dc_link;
3048 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3049 bool result = false;
3050 enum dc_connection_type new_connection_type = dc_connection_none;
3051 struct amdgpu_device *adev = drm_to_adev(dev);
3052 union hpd_irq_data hpd_irq_data;
3053 bool link_loss = false;
3054 bool has_left_work = false;
3055 int idx = aconnector->base.index;
3056 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3057
3058 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3059
3060 if (adev->dm.disable_hpd_irq)
3061 return;
3062
3063 /*
3064 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
3065 * conflict; after the i2c helper is implemented, this mutex should be
3066 * retired.
3067 */
3068 mutex_lock(&aconnector->hpd_lock);
3069
3070 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3071 &link_loss, true, &has_left_work);
3072
3073 if (!has_left_work)
3074 goto out;
3075
3076 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3077 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3078 goto out;
3079 }
3080
3081 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3082 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3083 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3084 dm_handle_mst_sideband_msg(aconnector);
3085 goto out;
3086 }
3087
3088 if (link_loss) {
3089 bool skip = false;
3090
3091 spin_lock(&offload_wq->offload_lock);
3092 skip = offload_wq->is_handling_link_loss;
3093
3094 if (!skip)
3095 offload_wq->is_handling_link_loss = true;
3096
3097 spin_unlock(&offload_wq->offload_lock);
3098
3099 if (!skip)
3100 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3101
3102 goto out;
3103 }
3104 }
3105
3106out:
3107 if (result && !is_mst_root_connector) {
3108 /* Downstream Port status changed. */
3109 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3110 DRM_ERROR("KMS: Failed to detect connector\n");
3111
3112 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3113 emulated_link_detect(dc_link);
3114
3115 if (aconnector->fake_enable)
3116 aconnector->fake_enable = false;
3117
3118 amdgpu_dm_update_connector_after_detect(aconnector);
3119
3120
3121 drm_modeset_lock_all(dev);
3122 dm_restore_drm_connector_state(dev, connector);
3123 drm_modeset_unlock_all(dev);
3124
3125 drm_kms_helper_hotplug_event(dev);
3126 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3127
3128 if (aconnector->fake_enable)
3129 aconnector->fake_enable = false;
3130
3131 amdgpu_dm_update_connector_after_detect(aconnector);
3132
3133
3134 drm_modeset_lock_all(dev);
3135 dm_restore_drm_connector_state(dev, connector);
3136 drm_modeset_unlock_all(dev);
3137
3138 drm_kms_helper_hotplug_event(dev);
3139 }
3140 }
3141#ifdef CONFIG_DRM_AMD_DC_HDCP
3142 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3143 if (adev->dm.hdcp_workqueue)
3144 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3145 }
3146#endif
3147
3148 if (dc_link->type != dc_connection_mst_branch)
3149 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3150
3151 mutex_unlock(&aconnector->hpd_lock);
3152}
3153
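/* Descriptive note (added for clarity): walk the connector list and register
 * HPD (hotplug) and HPD RX (short-pulse) interrupt handlers for every link
 * that exposes a valid IRQ source.
 */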
3154static void register_hpd_handlers(struct amdgpu_device *adev)
3155{
3156 struct drm_device *dev = adev_to_drm(adev);
3157 struct drm_connector *connector;
3158 struct amdgpu_dm_connector *aconnector;
3159 const struct dc_link *dc_link;
3160 struct dc_interrupt_params int_params = {0};
3161
3162 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3163 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3164
3165 list_for_each_entry(connector,
3166 &dev->mode_config.connector_list, head) {
3167
3168 aconnector = to_amdgpu_dm_connector(connector);
3169 dc_link = aconnector->dc_link;
3170
3171 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3172 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3173 int_params.irq_source = dc_link->irq_source_hpd;
3174
3175 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3176 handle_hpd_irq,
3177 (void *) aconnector);
3178 }
3179
3180 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3181
3182 /* Also register for DP short pulse (hpd_rx). */
3183 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3184 int_params.irq_source = dc_link->irq_source_hpd_rx;
3185
3186 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3187 handle_hpd_rx_irq,
3188 (void *) aconnector);
3189
3190 if (adev->dm.hpd_rx_offload_wq)
3191 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3192 aconnector;
3193 }
3194 }
3195}
3196
3197#if defined(CONFIG_DRM_AMD_DC_SI)
3198/* Register IRQ sources and initialize IRQ callbacks */
3199static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3200{
3201 struct dc *dc = adev->dm.dc;
3202 struct common_irq_params *c_irq_params;
3203 struct dc_interrupt_params int_params = {0};
3204 int r;
3205 int i;
3206 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3207
3208 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3209 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3210
3211 /*
3212 * Actions of amdgpu_irq_add_id():
3213 * 1. Register a set() function with base driver.
3214 * Base driver will call set() function to enable/disable an
3215 * interrupt in DC hardware.
3216 * 2. Register amdgpu_dm_irq_handler().
3217 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3218 * coming from DC hardware.
3219 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3220 * for acknowledging and handling. */
3221
3222 /* Use VBLANK interrupt */
3223 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3224 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3225 if (r) {
3226 DRM_ERROR("Failed to add crtc irq id!\n");
3227 return r;
3228 }
3229
3230 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3231 int_params.irq_source =
3232 dc_interrupt_to_irq_source(dc, i + 1, 0);
3233
3234 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3235
3236 c_irq_params->adev = adev;
3237 c_irq_params->irq_src = int_params.irq_source;
3238
3239 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3240 dm_crtc_high_irq, c_irq_params);
3241 }
3242
3243 /* Use GRPH_PFLIP interrupt */
3244 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3245 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3246 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3247 if (r) {
3248 DRM_ERROR("Failed to add page flip irq id!\n");
3249 return r;
3250 }
3251
3252 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3253 int_params.irq_source =
3254 dc_interrupt_to_irq_source(dc, i, 0);
3255
3256 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3257
3258 c_irq_params->adev = adev;
3259 c_irq_params->irq_src = int_params.irq_source;
3260
3261 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3262 dm_pflip_high_irq, c_irq_params);
3263
3264 }
3265
3266 /* HPD */
3267 r = amdgpu_irq_add_id(adev, client_id,
3268 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3269 if (r) {
3270 DRM_ERROR("Failed to add hpd irq id!\n");
3271 return r;
3272 }
3273
3274 register_hpd_handlers(adev);
3275
3276 return 0;
3277}
3278#endif
3279
3280/* Register IRQ sources and initialize IRQ callbacks */
3281static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3282{
3283 struct dc *dc = adev->dm.dc;
3284 struct common_irq_params *c_irq_params;
3285 struct dc_interrupt_params int_params = {0};
3286 int r;
3287 int i;
3288 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3289
3290 if (adev->asic_type >= CHIP_VEGA10)
3291 client_id = SOC15_IH_CLIENTID_DCE;
3292
3293 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3294 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3295
1f6010a9
DF
3296 /*
3297 * Actions of amdgpu_irq_add_id():
4562236b
HW
3298 * 1. Register a set() function with base driver.
3299 * Base driver will call set() function to enable/disable an
3300 * interrupt in DC hardware.
3301 * 2. Register amdgpu_dm_irq_handler().
3302 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3303 * coming from DC hardware.
3304 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3305 * for acknowledging and handling. */
3306
b57de80a 3307 /* Use VBLANK interrupt */
e9029155 3308 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 3309 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
3310 if (r) {
3311 DRM_ERROR("Failed to add crtc irq id!\n");
3312 return r;
3313 }
3314
3315 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3316 int_params.irq_source =
3d761e79 3317 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 3318
b57de80a 3319 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
3320
3321 c_irq_params->adev = adev;
3322 c_irq_params->irq_src = int_params.irq_source;
3323
3324 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3325 dm_crtc_high_irq, c_irq_params);
3326 }
3327
d2574c33
MK
3328 /* Use VUPDATE interrupt */
3329 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3330 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3331 if (r) {
3332 DRM_ERROR("Failed to add vupdate irq id!\n");
3333 return r;
3334 }
3335
3336 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3337 int_params.irq_source =
3338 dc_interrupt_to_irq_source(dc, i, 0);
3339
3340 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3341
3342 c_irq_params->adev = adev;
3343 c_irq_params->irq_src = int_params.irq_source;
3344
3345 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3346 dm_vupdate_high_irq, c_irq_params);
3347 }
3348
3d761e79 3349 /* Use GRPH_PFLIP interrupt */
4562236b
HW
3350 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3351 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 3352 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
3353 if (r) {
3354 DRM_ERROR("Failed to add page flip irq id!\n");
3355 return r;
3356 }
3357
3358 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3359 int_params.irq_source =
3360 dc_interrupt_to_irq_source(dc, i, 0);
3361
3362 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3363
3364 c_irq_params->adev = adev;
3365 c_irq_params->irq_src = int_params.irq_source;
3366
3367 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3368 dm_pflip_high_irq, c_irq_params);
3369
3370 }
3371
3372 /* HPD */
2c8ad2d5
AD
3373 r = amdgpu_irq_add_id(adev, client_id,
3374 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
3375 if (r) {
3376 DRM_ERROR("Failed to add hpd irq id!\n");
3377 return r;
3378 }
3379
3380 register_hpd_handlers(adev);
3381
3382 return 0;
3383}
3384
b86a1aa3 3385#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3386/* Register IRQ sources and initialize IRQ callbacks */
3387static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3388{
3389 struct dc *dc = adev->dm.dc;
3390 struct common_irq_params *c_irq_params;
3391 struct dc_interrupt_params int_params = {0};
3392 int r;
3393 int i;
660d5406
WL
3394#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3395 static const unsigned int vrtl_int_srcid[] = {
3396 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3397 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3398 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3399 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3400 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3401 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3402 };
3403#endif
ff5ef992
AD
3404
3405 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3406 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3407
1f6010a9
DF
3408 /*
3409 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
3410 * 1. Register a set() function with base driver.
3411 * Base driver will call set() function to enable/disable an
3412 * interrupt in DC hardware.
3413 * 2. Register amdgpu_dm_irq_handler().
3414 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3415 * coming from DC hardware.
3416 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3417 * for acknowledging and handling.
1f6010a9 3418 */
ff5ef992
AD
3419
3420 /* Use VSTARTUP interrupt */
3421 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3422 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3423 i++) {
3760f76c 3424 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
3425
3426 if (r) {
3427 DRM_ERROR("Failed to add crtc irq id!\n");
3428 return r;
3429 }
3430
3431 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3432 int_params.irq_source =
3433 dc_interrupt_to_irq_source(dc, i, 0);
3434
3435 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3436
3437 c_irq_params->adev = adev;
3438 c_irq_params->irq_src = int_params.irq_source;
3439
2346ef47
NK
3440 amdgpu_dm_irq_register_interrupt(
3441 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3442 }
3443
86bc2219
WL
3444 /* Use otg vertical line interrupt */
3445#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
660d5406
WL
3446 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3447 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3448 vrtl_int_srcid[i], &adev->vline0_irq);
86bc2219
WL
3449
3450 if (r) {
3451 DRM_ERROR("Failed to add vline0 irq id!\n");
3452 return r;
3453 }
3454
3455 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3456 int_params.irq_source =
660d5406
WL
3457 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3458
3459 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3460 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3461 break;
3462 }
86bc2219
WL
3463
3464 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3465 - DC_IRQ_SOURCE_DC1_VLINE0];
3466
3467 c_irq_params->adev = adev;
3468 c_irq_params->irq_src = int_params.irq_source;
3469
3470 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3471 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3472 }
3473#endif
3474
2346ef47
NK
3475 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3476 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3477 * to trigger at end of each vblank, regardless of state of the lock,
3478 * matching DCE behaviour.
3479 */
3480 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3481 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3482 i++) {
3483 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3484
3485 if (r) {
3486 DRM_ERROR("Failed to add vupdate irq id!\n");
3487 return r;
3488 }
3489
3490 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3491 int_params.irq_source =
3492 dc_interrupt_to_irq_source(dc, i, 0);
3493
3494 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3495
3496 c_irq_params->adev = adev;
3497 c_irq_params->irq_src = int_params.irq_source;
3498
ff5ef992 3499 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 3500 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
3501 }
3502
ff5ef992
AD
3503 /* Use GRPH_PFLIP interrupt */
3504 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3505 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3506 i++) {
3760f76c 3507 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
3508 if (r) {
3509 DRM_ERROR("Failed to add page flip irq id!\n");
3510 return r;
3511 }
3512
3513 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3514 int_params.irq_source =
3515 dc_interrupt_to_irq_source(dc, i, 0);
3516
3517 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3518
3519 c_irq_params->adev = adev;
3520 c_irq_params->irq_src = int_params.irq_source;
3521
3522 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3523 dm_pflip_high_irq, c_irq_params);
3524
3525 }
3526
81927e28
JS
3527 /* HPD */
3528 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3529 &adev->hpd_irq);
3530 if (r) {
3531 DRM_ERROR("Failed to add hpd irq id!\n");
3532 return r;
3533 }
a08f16cf 3534
81927e28 3535 register_hpd_handlers(adev);
a08f16cf 3536
81927e28
JS
3537 return 0;
3538}
3539/* Register Outbox IRQ sources and initialize IRQ callbacks */
3540static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3541{
3542 struct dc *dc = adev->dm.dc;
3543 struct common_irq_params *c_irq_params;
3544 struct dc_interrupt_params int_params = {0};
3545 int r, i;
3546
3547 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3548 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3549
3550 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3551 &adev->dmub_outbox_irq);
3552 if (r) {
3553 DRM_ERROR("Failed to add outbox irq id!\n");
3554 return r;
3555 }
3556
3557 if (dc->ctx->dmub_srv) {
3558 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3559 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
a08f16cf 3560 int_params.irq_source =
81927e28 3561 dc_interrupt_to_irq_source(dc, i, 0);
a08f16cf 3562
81927e28 3563 c_irq_params = &adev->dm.dmub_outbox_params[0];
a08f16cf
LHM
3564
3565 c_irq_params->adev = adev;
3566 c_irq_params->irq_src = int_params.irq_source;
3567
3568 amdgpu_dm_irq_register_interrupt(adev, &int_params,
81927e28 3569 dm_dmub_outbox1_low_irq, c_irq_params);
ff5ef992
AD
3570 }
3571
ff5ef992
AD
3572 return 0;
3573}
3574#endif
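
/*
 * Note that the DMCUB outbox source above is registered with
 * INTERRUPT_LOW_IRQ_CONTEXT, unlike the vblank/pflip/vupdate sources:
 * firmware notifications carry no per-frame timing requirement, so (as the
 * context name suggests) dm_dmub_outbox1_low_irq() can be deferred rather
 * than run in the high-priority interrupt path.
 */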

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};
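
/*
 * Usage sketch (hypothetical caller, for illustration only): a check-phase
 * helper that needs to modify the global DC context would do
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context is now a private duplicate, safe to edit
 *
 * The duplicate comes from dm_atomic_duplicate_state() above, so edits are
 * discarded if the atomic check fails and swapped in on commit like any
 * other private-object state.
 */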

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
					    int bl_idx)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps[bl_idx].caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(&caps);
	if (caps.caps_valid) {
		dm->backlight_caps[bl_idx].caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps[bl_idx].min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps[bl_idx].max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps[bl_idx].aux_support)
		return;

	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned *min, unsigned *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}
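
/*
 * Worked example (assuming AMDGPU_MAX_BL_LEVEL is 255): with the PWM
 * defaults min_input_signal = 12 and max_input_signal = 255, the 16-bit
 * range is min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535. A user
 * brightness of 128 then maps to
 *	3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34433,
 * and convert_brightness_to_user(34433) returns 128, so the round trip is
 * stable apart from rounding at the extremes.
 */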

static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
					 int bl_idx,
					 u32 user_brightness)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	dm->brightness[bl_idx] = user_brightness;
	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
	link = (struct dc_link *)dm->backlight_link[bl_idx];

	/* Change brightness based on AUX property */
	if (caps.aux_support) {
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
	} else {
		rc = dc_link_set_backlight_level(link, brightness, 0);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
	}

	return rc ? 0 : 1;
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);

	return 0;
}

static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
					 int bl_idx)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	if (caps.aux_support) {
		u32 avg, peak;
		bool rc;

		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
		if (!rc)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, avg);
	} else {
		int ret = dc_link_get_backlight_level(link);

		if (ret == DC_ERROR_UNEXPECTED)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, ret);
	}
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int i;

	for (i = 0; i < dm->num_of_edps; i++) {
		if (bd == dm->backlight_dev[i])
			break;
	}
	if (i >= AMDGPU_DM_MAX_NUM_EDP)
		i = 0;
	return amdgpu_dm_backlight_get_level(dm, i);
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);

	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
								       adev_to_drm(dm->adev)->dev,
								       dm,
								       &amdgpu_dm_backlight_ops,
								       &props);

	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
#endif
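
/*
 * Device naming note: with a primary DRM minor index of 0, the first eDP
 * panel registers as "amdgpu_bl0" and a second one as "amdgpu_bl1", i.e.
 * they appear under /sys/class/backlight/ (the exact index depends on the
 * DRM minor number).
 */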

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}


static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		if (!dm->backlight_dev[dm->num_of_edps])
			amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev[dm->num_of_edps]) {
			dm->backlight_link[dm->num_of_edps] = link;
			dm->num_of_edps++;
		}
	}
#endif
}


/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* Use Outbox interrupt */
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_YELLOW_CARP:
	case CHIP_RENOIR:
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
	}
#endif

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
				AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}
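
/*
 * For reference, the initialization order above is: mode config and the
 * private atomic object, primary planes (one per stream), at most one
 * overlay plane, CRTCs, the DMCUB outbox IRQ (on ASICs that have one),
 * connectors/encoders with an initial link detect, and finally the per-ASIC
 * display IRQ handlers once all of the software state they reference exists.
 */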

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_atomic_private_obj_fini(&dm->atomic_obj);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_VANGOGH:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_YELLOW_CARP:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	case CHIP_NAVI14:
	case CHIP_DIMGREY_CAVEFISH:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_BEIGE_GOBY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};


static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use
	 * a scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}
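
/*
 * Scale factors are expressed in thousandths, so 1000 == 1.0 (no scaling).
 * With the fallback values used in fill_dc_scaling_info() below, a
 * min_downscale of 250 permits shrinking to 1/4 size and a max_upscale of
 * 16000 permits growing to 16x.
 */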

static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang. To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (state->fb &&
	    state->fb->format->format == DRM_FORMAT_NV12 &&
	    (scaling_info->src_rect.x != 0 ||
	     scaling_info->src_rect.y != 0))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}
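
/*
 * Worked example: a 1920x1080 source rect shown in a 960x540 destination
 * rect yields scale_w = 960 * 1000 / 1920 = 500 and scale_h =
 * 540 * 1000 / 1080 = 500, i.e. a 2:1 downscale, which passes whenever
 * min_downscale is at most 500.
 */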

static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
	    adev->asic_type == CHIP_BEIGE_GOBY ||
	    adev->asic_type == CHIP_YELLOW_CARP ||
	    adev->asic_type == CHIP_VANGOGH)
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

static int
validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

static bool
modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned
modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}

static void
fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
				    union dc_tiling_info *tiling_info,
				    uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);

	fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}

enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};

static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);
	int i;

	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow these modifiers:
	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR ||
	    modifier == DRM_FORMAT_MOD_INVALID) {
		return true;
	}

	/* Check that the modifier is on the list of the plane's supported modifiers. */
	for (i = 0; i < plane->modifier_count; i++) {
		if (modifier == plane->modifiers[i])
			break;
	}
	if (i == plane->modifier_count)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
		/*
		 * We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes.
		 */
		if (info->num_planes > 1)
			return false;
	}

	return true;
}

static void
add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}
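
/*
 * add_modifier() is a growable-array append: it doubles the buffer when
 * capacity runs out, and on allocation failure it frees the list and NULLs
 * *mods, so callers can detect failure with a single check after all
 * appends instead of checking every call.
 */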

static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);

	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				    AMD_FMT_MOD_SET(RB, rb) |
				    AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			    AMD_FMT_MOD_SET(RB, rb) |
			    AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}

static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static int
get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;
4996 *mods = NULL;
4997
4998 /* We have not hooked up any pre-GFX9 modifiers. */
4999 if (adev->family < AMDGPU_FAMILY_AI)
5000 return 0;
5001
5002 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5003
5004 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5005 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5006 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5007 return *mods ? 0 : -ENOMEM;
5008 }
5009
5010 switch (adev->family) {
5011 case AMDGPU_FAMILY_AI:
5012 case AMDGPU_FAMILY_RV:
5013 add_gfx9_modifiers(adev, mods, &size, &capacity);
5014 break;
5015 case AMDGPU_FAMILY_NV:
5016 case AMDGPU_FAMILY_VGH:
1ebcaebd 5017 case AMDGPU_FAMILY_YC:
faa37f54
BN
5018 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
5019 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5020 else
5021 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5022 break;
5023 }
5024
5025 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5026
5027 /* INVALID marks the end of the list. */
5028 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5029
5030 if (!*mods)
5031 return -ENOMEM;
5032
5033 return 0;
5034}
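/*
 * Editorial sketch of the add_modifier() helper used above (the real one
 * is defined earlier in this file). Assumes a doubling growth policy and,
 * as the -ENOMEM checks above expect, NULLs *mods when allocation fails:
 */
static void example_add_modifier(uint64_t **mods, uint64_t *size,
				 uint64_t *capacity, uint64_t mod)
{
	if (!*mods)
		return; /* a previous allocation already failed */

	if (*size == *capacity) {
		uint64_t *new_mods = krealloc(*mods,
					      *capacity * 2 * sizeof(uint64_t),
					      GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL; /* get_plane_modifiers() reports -ENOMEM */
			return;
		}
		*mods = new_mods;
		*capacity *= 2;
	}

	(*mods)[(*size)++] = mod;
}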
5035
37384b3f
BN
5036static int
5037fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5038 const struct amdgpu_framebuffer *afb,
5039 const enum surface_pixel_format format,
5040 const enum dc_rotation_angle rotation,
5041 const struct plane_size *plane_size,
5042 union dc_tiling_info *tiling_info,
5043 struct dc_plane_dcc_param *dcc,
5044 struct dc_plane_address *address,
5045 const bool force_disable_dcc)
5046{
5047 const uint64_t modifier = afb->base.modifier;
2be7f77f 5048 int ret = 0;
37384b3f
BN
5049
5050 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5051 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5052
5053 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5054 uint64_t dcc_address = afb->address + afb->base.offsets[1];
3d360154 5055 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
37384b3f
BN
5056
5057 dcc->enable = 1;
5058 dcc->meta_pitch = afb->base.pitches[1];
3d360154
JA
5059 dcc->independent_64b_blks = independent_64b_blks;
5060 if (independent_64b_blks)
5061 dcc->dcc_ind_blk = hubp_ind_block_64b;
5062 else
5063 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
37384b3f
BN
5064
5065 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5066 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5067 }
5068
5069 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5070 if (ret)
2be7f77f 5071 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
7df7e505 5072
2be7f77f 5073 return ret;
09e5665a
NK
5074}
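/*
 * Editorial sketch, an assumption rather than the driver's verbatim
 * helper: modifier_has_dcc() is taken to test the DCC field of a valid
 * AMD modifier via the uapi macros in <drm/drm_fourcc.h>:
 */
static bool example_modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}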
5075
5076static int
320932bf 5077fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 5078 const struct amdgpu_framebuffer *afb,
695af5f9
NK
5079 const enum surface_pixel_format format,
5080 const enum dc_rotation_angle rotation,
5081 const uint64_t tiling_flags,
09e5665a 5082 union dc_tiling_info *tiling_info,
12e2b2d4 5083 struct plane_size *plane_size,
09e5665a 5084 struct dc_plane_dcc_param *dcc,
87b7ebc2 5085 struct dc_plane_address *address,
5888f07a 5086 bool tmz_surface,
87b7ebc2 5087 bool force_disable_dcc)
09e5665a 5088{
320932bf 5089 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
5090 int ret;
5091
5092 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 5093 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 5094 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
5095 memset(address, 0, sizeof(*address));
5096
5888f07a
HW
5097 address->tmz_surface = tmz_surface;
5098
695af5f9 5099 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
5100 uint64_t addr = afb->address + fb->offsets[0];
5101
12e2b2d4
DL
5102 plane_size->surface_size.x = 0;
5103 plane_size->surface_size.y = 0;
5104 plane_size->surface_size.width = fb->width;
5105 plane_size->surface_size.height = fb->height;
5106 plane_size->surface_pitch =
320932bf
NK
5107 fb->pitches[0] / fb->format->cpp[0];
5108
e0634e8d 5109 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
5110 address->grph.addr.low_part = lower_32_bits(addr);
5111 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 5112 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 5113 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 5114 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 5115
12e2b2d4
DL
5116 plane_size->surface_size.x = 0;
5117 plane_size->surface_size.y = 0;
5118 plane_size->surface_size.width = fb->width;
5119 plane_size->surface_size.height = fb->height;
5120 plane_size->surface_pitch =
320932bf
NK
5121 fb->pitches[0] / fb->format->cpp[0];
5122
12e2b2d4
DL
5123 plane_size->chroma_size.x = 0;
5124 plane_size->chroma_size.y = 0;
320932bf 5125 /* TODO: set these based on surface format */
12e2b2d4
DL
5126 plane_size->chroma_size.width = fb->width / 2;
5127 plane_size->chroma_size.height = fb->height / 2;
320932bf 5128
12e2b2d4 5129 plane_size->chroma_pitch =
320932bf
NK
5130 fb->pitches[1] / fb->format->cpp[1];
5131
e0634e8d
NK
5132 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5133 address->video_progressive.luma_addr.low_part =
be7b9b32 5134 lower_32_bits(luma_addr);
e0634e8d 5135 address->video_progressive.luma_addr.high_part =
be7b9b32 5136 upper_32_bits(luma_addr);
e0634e8d
NK
5137 address->video_progressive.chroma_addr.low_part =
5138 lower_32_bits(chroma_addr);
5139 address->video_progressive.chroma_addr.high_part =
5140 upper_32_bits(chroma_addr);
5141 }
09e5665a 5142
a3241991 5143 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
5144 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5145 rotation, plane_size,
5146 tiling_info, dcc,
5147 address,
5148 force_disable_dcc);
09e5665a
NK
5149 if (ret)
5150 return ret;
a3241991
BN
5151 } else {
5152 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
5153 }
5154
5155 return 0;
7df7e505
NK
5156}
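/*
 * Worked example of the pitch conversion above: DRM pitches[] are in
 * bytes while DC wants pitches in pixels. For a 1920x1080 NV12 surface,
 * fb->pitches[0] = 1920 with cpp[0] = 1 gives surface_pitch = 1920, and
 * fb->pitches[1] = 1920 with cpp[1] = 2 (one CbCr pair per sample) gives
 * chroma_pitch = 960, matching the 960x540 chroma plane set up above.
 */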
5157
d74004b6 5158static void
695af5f9 5159fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
5160 bool *per_pixel_alpha, bool *global_alpha,
5161 int *global_alpha_value)
5162{
5163 *per_pixel_alpha = false;
5164 *global_alpha = false;
5165 *global_alpha_value = 0xff;
5166
5167 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5168 return;
5169
5170 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5171 static const uint32_t alpha_formats[] = {
5172 DRM_FORMAT_ARGB8888,
5173 DRM_FORMAT_RGBA8888,
5174 DRM_FORMAT_ABGR8888,
5175 };
5176 uint32_t format = plane_state->fb->format->format;
5177 unsigned int i;
5178
5179 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5180 if (format == alpha_formats[i]) {
5181 *per_pixel_alpha = true;
5182 break;
5183 }
5184 }
5185 }
5186
5187 if (plane_state->alpha < 0xffff) {
5188 *global_alpha = true;
5189 *global_alpha_value = plane_state->alpha >> 8;
5190 }
5191}
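/*
 * Editorial sketch: for the alpha state read above to exist, the plane
 * must expose the standard DRM blending properties. Something like this
 * hypothetical init helper is assumed; both calls are core DRM APIs from
 * <drm/drm_blend.h>. Note the 16-bit DRM alpha (0x0000..0xffff) is
 * narrowed to DC's 8-bit range by the >> 8 above, e.g. 0x8000 -> 0x80.
 */
static int example_enable_blending_props(struct drm_plane *plane)
{
	int ret = drm_plane_create_alpha_property(plane);

	if (ret)
		return ret;

	return drm_plane_create_blend_mode_property(plane,
			BIT(DRM_MODE_BLEND_PIXEL_NONE) |
			BIT(DRM_MODE_BLEND_PREMULTI));
}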
5192
004fefa3
NK
5193static int
5194fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 5195 const enum surface_pixel_format format,
004fefa3
NK
5196 enum dc_color_space *color_space)
5197{
5198 bool full_range;
5199
5200 *color_space = COLOR_SPACE_SRGB;
5201
5202 /* DRM color properties only affect non-RGB formats. */
695af5f9 5203 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
5204 return 0;
5205
5206 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5207
5208 switch (plane_state->color_encoding) {
5209 case DRM_COLOR_YCBCR_BT601:
5210 if (full_range)
5211 *color_space = COLOR_SPACE_YCBCR601;
5212 else
5213 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5214 break;
5215
5216 case DRM_COLOR_YCBCR_BT709:
5217 if (full_range)
5218 *color_space = COLOR_SPACE_YCBCR709;
5219 else
5220 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5221 break;
5222
5223 case DRM_COLOR_YCBCR_BT2020:
5224 if (full_range)
5225 *color_space = COLOR_SPACE_2020_YCBCR;
5226 else
5227 return -EINVAL;
5228 break;
5229
5230 default:
5231 return -EINVAL;
5232 }
5233
5234 return 0;
5235}
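/*
 * Editorial sketch: for plane_state->color_encoding/color_range above to
 * be user-controllable, the plane needs the standard DRM color
 * properties; a hypothetical init helper using the core DRM API from
 * <drm/drm_color_mgmt.h>:
 */
static int example_enable_color_props(struct drm_plane *plane)
{
	return drm_plane_create_color_properties(plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709,
			DRM_COLOR_YCBCR_LIMITED_RANGE);
}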
5236
695af5f9
NK
5237static int
5238fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5239 const struct drm_plane_state *plane_state,
5240 const uint64_t tiling_flags,
5241 struct dc_plane_info *plane_info,
87b7ebc2 5242 struct dc_plane_address *address,
5888f07a 5243 bool tmz_surface,
87b7ebc2 5244 bool force_disable_dcc)
695af5f9
NK
5245{
5246 const struct drm_framebuffer *fb = plane_state->fb;
5247 const struct amdgpu_framebuffer *afb =
5248 to_amdgpu_framebuffer(plane_state->fb);
695af5f9
NK
5249 int ret;
5250
5251 memset(plane_info, 0, sizeof(*plane_info));
5252
5253 switch (fb->format->format) {
5254 case DRM_FORMAT_C8:
5255 plane_info->format =
5256 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5257 break;
5258 case DRM_FORMAT_RGB565:
5259 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5260 break;
5261 case DRM_FORMAT_XRGB8888:
5262 case DRM_FORMAT_ARGB8888:
5263 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5264 break;
5265 case DRM_FORMAT_XRGB2101010:
5266 case DRM_FORMAT_ARGB2101010:
5267 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5268 break;
5269 case DRM_FORMAT_XBGR2101010:
5270 case DRM_FORMAT_ABGR2101010:
5271 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5272 break;
5273 case DRM_FORMAT_XBGR8888:
5274 case DRM_FORMAT_ABGR8888:
5275 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5276 break;
5277 case DRM_FORMAT_NV21:
5278 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5279 break;
5280 case DRM_FORMAT_NV12:
5281 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5282 break;
cbec6477
SW
5283 case DRM_FORMAT_P010:
5284 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5285 break;
492548dc
SW
5286 case DRM_FORMAT_XRGB16161616F:
5287 case DRM_FORMAT_ARGB16161616F:
5288 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5289 break;
2a5195dc
MK
5290 case DRM_FORMAT_XBGR16161616F:
5291 case DRM_FORMAT_ABGR16161616F:
5292 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5293 break;
58020403
MK
5294 case DRM_FORMAT_XRGB16161616:
5295 case DRM_FORMAT_ARGB16161616:
5296 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5297 break;
5298 case DRM_FORMAT_XBGR16161616:
5299 case DRM_FORMAT_ABGR16161616:
5300 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5301 break;
695af5f9
NK
5302 default:
5303 DRM_ERROR(
92f1d09c
SA
5304 "Unsupported screen format %p4cc\n",
5305 &fb->format->format);
695af5f9
NK
5306 return -EINVAL;
5307 }
5308
5309 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5310 case DRM_MODE_ROTATE_0:
5311 plane_info->rotation = ROTATION_ANGLE_0;
5312 break;
5313 case DRM_MODE_ROTATE_90:
5314 plane_info->rotation = ROTATION_ANGLE_90;
5315 break;
5316 case DRM_MODE_ROTATE_180:
5317 plane_info->rotation = ROTATION_ANGLE_180;
5318 break;
5319 case DRM_MODE_ROTATE_270:
5320 plane_info->rotation = ROTATION_ANGLE_270;
5321 break;
5322 default:
5323 plane_info->rotation = ROTATION_ANGLE_0;
5324 break;
5325 }
5326
5327 plane_info->visible = true;
5328 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5329
6d83a32d
MS
5330 plane_info->layer_index = 0;
5331
695af5f9
NK
5332 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5333 &plane_info->color_space);
5334 if (ret)
5335 return ret;
5336
5337 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5338 plane_info->rotation, tiling_flags,
5339 &plane_info->tiling_info,
5340 &plane_info->plane_size,
5888f07a 5341 &plane_info->dcc, address, tmz_surface,
87b7ebc2 5342 force_disable_dcc);
695af5f9
NK
5343 if (ret)
5344 return ret;
5345
5346 fill_blending_from_plane_state(
5347 plane_state, &plane_info->per_pixel_alpha,
5348 &plane_info->global_alpha, &plane_info->global_alpha_value);
5349
5350 return 0;
5351}
5352
5353static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5354 struct dc_plane_state *dc_plane_state,
5355 struct drm_plane_state *plane_state,
5356 struct drm_crtc_state *crtc_state)
e7b07cee 5357{
cf020d49 5358 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 5359 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
5360 struct dc_scaling_info scaling_info;
5361 struct dc_plane_info plane_info;
695af5f9 5362 int ret;
87b7ebc2 5363 bool force_disable_dcc = false;
e7b07cee 5364
695af5f9
NK
5365 ret = fill_dc_scaling_info(plane_state, &scaling_info);
5366 if (ret)
5367 return ret;
e7b07cee 5368
695af5f9
NK
5369 dc_plane_state->src_rect = scaling_info.src_rect;
5370 dc_plane_state->dst_rect = scaling_info.dst_rect;
5371 dc_plane_state->clip_rect = scaling_info.clip_rect;
5372 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 5373
87b7ebc2 5374 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 5375 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 5376 afb->tiling_flags,
695af5f9 5377 &plane_info,
87b7ebc2 5378 &dc_plane_state->address,
6eed95b0 5379 afb->tmz_surface,
87b7ebc2 5380 force_disable_dcc);
004fefa3
NK
5381 if (ret)
5382 return ret;
5383
695af5f9
NK
5384 dc_plane_state->format = plane_info.format;
5385 dc_plane_state->color_space = plane_info.color_space;
5387 dc_plane_state->plane_size = plane_info.plane_size;
5388 dc_plane_state->rotation = plane_info.rotation;
5389 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5390 dc_plane_state->stereo_format = plane_info.stereo_format;
5391 dc_plane_state->tiling_info = plane_info.tiling_info;
5392 dc_plane_state->visible = plane_info.visible;
5393 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5394 dc_plane_state->global_alpha = plane_info.global_alpha;
5395 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5396 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 5397 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
7afa0033 5398 dc_plane_state->flip_int_enabled = true;
695af5f9 5399
e277adc5
LSL
5400 /*
5401 * Always set input transfer function, since plane state is refreshed
5402 * every time.
5403 */
cf020d49
NK
5404 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5405 if (ret)
5406 return ret;
e7b07cee 5407
cf020d49 5408 return 0;
e7b07cee
HW
5409}
5410
3ee6b26b
AD
5411static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5412 const struct dm_connector_state *dm_state,
5413 struct dc_stream_state *stream)
e7b07cee
HW
5414{
5415 enum amdgpu_rmx_type rmx_type;
5416
5417 struct rect src = { 0 }; /* viewport in composition space */
5418 struct rect dst = { 0 }; /* stream addressable area */
5419
5420 /* no mode. nothing to be done */
5421 if (!mode)
5422 return;
5423
5424 /* Full screen scaling by default */
5425 src.width = mode->hdisplay;
5426 src.height = mode->vdisplay;
5427 dst.width = stream->timing.h_addressable;
5428 dst.height = stream->timing.v_addressable;
5429
f4791779
HW
5430 if (dm_state) {
5431 rmx_type = dm_state->scaling;
5432 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5433 if (src.width * dst.height <
5434 src.height * dst.width) {
5435 /* height needs less upscaling/more downscaling */
5436 dst.width = src.width *
5437 dst.height / src.height;
5438 } else {
5439 /* width needs less upscaling/more downscaling */
5440 dst.height = src.height *
5441 dst.width / src.width;
5442 }
5443 } else if (rmx_type == RMX_CENTER) {
5444 dst = src;
e7b07cee 5445 }
e7b07cee 5446
f4791779
HW
5447 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5448 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 5449
f4791779
HW
5450 if (dm_state->underscan_enable) {
5451 dst.x += dm_state->underscan_hborder / 2;
5452 dst.y += dm_state->underscan_vborder / 2;
5453 dst.width -= dm_state->underscan_hborder;
5454 dst.height -= dm_state->underscan_vborder;
5455 }
e7b07cee
HW
5456 }
5457
5458 stream->src = src;
5459 stream->dst = dst;
5460
4711c033
LT
5461 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5462 dst.x, dst.y, dst.width, dst.height);
e7b07cee
HW
5463
5464}
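/*
 * Worked example of the RMX_ASPECT math above: a 1280x1024 (5:4) source
 * on a 1920x1080 stream gives src.width * dst.height = 1382400, which is
 * less than src.height * dst.width = 1966080, so the width is shrunk:
 * dst.width = 1280 * 1080 / 1024 = 1350, then centered with
 * dst.x = (1920 - 1350) / 2 = 285.
 */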
5465
3ee6b26b 5466static enum dc_color_depth
42ba01fc 5467convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 5468 bool is_y420, int requested_bpc)
e7b07cee 5469{
1bc22f20 5470 uint8_t bpc;
01c22997 5471
1bc22f20
SW
5472 if (is_y420) {
5473 bpc = 8;
5474
5475 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5476 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5477 bpc = 16;
5478 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5479 bpc = 12;
5480 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5481 bpc = 10;
5482 } else {
5483 bpc = (uint8_t)connector->display_info.bpc;
5484 /* Assume 8 bpc by default if no bpc is specified. */
5485 bpc = bpc ? bpc : 8;
5486 }
e7b07cee 5487
cbd14ae7 5488 if (requested_bpc > 0) {
01c22997
NK
5489 /*
5490 * Cap display bpc based on the user requested value.
5491 *
5492 * The value for state->max_bpc may not correctly updated
5493 * depending on when the connector gets added to the state
5494 * or if this was called outside of atomic check, so it
5495 * can't be used directly.
5496 */
cbd14ae7 5497 bpc = min_t(u8, bpc, requested_bpc);
01c22997 5498
1825fd34
NK
5499 /* Round down to the nearest even number. */
5500 bpc = bpc - (bpc & 1);
5501 }
07e3a1cf 5502
e7b07cee
HW
5503 switch (bpc) {
5504 case 0:
1f6010a9
DF
5505 /*
5506 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
5507 * EDID revisions before 1.4.
5508 * TODO: Fix edid parsing
5509 */
5510 return COLOR_DEPTH_888;
5511 case 6:
5512 return COLOR_DEPTH_666;
5513 case 8:
5514 return COLOR_DEPTH_888;
5515 case 10:
5516 return COLOR_DEPTH_101010;
5517 case 12:
5518 return COLOR_DEPTH_121212;
5519 case 14:
5520 return COLOR_DEPTH_141414;
5521 case 16:
5522 return COLOR_DEPTH_161616;
5523 default:
5524 return COLOR_DEPTH_UNDEFINED;
5525 }
5526}
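/*
 * Worked example of the capping above: a sink advertising 12 bpc with a
 * user-requested max of 11 yields bpc = min(12, 11) = 11, and the
 * round-down-to-even step (bpc - (bpc & 1)) lands on 10, i.e.
 * COLOR_DEPTH_101010.
 */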
5527
3ee6b26b
AD
5528static enum dc_aspect_ratio
5529get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 5530{
e11d4147
LSL
5531 /* 1-1 mapping, since both enums follow the HDMI spec. */
5532 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
5533}
5534
3ee6b26b
AD
5535static enum dc_color_space
5536get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
5537{
5538 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5539
5540 switch (dc_crtc_timing->pixel_encoding) {
5541 case PIXEL_ENCODING_YCBCR422:
5542 case PIXEL_ENCODING_YCBCR444:
5543 case PIXEL_ENCODING_YCBCR420:
5544 {
5545 /*
5546 * 27.03 MHz is the separation point between HDTV and SDTV
5547 * according to the HDMI spec; we use YCbCr709 and YCbCr601
5548 * respectively.
5549 */
380604e2 5550 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
5551 if (dc_crtc_timing->flags.Y_ONLY)
5552 color_space =
5553 COLOR_SPACE_YCBCR709_LIMITED;
5554 else
5555 color_space = COLOR_SPACE_YCBCR709;
5556 } else {
5557 if (dc_crtc_timing->flags.Y_ONLY)
5558 color_space =
5559 COLOR_SPACE_YCBCR601_LIMITED;
5560 else
5561 color_space = COLOR_SPACE_YCBCR601;
5562 }
5563
5564 }
5565 break;
5566 case PIXEL_ENCODING_RGB:
5567 color_space = COLOR_SPACE_SRGB;
5568 break;
5569
5570 default:
5571 WARN_ON(1);
5572 break;
5573 }
5574
5575 return color_space;
5576}
5577
ea117312
TA
5578static bool adjust_colour_depth_from_display_info(
5579 struct dc_crtc_timing *timing_out,
5580 const struct drm_display_info *info)
400443e8 5581{
ea117312 5582 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 5583 int normalized_clk;
400443e8 5584 do {
380604e2 5585 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
5586 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5587 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5588 normalized_clk /= 2;
5589 /* Adjusting pix clock following on HDMI spec based on colour depth */
ea117312
TA
5590 switch (depth) {
5591 case COLOR_DEPTH_888:
5592 break;
400443e8
ML
5593 case COLOR_DEPTH_101010:
5594 normalized_clk = (normalized_clk * 30) / 24;
5595 break;
5596 case COLOR_DEPTH_121212:
5597 normalized_clk = (normalized_clk * 36) / 24;
5598 break;
5599 case COLOR_DEPTH_161616:
5600 normalized_clk = (normalized_clk * 48) / 24;
5601 break;
5602 default:
ea117312
TA
5603 /* The above depths are the only ones valid for HDMI. */
5604 return false;
400443e8 5605 }
ea117312
TA
5606 if (normalized_clk <= info->max_tmds_clock) {
5607 timing_out->display_color_depth = depth;
5608 return true;
5609 }
5610 } while (--depth > COLOR_DEPTH_666);
5611 return false;
400443e8 5612}
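/*
 * Worked example of the loop above: a 3840x2160@60 stream has
 * pix_clk_100hz = 5940000, so normalized_clk = 594000 kHz. Trying
 * COLOR_DEPTH_101010 gives 594000 * 30 / 24 = 742500 kHz, above a typical
 * max_tmds_clock of 600000 kHz, so the loop steps down one depth and
 * COLOR_DEPTH_888 (594000 kHz) is the deepest that fits.
 */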
e7b07cee 5613
42ba01fc
NK
5614static void fill_stream_properties_from_drm_display_mode(
5615 struct dc_stream_state *stream,
5616 const struct drm_display_mode *mode_in,
5617 const struct drm_connector *connector,
5618 const struct drm_connector_state *connector_state,
cbd14ae7
SW
5619 const struct dc_stream_state *old_stream,
5620 int requested_bpc)
e7b07cee
HW
5621{
5622 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 5623 const struct drm_display_info *info = &connector->display_info;
d4252eee 5624 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
5625 struct hdmi_vendor_infoframe hv_frame;
5626 struct hdmi_avi_infoframe avi_frame;
e7b07cee 5627
acf83f86
WL
5628 memset(&hv_frame, 0, sizeof(hv_frame));
5629 memset(&avi_frame, 0, sizeof(avi_frame));
5630
e7b07cee
HW
5631 timing_out->h_border_left = 0;
5632 timing_out->h_border_right = 0;
5633 timing_out->v_border_top = 0;
5634 timing_out->v_border_bottom = 0;
5635 /* TODO: un-hardcode */
fe61a2f1 5636 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 5637 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 5638 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
5639 else if (drm_mode_is_420_also(info, mode_in)
5640 && aconnector->force_yuv420_output)
5641 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 5642 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 5643 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
5644 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5645 else
5646 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5647
5648 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5649 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
5650 connector,
5651 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5652 requested_bpc);
e7b07cee
HW
5653 timing_out->scan_type = SCANNING_TYPE_NODATA;
5654 timing_out->hdmi_vic = 0;
b333730d
BL
5655
5656 if (old_stream) {
5657 timing_out->vic = old_stream->timing.vic;
5658 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5659 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5660 } else {
5661 timing_out->vic = drm_match_cea_mode(mode_in);
5662 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5663 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5664 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5665 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5666 }
e7b07cee 5667
1cb1d477
WL
5668 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5669 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5670 timing_out->vic = avi_frame.video_code;
5671 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5672 timing_out->hdmi_vic = hv_frame.vic;
5673 }
5674
fe8858bb
NC
5675 if (is_freesync_video_mode(mode_in, aconnector)) {
5676 timing_out->h_addressable = mode_in->hdisplay;
5677 timing_out->h_total = mode_in->htotal;
5678 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5679 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5680 timing_out->v_total = mode_in->vtotal;
5681 timing_out->v_addressable = mode_in->vdisplay;
5682 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5683 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5684 timing_out->pix_clk_100hz = mode_in->clock * 10;
5685 } else {
5686 timing_out->h_addressable = mode_in->crtc_hdisplay;
5687 timing_out->h_total = mode_in->crtc_htotal;
5688 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5689 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5690 timing_out->v_total = mode_in->crtc_vtotal;
5691 timing_out->v_addressable = mode_in->crtc_vdisplay;
5692 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5693 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5694 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5695 }
a85ba005 5696
e7b07cee 5697 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
5698
5699 stream->output_color_space = get_output_color_space(timing_out);
5700
e43a432c
AK
5701 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5702 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
5703 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5704 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5705 drm_mode_is_420_also(info, mode_in) &&
5706 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5707 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5708 adjust_colour_depth_from_display_info(timing_out, info);
5709 }
5710 }
e7b07cee
HW
5711}
5712
3ee6b26b
AD
5713static void fill_audio_info(struct audio_info *audio_info,
5714 const struct drm_connector *drm_connector,
5715 const struct dc_sink *dc_sink)
e7b07cee
HW
5716{
5717 int i = 0;
5718 int cea_revision = 0;
5719 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5720
5721 audio_info->manufacture_id = edid_caps->manufacturer_id;
5722 audio_info->product_id = edid_caps->product_id;
5723
5724 cea_revision = drm_connector->display_info.cea_rev;
5725
090afc1e 5726 strscpy(audio_info->display_name,
d2b2562c 5727 edid_caps->display_name,
090afc1e 5728 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 5729
b830ebc9 5730 if (cea_revision >= 3) {
e7b07cee
HW
5731 audio_info->mode_count = edid_caps->audio_mode_count;
5732
5733 for (i = 0; i < audio_info->mode_count; ++i) {
5734 audio_info->modes[i].format_code =
5735 (enum audio_format_code)
5736 (edid_caps->audio_modes[i].format_code);
5737 audio_info->modes[i].channel_count =
5738 edid_caps->audio_modes[i].channel_count;
5739 audio_info->modes[i].sample_rates.all =
5740 edid_caps->audio_modes[i].sample_rate;
5741 audio_info->modes[i].sample_size =
5742 edid_caps->audio_modes[i].sample_size;
5743 }
5744 }
5745
5746 audio_info->flags.all = edid_caps->speaker_flags;
5747
5748 /* TODO: We only check progressive mode; check interlace mode too */
b830ebc9 5749 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5750 audio_info->video_latency = drm_connector->video_latency[0];
5751 audio_info->audio_latency = drm_connector->audio_latency[0];
5752 }
5753
5754 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5755
5756}
5757
3ee6b26b
AD
5758static void
5759copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5760 struct drm_display_mode *dst_mode)
e7b07cee
HW
5761{
5762 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5763 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5764 dst_mode->crtc_clock = src_mode->crtc_clock;
5765 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5766 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5767 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
5768 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5769 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5770 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5771 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5772 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5773 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5774 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5775 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5776}
5777
3ee6b26b
AD
5778static void
5779decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5780 const struct drm_display_mode *native_mode,
5781 bool scale_enabled)
e7b07cee
HW
5782{
5783 if (scale_enabled) {
5784 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5785 } else if (native_mode->clock == drm_mode->clock &&
5786 native_mode->htotal == drm_mode->htotal &&
5787 native_mode->vtotal == drm_mode->vtotal) {
5788 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5789 } else {
5790 /* neither scaling nor an amdgpu-inserted mode: nothing to patch */
5791 }
5792}
5793
aed15309
ML
5794static struct dc_sink *
5795create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5796{
2e0ac3d6 5797 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5798 struct dc_sink *sink = NULL;
2e0ac3d6
HW
5799 sink_init_data.link = aconnector->dc_link;
5800 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5801
5802 sink = dc_sink_create(&sink_init_data);
423788c7 5803 if (!sink) {
2e0ac3d6 5804 DRM_ERROR("Failed to create sink!\n");
aed15309 5805 return NULL;
423788c7 5806 }
2e0ac3d6 5807 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5808
aed15309 5809 return sink;
2e0ac3d6
HW
5810}
5811
fa2123db
ML
5812static void set_multisync_trigger_params(
5813 struct dc_stream_state *stream)
5814{
ec372186
ML
5815 struct dc_stream_state *master = NULL;
5816
fa2123db 5817 if (stream->triggered_crtc_reset.enabled) {
ec372186
ML
5818 master = stream->triggered_crtc_reset.event_source;
5819 stream->triggered_crtc_reset.event =
5820 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5821 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5822 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
fa2123db
ML
5823 }
5824}
5825
5826static void set_master_stream(struct dc_stream_state *stream_set[],
5827 int stream_count)
5828{
5829 int j, highest_rfr = 0, master_stream = 0;
5830
5831 for (j = 0; j < stream_count; j++) {
5832 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5833 int refresh_rate = 0;
5834
380604e2 5835 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
5836 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5837 if (refresh_rate > highest_rfr) {
5838 highest_rfr = refresh_rate;
5839 master_stream = j;
5840 }
5841 }
5842 }
5843 for (j = 0; j < stream_count; j++) {
03736f4c 5844 if (stream_set[j])
fa2123db
ML
5845 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5846 }
5847}
5848
5849static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5850{
5851 int i = 0;
ec372186 5852 struct dc_stream_state *stream;
fa2123db
ML
5853
5854 if (context->stream_count < 2)
5855 return;
5856 for (i = 0; i < context->stream_count ; i++) {
5857 if (!context->streams[i])
5858 continue;
1f6010a9
DF
5859 /*
5860 * TODO: add a function to read AMD VSDB bits and set
fa2123db 5861 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 5862 * For now it's set to false
fa2123db 5863 */
fa2123db 5864 }
ec372186 5865
fa2123db 5866 set_master_stream(context->streams, context->stream_count);
ec372186
ML
5867
5868 for (i = 0; i < context->stream_count ; i++) {
5869 stream = context->streams[i];
5870
5871 if (!stream)
5872 continue;
5873
5874 set_multisync_trigger_params(stream);
5875 }
fa2123db
ML
5876}
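/*
 * Worked example of the master selection above: a 1080p60 stream has
 * pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125, giving
 * refresh_rate = (1485000 * 100) / (2200 * 1125) = 60. The stream with
 * the highest such rate becomes the triggered_crtc_reset event source
 * for every other stream in the context.
 */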
5877
ea2be5c0 5878#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
5879static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5880 struct dc_sink *sink, struct dc_stream_state *stream,
5881 struct dsc_dec_dpcd_caps *dsc_caps)
5882{
5883 stream->timing.flags.DSC = 0;
5884
5885 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
998b7ad2
FZ
5886 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5887 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5888 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5889 dsc_caps);
998b7ad2
FZ
5890 }
5891}
5892
5893static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5894 struct dc_sink *sink, struct dc_stream_state *stream,
5895 struct dsc_dec_dpcd_caps *dsc_caps)
5896{
5897 struct drm_connector *drm_connector = &aconnector->base;
5898 uint32_t link_bandwidth_kbps;
f1c1a982 5899 uint32_t max_dsc_target_bpp_limit_override = 0;
998b7ad2
FZ
5900
5901 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5902 dc_link_get_link_cap(aconnector->dc_link));
f1c1a982
RL
5903
5904 if (stream->link && stream->link->local_sink)
5905 max_dsc_target_bpp_limit_override =
5906 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
5907
998b7ad2
FZ
5908 /* Set DSC policy according to dsc_clock_en */
5909 dc_dsc_policy_set_enable_dsc_when_not_needed(
5910 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5911
5912 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5913
5914 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5915 dsc_caps,
5916 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
f1c1a982 5917 max_dsc_target_bpp_limit_override,
998b7ad2
FZ
5918 link_bandwidth_kbps,
5919 &stream->timing,
5920 &stream->timing.dsc_cfg)) {
5921 stream->timing.flags.DSC = 1;
5922 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5923 }
5924 }
5925
5926 /* Overwrite the stream flag if DSC is enabled through debugfs */
5927 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5928 stream->timing.flags.DSC = 1;
5929
5930 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5931 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5932
5933 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5934 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5935
5936 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5937 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
998b7ad2 5938}
ea2be5c0 5939#endif
998b7ad2 5940
5fd953a3
RS
5941/**
5942 * DOC: FreeSync Video
5943 *
5944 * When a userspace application wants to play a video, the content follows a
5945 * standard format definition that usually specifies the FPS for that format.
5946 * The list below illustrates some common video formats and the FPS
5947 * typically expected for each:
5948 *
5949 * - TV/NTSC (23.976 FPS)
5950 * - Cinema (24 FPS)
5951 * - TV/PAL (25 FPS)
5952 * - TV/NTSC (29.97 FPS)
5953 * - TV/NTSC (30 FPS)
5954 * - Cinema HFR (48 FPS)
5955 * - TV/PAL (50 FPS)
5956 * - Commonly used (60 FPS)
5957 * - Multiples of 24 (48,72,96 FPS)
5958 *
5959 * The list of standard video formats is small and can be added to the
5960 * connector's modeset list beforehand. With that, userspace can leverage
5961 * FreeSync to extend the front porch in order to attain the target refresh
5962 * rate. Such a switch will happen seamlessly, without screen blanking or
5963 * reprogramming of the output in any other way. If the userspace requests a
5964 * modesetting change compatible with FreeSync modes that only differ in the
5965 * refresh rate, DC will skip the full update and avoid blink during the
5966 * transition. For example, the video player can change the modesetting from
5967 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5968 * causing any display blink. This same concept can be applied to a mode
5969 * setting change.
5970 */
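/*
 * Worked example of the front porch stretch described above, under the
 * assumption that only v_front_porch is retimed: keeping the 1080p base
 * timing (148.5 MHz pixel clock, h_total 2200, v_total 1125 at 60 Hz), a
 * 48 Hz target needs v_total = 148500000 / (2200 * 48) ~= 1406, i.e. the
 * front porch grows by about 281 lines while the mode is otherwise
 * unchanged.
 */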
a85ba005
NC
5971static struct drm_display_mode *
5972get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5973 bool use_probed_modes)
5974{
5975 struct drm_display_mode *m, *m_pref = NULL;
5976 u16 current_refresh, highest_refresh;
5977 struct list_head *list_head = use_probed_modes ?
5978 &aconnector->base.probed_modes :
5979 &aconnector->base.modes;
5980
5981 if (aconnector->freesync_vid_base.clock != 0)
5982 return &aconnector->freesync_vid_base;
5983
5984 /* Find the preferred mode */
5985 list_for_each_entry(m, list_head, head) {
5986 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5987 m_pref = m;
5988 break;
5989 }
5990 }
5991
5992 if (!m_pref) {
5993 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5994 m_pref = list_first_entry_or_null(
5995 &aconnector->base.modes, struct drm_display_mode, head);
5996 if (!m_pref) {
5997 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5998 return NULL;
5999 }
6000 }
6001
6002 highest_refresh = drm_mode_vrefresh(m_pref);
6003
6004 /*
6005 * Find the mode with the highest refresh rate at the same resolution.
6006 * For some monitors, preferred mode is not the mode with highest
6007 * supported refresh rate.
6008 */
6009 list_for_each_entry(m, list_head, head) {
6010 current_refresh = drm_mode_vrefresh(m);
6011
6012 if (m->hdisplay == m_pref->hdisplay &&
6013 m->vdisplay == m_pref->vdisplay &&
6014 highest_refresh < current_refresh) {
6015 highest_refresh = current_refresh;
6016 m_pref = m;
6017 }
6018 }
6019
6020 aconnector->freesync_vid_base = *m_pref;
6021 return m_pref;
6022}
6023
fe8858bb 6024static bool is_freesync_video_mode(const struct drm_display_mode *mode,
a85ba005
NC
6025 struct amdgpu_dm_connector *aconnector)
6026{
6027 struct drm_display_mode *high_mode;
6028 int timing_diff;
6029
6030 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6031 if (!high_mode || !mode)
6032 return false;
6033
6034 timing_diff = high_mode->vtotal - mode->vtotal;
6035
6036 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6037 high_mode->hdisplay != mode->hdisplay ||
6038 high_mode->vdisplay != mode->vdisplay ||
6039 high_mode->hsync_start != mode->hsync_start ||
6040 high_mode->hsync_end != mode->hsync_end ||
6041 high_mode->htotal != mode->htotal ||
6042 high_mode->hskew != mode->hskew ||
6043 high_mode->vscan != mode->vscan ||
6044 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6045 high_mode->vsync_end - mode->vsync_end != timing_diff)
6046 return false;
6047 else
6048 return true;
6049}
6050
3ee6b26b
AD
6051static struct dc_stream_state *
6052create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6053 const struct drm_display_mode *drm_mode,
b333730d 6054 const struct dm_connector_state *dm_state,
cbd14ae7
SW
6055 const struct dc_stream_state *old_stream,
6056 int requested_bpc)
e7b07cee
HW
6057{
6058 struct drm_display_mode *preferred_mode = NULL;
391ef035 6059 struct drm_connector *drm_connector;
42ba01fc
NK
6060 const struct drm_connector_state *con_state =
6061 dm_state ? &dm_state->base : NULL;
0971c40e 6062 struct dc_stream_state *stream = NULL;
e7b07cee 6063 struct drm_display_mode mode = *drm_mode;
a85ba005
NC
6064 struct drm_display_mode saved_mode;
6065 struct drm_display_mode *freesync_mode = NULL;
e7b07cee 6066 bool native_mode_found = false;
b0781603
NK
6067 bool recalculate_timing = false;
6068 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
b333730d 6069 int mode_refresh;
58124bf8 6070 int preferred_refresh = 0;
defeb878 6071#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 6072 struct dsc_dec_dpcd_caps dsc_caps;
7c431455 6073#endif
aed15309 6074 struct dc_sink *sink = NULL;
a85ba005
NC
6075
6076 memset(&saved_mode, 0, sizeof(saved_mode));
6077
b830ebc9 6078 if (aconnector == NULL) {
e7b07cee 6079 DRM_ERROR("aconnector is NULL!\n");
64245fa7 6080 return stream;
e7b07cee
HW
6081 }
6082
e7b07cee 6083 drm_connector = &aconnector->base;
2e0ac3d6 6084
f4ac176e 6085 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
6086 sink = create_fake_sink(aconnector);
6087 if (!sink)
6088 return stream;
aed15309
ML
6089 } else {
6090 sink = aconnector->dc_sink;
dcd5fb82 6091 dc_sink_retain(sink);
f4ac176e 6092 }
2e0ac3d6 6093
aed15309 6094 stream = dc_create_stream_for_sink(sink);
4562236b 6095
b830ebc9 6096 if (stream == NULL) {
e7b07cee 6097 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 6098 goto finish;
e7b07cee
HW
6099 }
6100
ceb3dbb4
JL
6101 stream->dm_stream_context = aconnector;
6102
4a36fcba
WL
6103 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6104 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6105
e7b07cee
HW
6106 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6107 /* Search for preferred mode */
6108 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6109 native_mode_found = true;
6110 break;
6111 }
6112 }
6113 if (!native_mode_found)
6114 preferred_mode = list_first_entry_or_null(
6115 &aconnector->base.modes,
6116 struct drm_display_mode,
6117 head);
6118
b333730d
BL
6119 mode_refresh = drm_mode_vrefresh(&mode);
6120
b830ebc9 6121 if (preferred_mode == NULL) {
1f6010a9
DF
6122 /*
6123 * This may not be an error; the use case is when we have no
e7b07cee
HW
6124 * usermode calls to reset and set mode upon hotplug. In this
6125 * case, we call set mode ourselves to restore the previous mode
6126 * and the mode list may not be filled in yet.
6127 */
f1ad2f5e 6128 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee 6129 } else {
b0781603 6130 recalculate_timing = amdgpu_freesync_vid_mode &&
a85ba005
NC
6131 is_freesync_video_mode(&mode, aconnector);
6132 if (recalculate_timing) {
6133 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6134 saved_mode = mode;
6135 mode = *freesync_mode;
6136 } else {
6137 decide_crtc_timing_for_drm_display_mode(
b0781603 6138 &mode, preferred_mode, scale);
a85ba005 6139
b0781603
NK
6140 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6141 }
e7b07cee
HW
6142 }
6143
a85ba005
NC
6144 if (recalculate_timing)
6145 drm_mode_set_crtcinfo(&saved_mode, 0);
fe8858bb 6146 else if (!dm_state)
f783577c
JFZ
6147 drm_mode_set_crtcinfo(&mode, 0);
6148
a85ba005 6149 /*
b333730d
BL
6150 * If scaling is enabled and refresh rate didn't change
6151 * we copy the vic and polarities of the old timings
6152 */
b0781603 6153 if (!scale || mode_refresh != preferred_refresh)
a85ba005
NC
6154 fill_stream_properties_from_drm_display_mode(
6155 stream, &mode, &aconnector->base, con_state, NULL,
6156 requested_bpc);
b333730d 6157 else
a85ba005
NC
6158 fill_stream_properties_from_drm_display_mode(
6159 stream, &mode, &aconnector->base, con_state, old_stream,
6160 requested_bpc);
b333730d 6161
defeb878 6162#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6163 /* SST DSC determination policy */
6164 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6165 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6166 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
39a4eb85
WL
6167#endif
6168
e7b07cee
HW
6169 update_stream_scaling_settings(&mode, dm_state, stream);
6170
6171 fill_audio_info(
6172 &stream->audio_info,
6173 drm_connector,
aed15309 6174 sink);
e7b07cee 6175
ceb3dbb4 6176 update_stream_signal(stream, sink);
9182b4cb 6177
d832fc3b 6178 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
6179 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6180
8a488f5d
RL
6181 if (stream->link->psr_settings.psr_feature_enabled) {
6182 /*
6183 * Decide whether the stream supports VSC SDP colorimetry
6184 * before building the VSC info packet.
6185 */
6186 stream->use_vsc_sdp_for_colorimetry = false;
6187 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6188 stream->use_vsc_sdp_for_colorimetry =
6189 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6190 } else {
6191 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6192 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 6193 }
8a488f5d 6194 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
1a365683
RL
6195 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6196
8c322309 6197 }
aed15309 6198finish:
dcd5fb82 6199 dc_sink_release(sink);
9e3efe3e 6200
e7b07cee
HW
6201 return stream;
6202}
6203
7578ecda 6204static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
6205{
6206 drm_crtc_cleanup(crtc);
6207 kfree(crtc);
6208}
6209
6210static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 6211 struct drm_crtc_state *state)
e7b07cee
HW
6212{
6213 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6214
6215 /* TODO: Destroy dc_stream objects once the stream object is flattened */
6216 if (cur->stream)
6217 dc_stream_release(cur->stream);
6218
6219
6220 __drm_atomic_helper_crtc_destroy_state(state);
6221
6222
6223 kfree(state);
6224}
6225
6226static void dm_crtc_reset_state(struct drm_crtc *crtc)
6227{
6228 struct dm_crtc_state *state;
6229
6230 if (crtc->state)
6231 dm_crtc_destroy_state(crtc, crtc->state);
6232
6233 state = kzalloc(sizeof(*state), GFP_KERNEL);
6234 if (WARN_ON(!state))
6235 return;
6236
1f8a52ec 6237 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
6238}
6239
6240static struct drm_crtc_state *
6241dm_crtc_duplicate_state(struct drm_crtc *crtc)
6242{
6243 struct dm_crtc_state *state, *cur;
6244
6245 cur = to_dm_crtc_state(crtc->state);
6246
6247 if (WARN_ON(!crtc->state))
6248 return NULL;
6249
2004f45e 6250 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
6251 if (!state)
6252 return NULL;
e7b07cee
HW
6253
6254 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6255
6256 if (cur->stream) {
6257 state->stream = cur->stream;
6258 dc_stream_retain(state->stream);
6259 }
6260
d6ef9b41 6261 state->active_planes = cur->active_planes;
98e6436d 6262 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 6263 state->abm_level = cur->abm_level;
bb47de73
NK
6264 state->vrr_supported = cur->vrr_supported;
6265 state->freesync_config = cur->freesync_config;
cf020d49
NK
6266 state->cm_has_degamma = cur->cm_has_degamma;
6267 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
035f5496 6268 state->force_dpms_off = cur->force_dpms_off;
e7b07cee
HW
6269 /* TODO: Duplicate dc_stream once the stream object is flattened */
6270
6271 return &state->base;
6272}
6273
86bc2219 6274#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
e69231c4 6275static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
86bc2219
WL
6276{
6277 crtc_debugfs_init(crtc);
6278
6279 return 0;
6280}
6281#endif
6282
d2574c33
MK
6283static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6284{
6285 enum dc_irq_source irq_source;
6286 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6287 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
6288 int rc;
6289
6290 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6291
6292 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6293
4711c033
LT
6294 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6295 acrtc->crtc_id, enable ? "en" : "dis", rc);
d2574c33
MK
6296 return rc;
6297}
589d2739
HW
6298
6299static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6300{
6301 enum dc_irq_source irq_source;
6302 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6303 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 6304 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
ea3b4242 6305#if defined(CONFIG_DRM_AMD_DC_DCN)
71338cb4 6306 struct amdgpu_display_manager *dm = &adev->dm;
09a5df6c 6307 struct vblank_control_work *work;
ea3b4242 6308#endif
d2574c33
MK
6309 int rc = 0;
6310
6311 if (enable) {
6312 /* vblank irq on -> Only need vupdate irq in vrr mode */
6313 if (amdgpu_dm_vrr_active(acrtc_state))
6314 rc = dm_set_vupdate_irq(crtc, true);
6315 } else {
6316 /* vblank irq off -> vupdate irq off */
6317 rc = dm_set_vupdate_irq(crtc, false);
6318 }
6319
6320 if (rc)
6321 return rc;
589d2739
HW
6322
6323 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
6324
6325 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6326 return -EBUSY;
6327
98ab5f35
BL
6328 if (amdgpu_in_reset(adev))
6329 return 0;
6330
4928b480 6331#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
6332 if (dm->vblank_control_workqueue) {
6333 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6334 if (!work)
6335 return -ENOMEM;
09a5df6c 6336
06dd1888
NK
6337 INIT_WORK(&work->work, vblank_control_worker);
6338 work->dm = dm;
6339 work->acrtc = acrtc;
6340 work->enable = enable;
09a5df6c 6341
06dd1888
NK
6342 if (acrtc_state->stream) {
6343 dc_stream_retain(acrtc_state->stream);
6344 work->stream = acrtc_state->stream;
6345 }
58aa1c50 6346
06dd1888
NK
6347 queue_work(dm->vblank_control_workqueue, &work->work);
6348 }
4928b480 6349#endif
71338cb4 6350
71338cb4 6351 return 0;
589d2739
HW
6352}
6353
6354static int dm_enable_vblank(struct drm_crtc *crtc)
6355{
6356 return dm_set_vblank(crtc, true);
6357}
6358
6359static void dm_disable_vblank(struct drm_crtc *crtc)
6360{
6361 dm_set_vblank(crtc, false);
6362}
6363
e7b07cee
HW
6364 /* Only the options currently available to the driver are implemented */
6365static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6366 .reset = dm_crtc_reset_state,
6367 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
6368 .set_config = drm_atomic_helper_set_config,
6369 .page_flip = drm_atomic_helper_page_flip,
6370 .atomic_duplicate_state = dm_crtc_duplicate_state,
6371 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 6372 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 6373 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 6374 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 6375 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
6376 .enable_vblank = dm_enable_vblank,
6377 .disable_vblank = dm_disable_vblank,
e3eff4b5 6378 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
86bc2219
WL
6379#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6380 .late_register = amdgpu_dm_crtc_late_register,
6381#endif
e7b07cee
HW
6382};
6383
6384static enum drm_connector_status
6385amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6386{
6387 bool connected;
c84dec2f 6388 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6389
1f6010a9
DF
6390 /*
6391 * Notes:
e7b07cee
HW
6392 * 1. This interface is NOT called in context of HPD irq.
6393 * 2. This interface *is called* in context of user-mode ioctl, which
1f6010a9
DF
6394 * makes it a bad place for *any* MST-related activity.
6395 */
e7b07cee 6396
8580d60b
HW
6397 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6398 !aconnector->fake_enable)
e7b07cee
HW
6399 connected = (aconnector->dc_sink != NULL);
6400 else
6401 connected = (aconnector->base.force == DRM_FORCE_ON);
6402
0f877894
OV
6403 update_subconnector_property(aconnector);
6404
e7b07cee
HW
6405 return (connected ? connector_status_connected :
6406 connector_status_disconnected);
6407}
6408
3ee6b26b
AD
6409int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6410 struct drm_connector_state *connector_state,
6411 struct drm_property *property,
6412 uint64_t val)
e7b07cee
HW
6413{
6414 struct drm_device *dev = connector->dev;
1348969a 6415 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6416 struct dm_connector_state *dm_old_state =
6417 to_dm_connector_state(connector->state);
6418 struct dm_connector_state *dm_new_state =
6419 to_dm_connector_state(connector_state);
6420
6421 int ret = -EINVAL;
6422
6423 if (property == dev->mode_config.scaling_mode_property) {
6424 enum amdgpu_rmx_type rmx_type;
6425
6426 switch (val) {
6427 case DRM_MODE_SCALE_CENTER:
6428 rmx_type = RMX_CENTER;
6429 break;
6430 case DRM_MODE_SCALE_ASPECT:
6431 rmx_type = RMX_ASPECT;
6432 break;
6433 case DRM_MODE_SCALE_FULLSCREEN:
6434 rmx_type = RMX_FULL;
6435 break;
6436 case DRM_MODE_SCALE_NONE:
6437 default:
6438 rmx_type = RMX_OFF;
6439 break;
6440 }
6441
6442 if (dm_old_state->scaling == rmx_type)
6443 return 0;
6444
6445 dm_new_state->scaling = rmx_type;
6446 ret = 0;
6447 } else if (property == adev->mode_info.underscan_hborder_property) {
6448 dm_new_state->underscan_hborder = val;
6449 ret = 0;
6450 } else if (property == adev->mode_info.underscan_vborder_property) {
6451 dm_new_state->underscan_vborder = val;
6452 ret = 0;
6453 } else if (property == adev->mode_info.underscan_property) {
6454 dm_new_state->underscan_enable = val;
6455 ret = 0;
c1ee92f9
DF
6456 } else if (property == adev->mode_info.abm_level_property) {
6457 dm_new_state->abm_level = val;
6458 ret = 0;
e7b07cee
HW
6459 }
6460
6461 return ret;
6462}
6463
3ee6b26b
AD
6464int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6465 const struct drm_connector_state *state,
6466 struct drm_property *property,
6467 uint64_t *val)
e7b07cee
HW
6468{
6469 struct drm_device *dev = connector->dev;
1348969a 6470 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6471 struct dm_connector_state *dm_state =
6472 to_dm_connector_state(state);
6473 int ret = -EINVAL;
6474
6475 if (property == dev->mode_config.scaling_mode_property) {
6476 switch (dm_state->scaling) {
6477 case RMX_CENTER:
6478 *val = DRM_MODE_SCALE_CENTER;
6479 break;
6480 case RMX_ASPECT:
6481 *val = DRM_MODE_SCALE_ASPECT;
6482 break;
6483 case RMX_FULL:
6484 *val = DRM_MODE_SCALE_FULLSCREEN;
6485 break;
6486 case RMX_OFF:
6487 default:
6488 *val = DRM_MODE_SCALE_NONE;
6489 break;
6490 }
6491 ret = 0;
6492 } else if (property == adev->mode_info.underscan_hborder_property) {
6493 *val = dm_state->underscan_hborder;
6494 ret = 0;
6495 } else if (property == adev->mode_info.underscan_vborder_property) {
6496 *val = dm_state->underscan_vborder;
6497 ret = 0;
6498 } else if (property == adev->mode_info.underscan_property) {
6499 *val = dm_state->underscan_enable;
6500 ret = 0;
c1ee92f9
DF
6501 } else if (property == adev->mode_info.abm_level_property) {
6502 *val = dm_state->abm_level;
6503 ret = 0;
e7b07cee 6504 }
c1ee92f9 6505
e7b07cee
HW
6506 return ret;
6507}
6508
526c654a
ED
6509static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6510{
6511 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6512
6513 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6514}
6515
7578ecda 6516static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 6517{
c84dec2f 6518 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6519 const struct dc_link *link = aconnector->dc_link;
1348969a 6520 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 6521 struct amdgpu_display_manager *dm = &adev->dm;
7fd13bae 6522 int i;
ada8ce15 6523
5dff80bd
AG
6524 /*
6525 * Call only if mst_mgr was initialized before, since it's not done
6526 * for all connector types.
6527 */
6528 if (aconnector->mst_mgr.dev)
6529 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6530
e7b07cee
HW
6531#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6532 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
7fd13bae
AD
6533 for (i = 0; i < dm->num_of_edps; i++) {
6534 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6535 backlight_device_unregister(dm->backlight_dev[i]);
6536 dm->backlight_dev[i] = NULL;
6537 }
e7b07cee
HW
6538 }
6539#endif
dcd5fb82
MF
6540
6541 if (aconnector->dc_em_sink)
6542 dc_sink_release(aconnector->dc_em_sink);
6543 aconnector->dc_em_sink = NULL;
6544 if (aconnector->dc_sink)
6545 dc_sink_release(aconnector->dc_sink);
6546 aconnector->dc_sink = NULL;
6547
e86e8947 6548 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
6549 drm_connector_unregister(connector);
6550 drm_connector_cleanup(connector);
526c654a
ED
6551 if (aconnector->i2c) {
6552 i2c_del_adapter(&aconnector->i2c->base);
6553 kfree(aconnector->i2c);
6554 }
7daec99f 6555 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 6556
e7b07cee
HW
6557 kfree(connector);
6558}
6559
6560void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6561{
6562 struct dm_connector_state *state =
6563 to_dm_connector_state(connector->state);
6564
df099b9b
LSL
6565 if (connector->state)
6566 __drm_atomic_helper_connector_destroy_state(connector->state);
6567
e7b07cee
HW
6568 kfree(state);
6569
6570 state = kzalloc(sizeof(*state), GFP_KERNEL);
6571
6572 if (state) {
6573 state->scaling = RMX_OFF;
6574 state->underscan_enable = false;
6575 state->underscan_hborder = 0;
6576 state->underscan_vborder = 0;
01933ba4 6577 state->base.max_requested_bpc = 8;
3261e013
ML
6578 state->vcpi_slots = 0;
6579 state->pbn = 0;
c3e50f89
NK
6580 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6581 state->abm_level = amdgpu_dm_abm_level;
6582
df099b9b 6583 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
6584 }
6585}
6586
3ee6b26b
AD
6587struct drm_connector_state *
6588amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
6589{
6590 struct dm_connector_state *state =
6591 to_dm_connector_state(connector->state);
6592
6593 struct dm_connector_state *new_state =
6594 kmemdup(state, sizeof(*state), GFP_KERNEL);
6595
98e6436d
AK
6596 if (!new_state)
6597 return NULL;
e7b07cee 6598
98e6436d
AK
6599 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6600
6601 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6602 new_state->abm_level = state->abm_level;
922454c2
NK
6603 new_state->scaling = state->scaling;
6604 new_state->underscan_enable = state->underscan_enable;
6605 new_state->underscan_hborder = state->underscan_hborder;
6606 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
6607 new_state->vcpi_slots = state->vcpi_slots;
6608 new_state->pbn = state->pbn;
98e6436d 6609 return &new_state->base;
e7b07cee
HW
6610}
6611
14f04fa4
AD
6612static int
6613amdgpu_dm_connector_late_register(struct drm_connector *connector)
6614{
6615 struct amdgpu_dm_connector *amdgpu_dm_connector =
6616 to_amdgpu_dm_connector(connector);
00a8037e 6617 int r;
14f04fa4 6618
00a8037e
AD
6619 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6620 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6621 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6622 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6623 if (r)
6624 return r;
6625 }
6626
6627#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
6628 connector_debugfs_init(amdgpu_dm_connector);
6629#endif
6630
6631 return 0;
6632}
6633
e7b07cee
HW
6634static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6635 .reset = amdgpu_dm_connector_funcs_reset,
6636 .detect = amdgpu_dm_connector_detect,
6637 .fill_modes = drm_helper_probe_single_connector_modes,
6638 .destroy = amdgpu_dm_connector_destroy,
6639 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6640 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6641 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6642 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6643 .late_register = amdgpu_dm_connector_late_register,
526c654a 6644 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
6645};
6646
e7b07cee
HW
6647static int get_modes(struct drm_connector *connector)
6648{
6649 return amdgpu_dm_connector_get_modes(connector);
6650}
6651
c84dec2f 6652static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6653{
6654 struct dc_sink_init_data init_params = {
6655 .link = aconnector->dc_link,
6656 .sink_signal = SIGNAL_TYPE_VIRTUAL
6657 };
70e8ffc5 6658 struct edid *edid;
e7b07cee 6659
a89ff457 6660 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
6661 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6662 aconnector->base.name);
6663
6664 aconnector->base.force = DRM_FORCE_OFF;
6665 aconnector->base.override_edid = false;
6666 return;
6667 }
6668
70e8ffc5
HW
6669 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6670
e7b07cee
HW
6671 aconnector->edid = edid;
6672
6673 aconnector->dc_em_sink = dc_link_add_remote_sink(
6674 aconnector->dc_link,
6675 (uint8_t *)edid,
6676 (edid->extensions + 1) * EDID_LENGTH,
6677 &init_params);
6678
dcd5fb82 6679 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
6680 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6681 aconnector->dc_link->local_sink :
6682 aconnector->dc_em_sink;
dcd5fb82
MF
6683 dc_sink_retain(aconnector->dc_sink);
6684 }
e7b07cee
HW
6685}
6686
c84dec2f 6687static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6688{
6689 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6690
1f6010a9
DF
6691 /*
6692 * In case of headless boot with force-on for a DP managed connector,
e7b07cee
HW
6693 * those settings have to be != 0 to get an initial modeset.
6694 */
6695 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6696 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6697 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6698 }
6699
6700
6701 aconnector->base.override_edid = true;
6702 create_eml_sink(aconnector);
6703}
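/*
 * Note: base.force is set via core DRM's connector-force mechanism
 * (e.g. the video= boot option or the sysfs "status" attribute).
 * Seeding verified_link_cap with 4 lanes at LINK_RATE_HIGH2 (HBR2)
 * stands in for the link training that never ran on a headless boot,
 * so mode validation has a non-zero link capability to work against.
 */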
6704
cbd14ae7
SW
6705static struct dc_stream_state *
6706create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6707 const struct drm_display_mode *drm_mode,
6708 const struct dm_connector_state *dm_state,
6709 const struct dc_stream_state *old_stream)
6710{
6711 struct drm_connector *connector = &aconnector->base;
1348969a 6712 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 6713 struct dc_stream_state *stream;
4b7da34b
SW
6714 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6715 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
6716 enum dc_status dc_result = DC_OK;
6717
6718 do {
6719 stream = create_stream_for_sink(aconnector, drm_mode,
6720 dm_state, old_stream,
6721 requested_bpc);
6722 if (stream == NULL) {
6723 DRM_ERROR("Failed to create stream for sink!\n");
6724 break;
6725 }
6726
6727 dc_result = dc_validate_stream(adev->dm.dc, stream);
6728
6729 if (dc_result != DC_OK) {
74a16675 6730 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
6731 drm_mode->hdisplay,
6732 drm_mode->vdisplay,
6733 drm_mode->clock,
74a16675
RS
6734 dc_result,
6735 dc_status_to_str(dc_result));
cbd14ae7
SW
6736
6737 dc_stream_release(stream);
6738 stream = NULL;
6739 requested_bpc -= 2; /* lower bpc to retry validation */
6740 }
6741
6742 } while (stream == NULL && requested_bpc >= 6);
6743
68eb3ae3
WS
6744 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6745 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6746
6747 aconnector->force_yuv420_output = true;
6748 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6749 dm_state, old_stream);
6750 aconnector->force_yuv420_output = false;
6751 }
6752
cbd14ae7
SW
6753 return stream;
6754}
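/*
 * Example of the fallback ladder above: with max_requested_bpc == 10 the
 * stream is validated at 10, then 8, then 6 bpc before giving up. If the
 * failure was DC_FAIL_ENC_VALIDATE (e.g. an encoder clock limit), one
 * more full pass is attempted with YCbCr420 forced, which needs roughly
 * half the bandwidth of RGB/YCbCr444 at the same bpc.
 */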
6755
ba9ca088 6756enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 6757 struct drm_display_mode *mode)
e7b07cee
HW
6758{
6759 int result = MODE_ERROR;
6760 struct dc_sink *dc_sink;
e7b07cee 6761 /* TODO: Unhardcode stream count */
0971c40e 6762 struct dc_stream_state *stream;
c84dec2f 6763 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
6764
6765 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6766 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6767 return result;
6768
1f6010a9
DF
6769 /*
6770 * Only run this the first time mode_valid is called to initialize
e7b07cee
HW
6771 * EDID mgmt
6772 */
6773 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6774 !aconnector->dc_em_sink)
6775 handle_edid_mgmt(aconnector);
6776
c84dec2f 6777 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 6778
ad975f44
VL
6779 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6780 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
6781 DRM_ERROR("dc_sink is NULL!\n");
6782 goto fail;
6783 }
6784
cbd14ae7
SW
6785 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6786 if (stream) {
6787 dc_stream_release(stream);
e7b07cee 6788 result = MODE_OK;
cbd14ae7 6789 }
e7b07cee
HW
6790
6791fail:
6792 /* TODO: error handling */
6793 return result;
6794}
6795
88694af9
NK
6796static int fill_hdr_info_packet(const struct drm_connector_state *state,
6797 struct dc_info_packet *out)
6798{
6799 struct hdmi_drm_infoframe frame;
6800 unsigned char buf[30]; /* 26 + 4 */
6801 ssize_t len;
6802 int ret, i;
6803
6804 memset(out, 0, sizeof(*out));
6805
6806 if (!state->hdr_output_metadata)
6807 return 0;
6808
6809 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6810 if (ret)
6811 return ret;
6812
6813 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6814 if (len < 0)
6815 return (int)len;
6816
6817 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6818 if (len != 30)
6819 return -EINVAL;
6820
6821 /* Prepare the infopacket for DC. */
6822 switch (state->connector->connector_type) {
6823 case DRM_MODE_CONNECTOR_HDMIA:
6824 out->hb0 = 0x87; /* type */
6825 out->hb1 = 0x01; /* version */
6826 out->hb2 = 0x1A; /* length */
6827 out->sb[0] = buf[3]; /* checksum */
6828 i = 1;
6829 break;
6830
6831 case DRM_MODE_CONNECTOR_DisplayPort:
6832 case DRM_MODE_CONNECTOR_eDP:
6833 out->hb0 = 0x00; /* sdp id, zero */
6834 out->hb1 = 0x87; /* type */
6835 out->hb2 = 0x1D; /* payload len - 1 */
6836 out->hb3 = (0x13 << 2); /* sdp version */
6837 out->sb[0] = 0x01; /* version */
6838 out->sb[1] = 0x1A; /* length */
6839 i = 2;
6840 break;
6841
6842 default:
6843 return -EINVAL;
6844 }
6845
6846 memcpy(&out->sb[i], &buf[4], 26);
6847 out->valid = true;
6848
6849 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6850 sizeof(out->sb), false);
6851
6852 return 0;
6853}
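/*
 * Worked layout of the repacking above: hdmi_drm_infoframe_pack_only()
 * returns 30 bytes, buf[0..3] = { type 0x87, version 0x01, length 0x1A,
 * checksum } followed by the 26 payload bytes. For HDMI the header bytes
 * map straight onto hb0..hb2 and the checksum leads sb[]; for DP/eDP the
 * SDP header bytes are fixed by the spec instead, so the infoframe
 * version and length move into sb[0]/sb[1] and the checksum is dropped.
 */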
6854
88694af9
NK
6855static int
6856amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 6857 struct drm_atomic_state *state)
88694af9 6858{
51e857af
SP
6859 struct drm_connector_state *new_con_state =
6860 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
6861 struct drm_connector_state *old_con_state =
6862 drm_atomic_get_old_connector_state(state, conn);
6863 struct drm_crtc *crtc = new_con_state->crtc;
6864 struct drm_crtc_state *new_crtc_state;
6865 int ret;
6866
e8a98235
RS
6867 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6868
88694af9
NK
6869 if (!crtc)
6870 return 0;
6871
72921cdf 6872 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
6873 struct dc_info_packet hdr_infopacket;
6874
6875 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6876 if (ret)
6877 return ret;
6878
6879 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6880 if (IS_ERR(new_crtc_state))
6881 return PTR_ERR(new_crtc_state);
6882
6883 /*
6884 * DC considers the stream backends changed if the
6885 * static metadata changes. Forcing the modeset also
6886 * gives a simple way for userspace to switch from
b232d4ed
NK
6887 * 8bpc to 10bpc when setting the metadata to enter
6888 * or exit HDR.
6889 *
6890 * Changing the static metadata after it's been
6891 * set is permissible, however. So only force a
6892 * modeset if we're entering or exiting HDR.
88694af9 6893 */
b232d4ed
NK
6894 new_crtc_state->mode_changed =
6895 !old_con_state->hdr_output_metadata ||
6896 !new_con_state->hdr_output_metadata;
88694af9
NK
6897 }
6898
6899 return 0;
6900}
6901
e7b07cee
HW
6902static const struct drm_connector_helper_funcs
6903amdgpu_dm_connector_helper_funcs = {
6904 /*
1f6010a9 6905 * If hotplugging a second, bigger display in FB console mode, bigger-resolution
b830ebc9 6906 * modes will be filtered out by drm_mode_validate_size(), and those modes
1f6010a9 6907 * are missing after the user starts lightdm. So we need to renew the modes list
b830ebc9
HW
6908 * in the get_modes callback, not just return the modes count.
6909 */
e7b07cee
HW
6910 .get_modes = get_modes,
6911 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 6912 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
6913};
6914
6915static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6916{
6917}
6918
d6ef9b41 6919static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
6920{
6921 struct drm_atomic_state *state = new_crtc_state->state;
6922 struct drm_plane *plane;
6923 int num_active = 0;
6924
6925 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6926 struct drm_plane_state *new_plane_state;
6927
6928 /* Cursor planes are "fake". */
6929 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6930 continue;
6931
6932 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6933
6934 if (!new_plane_state) {
6935 /*
6936 * The plane is enabled on the CRTC and hasn't changed
6937 * state. This means that it previously passed
6938 * validation and is therefore enabled.
6939 */
6940 num_active += 1;
6941 continue;
6942 }
6943
6944 /* We need a framebuffer to be considered enabled. */
6945 num_active += (new_plane_state->fb != NULL);
6946 }
6947
d6ef9b41
NK
6948 return num_active;
6949}
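/*
 * Example: a CRTC showing a primary plane (fb bound, untouched by this
 * commit) plus a cursor plane counts as 1 -- the cursor is skipped as
 * "fake", and the untouched primary is assumed enabled because it must
 * have passed an earlier atomic check.
 */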
6950
8fe684e9
NK
6951static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6952 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
6953{
6954 struct dm_crtc_state *dm_new_crtc_state =
6955 to_dm_crtc_state(new_crtc_state);
6956
6957 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
6958
6959 if (!dm_new_crtc_state->stream)
6960 return;
6961
6962 dm_new_crtc_state->active_planes =
6963 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
6964}
6965
3ee6b26b 6966static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 6967 struct drm_atomic_state *state)
e7b07cee 6968{
29b77ad7
MR
6969 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6970 crtc);
1348969a 6971 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 6972 struct dc *dc = adev->dm.dc;
29b77ad7 6973 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
6974 int ret = -EINVAL;
6975
5b8c5969 6976 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 6977
29b77ad7 6978 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 6979
bcd74374
ND
6980 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6981 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
e7b07cee
HW
6982 return ret;
6983 }
6984
bc92c065 6985 /*
b836a274
MD
6986 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6987 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6988 * planes are disabled, which is not supported by the hardware. And there is legacy
6989 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 6990 */
29b77ad7 6991 if (crtc_state->enable &&
ea9522f5
SS
6992 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6993 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 6994 return -EINVAL;
ea9522f5 6995 }
c14a005c 6996
b836a274
MD
6997 /* In some use cases, like reset, no stream is attached */
6998 if (!dm_crtc_state->stream)
6999 return 0;
7000
62c933f9 7001 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
7002 return 0;
7003
ea9522f5 7004 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
7005 return ret;
7006}
7007
3ee6b26b
AD
7008static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7009 const struct drm_display_mode *mode,
7010 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
7011{
7012 return true;
7013}
7014
7015static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7016 .disable = dm_crtc_helper_disable,
7017 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
7018 .mode_fixup = dm_crtc_helper_mode_fixup,
7019 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
7020};
7021
7022static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7023{
7024
7025}
7026
3261e013
ML
7027static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
7028{
7029 switch (display_color_depth) {
7030 case COLOR_DEPTH_666:
7031 return 6;
7032 case COLOR_DEPTH_888:
7033 return 8;
7034 case COLOR_DEPTH_101010:
7035 return 10;
7036 case COLOR_DEPTH_121212:
7037 return 12;
7038 case COLOR_DEPTH_141414:
7039 return 14;
7040 case COLOR_DEPTH_161616:
7041 return 16;
7042 default:
7043 break;
7044 }
7045 return 0;
7046}
7047
3ee6b26b
AD
7048static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7049 struct drm_crtc_state *crtc_state,
7050 struct drm_connector_state *conn_state)
e7b07cee 7051{
3261e013
ML
7052 struct drm_atomic_state *state = crtc_state->state;
7053 struct drm_connector *connector = conn_state->connector;
7054 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7055 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7056 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7057 struct drm_dp_mst_topology_mgr *mst_mgr;
7058 struct drm_dp_mst_port *mst_port;
7059 enum dc_color_depth color_depth;
7060 int clock, bpp = 0;
1bc22f20 7061 bool is_y420 = false;
3261e013
ML
7062
7063 if (!aconnector->port || !aconnector->dc_sink)
7064 return 0;
7065
7066 mst_port = aconnector->port;
7067 mst_mgr = &aconnector->mst_port->mst_mgr;
7068
7069 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7070 return 0;
7071
7072 if (!state->duplicated) {
cbd14ae7 7073 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
7074 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7075 aconnector->force_yuv420_output;
cbd14ae7
SW
7076 color_depth = convert_color_depth_from_display_info(connector,
7077 is_y420,
7078 max_bpc);
3261e013
ML
7079 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7080 clock = adjusted_mode->clock;
dc48529f 7081 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
7082 }
7083 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7084 mst_mgr,
7085 mst_port,
1c6c1cb5 7086 dm_new_connector_state->pbn,
03ca9600 7087 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
7088 if (dm_new_connector_state->vcpi_slots < 0) {
7089 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7090 return dm_new_connector_state->vcpi_slots;
7091 }
e7b07cee
HW
7092 return 0;
7093}
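/*
 * Illustrative sketch (not driver code): the arithmetic behind the
 * drm_dp_calc_pbn_mode() call above. One PBN unit is 54/64 MB/s and a
 * 0.6% (1006/1000) overhead margin is folded in; the rounding below
 * mirrors the non-DSC branch of the DRM helper at this point in time
 * and should be treated as an assumption, not a reference.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pbn_for_mode(uint32_t clock_khz, uint32_t bpp)
{
	uint64_t kbps = (uint64_t)clock_khz * bpp; /* payload bandwidth */
	uint64_t unit = 8ULL * 54 * 1000 * 1000;   /* bits -> 54/64 MB/s units */

	return (kbps * 64 * 1006 + unit - 1) / unit; /* round up */
}

int main(void)
{
	/* Assumed mode: 1920x1080@60, 148500 kHz clock, 8bpc RGB (bpp = 24) */
	printf("pbn = %u\n", pbn_for_mode(148500, 24)); /* -> 532 */
	return 0;
}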
7094
7095const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7096 .disable = dm_encoder_helper_disable,
7097 .atomic_check = dm_encoder_helper_atomic_check
7098};
7099
d9fe1a4c 7100#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74 7101static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6513104b
HW
7102 struct dc_state *dc_state,
7103 struct dsc_mst_fairness_vars *vars)
29b9ba74
ML
7104{
7105 struct dc_stream_state *stream = NULL;
7106 struct drm_connector *connector;
5760dcb9 7107 struct drm_connector_state *new_con_state;
29b9ba74
ML
7108 struct amdgpu_dm_connector *aconnector;
7109 struct dm_connector_state *dm_conn_state;
6513104b 7110 int i, j, clock;
29b9ba74
ML
7111 int vcpi, pbn_div, pbn = 0;
7112
5760dcb9 7113 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
7114
7115 aconnector = to_amdgpu_dm_connector(connector);
7116
7117 if (!aconnector->port)
7118 continue;
7119
7120 if (!new_con_state || !new_con_state->crtc)
7121 continue;
7122
7123 dm_conn_state = to_dm_connector_state(new_con_state);
7124
7125 for (j = 0; j < dc_state->stream_count; j++) {
7126 stream = dc_state->streams[j];
7127 if (!stream)
7128 continue;
7129
7130 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7131 break;
7132
7133 stream = NULL;
7134 }
7135
7136 if (!stream)
7137 continue;
7138
7139 if (stream->timing.flags.DSC != 1) {
7140 drm_dp_mst_atomic_enable_dsc(state,
7141 aconnector->port,
7142 dm_conn_state->pbn,
7143 0,
7144 false);
7145 continue;
7146 }
7147
7148 pbn_div = dm_mst_get_pbn_divider(stream->link);
29b9ba74 7149 clock = stream->timing.pix_clk_100hz / 10;
6513104b
HW
7150 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7151 for (j = 0; j < dc_state->stream_count; j++) {
7152 if (vars[j].aconnector == aconnector) {
7153 pbn = vars[j].pbn;
7154 break;
7155 }
7156 }
7157
29b9ba74
ML
7158 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7159 aconnector->port,
7160 pbn, pbn_div,
7161 true);
7162 if (vcpi < 0)
7163 return vcpi;
7164
7165 dm_conn_state->pbn = pbn;
7166 dm_conn_state->vcpi_slots = vcpi;
7167 }
7168 return 0;
7169}
d9fe1a4c 7170#endif
29b9ba74 7171
e7b07cee
HW
7172static void dm_drm_plane_reset(struct drm_plane *plane)
7173{
7174 struct dm_plane_state *amdgpu_state = NULL;
7175
7176 if (plane->state)
7177 plane->funcs->atomic_destroy_state(plane, plane->state);
7178
7179 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 7180 WARN_ON(amdgpu_state == NULL);
1f6010a9 7181
7ddaef96
NK
7182 if (amdgpu_state)
7183 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
7184}
7185
7186static struct drm_plane_state *
7187dm_drm_plane_duplicate_state(struct drm_plane *plane)
7188{
7189 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7190
7191 old_dm_plane_state = to_dm_plane_state(plane->state);
7192 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7193 if (!dm_plane_state)
7194 return NULL;
7195
7196 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7197
3be5262e
HW
7198 if (old_dm_plane_state->dc_state) {
7199 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7200 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
7201 }
7202
7203 return &dm_plane_state->base;
7204}
7205
dfd84d90 7206static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 7207 struct drm_plane_state *state)
e7b07cee
HW
7208{
7209 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7210
3be5262e
HW
7211 if (dm_plane_state->dc_state)
7212 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 7213
0627bbd3 7214 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
7215}
7216
7217static const struct drm_plane_funcs dm_plane_funcs = {
7218 .update_plane = drm_atomic_helper_update_plane,
7219 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 7220 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
7221 .reset = dm_drm_plane_reset,
7222 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7223 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 7224 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
7225};
7226
3ee6b26b
AD
7227static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7228 struct drm_plane_state *new_state)
e7b07cee
HW
7229{
7230 struct amdgpu_framebuffer *afb;
7231 struct drm_gem_object *obj;
5d43be0c 7232 struct amdgpu_device *adev;
e7b07cee 7233 struct amdgpu_bo *rbo;
e7b07cee 7234 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
7235 struct list_head list;
7236 struct ttm_validate_buffer tv;
7237 struct ww_acquire_ctx ticket;
5d43be0c
CK
7238 uint32_t domain;
7239 int r;
e7b07cee
HW
7240
7241 if (!new_state->fb) {
4711c033 7242 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
7243 return 0;
7244 }
7245
7246 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 7247 obj = new_state->fb->obj[0];
e7b07cee 7248 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 7249 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
7250 INIT_LIST_HEAD(&list);
7251
7252 tv.bo = &rbo->tbo;
7253 tv.num_shared = 1;
7254 list_add(&tv.head, &list);
7255
9165fb87 7256 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
7257 if (r) {
7258 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 7259 return r;
0f257b09 7260 }
e7b07cee 7261
5d43be0c 7262 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 7263 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
7264 else
7265 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 7266
7b7c6c81 7267 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 7268 if (unlikely(r != 0)) {
30b7c614
HW
7269 if (r != -ERESTARTSYS)
7270 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 7271 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
7272 return r;
7273 }
7274
bb812f1e
JZ
7275 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7276 if (unlikely(r != 0)) {
7277 amdgpu_bo_unpin(rbo);
0f257b09 7278 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7279 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
7280 return r;
7281 }
7df7e505 7282
0f257b09 7283 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7284
7b7c6c81 7285 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
7286
7287 amdgpu_bo_ref(rbo);
7288
cf322b49
NK
7289 /**
7290 * We don't do surface updates on planes that have been newly created,
7291 * but we also don't have the afb->address during atomic check.
7292 *
7293 * Fill in buffer attributes depending on the address here, but only on
7294 * newly created planes since they're not being used by DC yet and this
7295 * won't modify global state.
7296 */
7297 dm_plane_state_old = to_dm_plane_state(plane->state);
7298 dm_plane_state_new = to_dm_plane_state(new_state);
7299
3be5262e 7300 if (dm_plane_state_new->dc_state &&
cf322b49
NK
7301 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7302 struct dc_plane_state *plane_state =
7303 dm_plane_state_new->dc_state;
7304 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 7305
320932bf 7306 fill_plane_buffer_attributes(
695af5f9 7307 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 7308 afb->tiling_flags,
cf322b49
NK
7309 &plane_state->tiling_info, &plane_state->plane_size,
7310 &plane_state->dcc, &plane_state->address,
6eed95b0 7311 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
7312 }
7313
e7b07cee
HW
7314 return 0;
7315}
7316
3ee6b26b
AD
7317static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7318 struct drm_plane_state *old_state)
e7b07cee
HW
7319{
7320 struct amdgpu_bo *rbo;
e7b07cee
HW
7321 int r;
7322
7323 if (!old_state->fb)
7324 return;
7325
e68d14dd 7326 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
7327 r = amdgpu_bo_reserve(rbo, false);
7328 if (unlikely(r)) {
7329 DRM_ERROR("failed to reserve rbo before unpin\n");
7330 return;
b830ebc9
HW
7331 }
7332
7333 amdgpu_bo_unpin(rbo);
7334 amdgpu_bo_unreserve(rbo);
7335 amdgpu_bo_unref(&rbo);
e7b07cee
HW
7336}
7337
8c44515b
AP
7338static int dm_plane_helper_check_state(struct drm_plane_state *state,
7339 struct drm_crtc_state *new_crtc_state)
7340{
6300b3bd
MK
7341 struct drm_framebuffer *fb = state->fb;
7342 int min_downscale, max_upscale;
7343 int min_scale = 0;
7344 int max_scale = INT_MAX;
7345
40d916a2 7346 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 7347 if (fb && state->crtc) {
40d916a2
NC
7348 /* Validate viewport to cover the case when only the position changes */
7349 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7350 int viewport_width = state->crtc_w;
7351 int viewport_height = state->crtc_h;
7352
7353 if (state->crtc_x < 0)
7354 viewport_width += state->crtc_x;
7355 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7356 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7357
7358 if (state->crtc_y < 0)
7359 viewport_height += state->crtc_y;
7360 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7361 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7362
4abdb72b
NC
7363 if (viewport_width < 0 || viewport_height < 0) {
7364 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7365 return -EINVAL;
7366 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7367 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 7368 return -EINVAL;
4abdb72b
NC
7369 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7370 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 7371 return -EINVAL;
4abdb72b
NC
7372 }
7373
40d916a2
NC
7374 }
7375
7376 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
7377 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7378 &min_downscale, &max_upscale);
7379 /*
7380 * Convert to drm convention: 16.16 fixed point, instead of dc's
7381 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7382 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7383 */
7384 min_scale = (1000 << 16) / max_upscale;
7385 max_scale = (1000 << 16) / min_downscale;
7386 }
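/*
 * Worked example with assumed (plausible) DC caps: max_upscale = 16000,
 * i.e. 16x in DC's 1.0 == 1000 convention, gives
 * min_scale = (1000 << 16) / 16000 = 4096, which is 1/16 in drm 16.16
 * fixed point; min_downscale = 250 (0.25x) gives
 * max_scale = (1000 << 16) / 250 = 262144, i.e. 4.0.
 */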
8c44515b 7387
8c44515b 7388 return drm_atomic_helper_check_plane_state(
6300b3bd 7389 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
7390}
7391
7578ecda 7392static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 7393 struct drm_atomic_state *state)
cbd19488 7394{
7c11b99a
MR
7395 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7396 plane);
1348969a 7397 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 7398 struct dc *dc = adev->dm.dc;
78171832 7399 struct dm_plane_state *dm_plane_state;
695af5f9 7400 struct dc_scaling_info scaling_info;
8c44515b 7401 struct drm_crtc_state *new_crtc_state;
695af5f9 7402 int ret;
78171832 7403
ba5c1649 7404 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 7405
ba5c1649 7406 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 7407
3be5262e 7408 if (!dm_plane_state->dc_state)
9a3329b1 7409 return 0;
cbd19488 7410
8c44515b 7411 new_crtc_state =
dec92020 7412 drm_atomic_get_new_crtc_state(state,
ba5c1649 7413 new_plane_state->crtc);
8c44515b
AP
7414 if (!new_crtc_state)
7415 return -EINVAL;
7416
ba5c1649 7417 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
7418 if (ret)
7419 return ret;
7420
ba5c1649 7421 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
695af5f9
NK
7422 if (ret)
7423 return ret;
a05bcff1 7424
62c933f9 7425 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
7426 return 0;
7427
7428 return -EINVAL;
7429}
7430
674e78ac 7431static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7432 struct drm_atomic_state *state)
674e78ac
NK
7433{
7434 /* Only support async updates on cursor planes. */
7435 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7436 return -EINVAL;
7437
7438 return 0;
7439}
7440
7441static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7442 struct drm_atomic_state *state)
674e78ac 7443{
5ddb0bd4
MR
7444 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7445 plane);
674e78ac 7446 struct drm_plane_state *old_state =
5ddb0bd4 7447 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7448
e8a98235
RS
7449 trace_amdgpu_dm_atomic_update_cursor(new_state);
7450
332af874 7451 swap(plane->state->fb, new_state->fb);
674e78ac
NK
7452
7453 plane->state->src_x = new_state->src_x;
7454 plane->state->src_y = new_state->src_y;
7455 plane->state->src_w = new_state->src_w;
7456 plane->state->src_h = new_state->src_h;
7457 plane->state->crtc_x = new_state->crtc_x;
7458 plane->state->crtc_y = new_state->crtc_y;
7459 plane->state->crtc_w = new_state->crtc_w;
7460 plane->state->crtc_h = new_state->crtc_h;
7461
7462 handle_cursor_update(plane, old_state);
7463}
7464
e7b07cee
HW
7465static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7466 .prepare_fb = dm_plane_helper_prepare_fb,
7467 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7468 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
7469 .atomic_async_check = dm_plane_atomic_async_check,
7470 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
7471};
7472
7473/*
7474 * TODO: these are currently initialized to rgb formats only.
7475 * For future use cases we should either initialize them dynamically based on
7476 * plane capabilities, or initialize this array to all formats, so internal drm
1f6010a9 7477 * check will succeed, and let DC implement proper check
e7b07cee 7478 */
d90371b0 7479static const uint32_t rgb_formats[] = {
e7b07cee
HW
7480 DRM_FORMAT_XRGB8888,
7481 DRM_FORMAT_ARGB8888,
7482 DRM_FORMAT_RGBA8888,
7483 DRM_FORMAT_XRGB2101010,
7484 DRM_FORMAT_XBGR2101010,
7485 DRM_FORMAT_ARGB2101010,
7486 DRM_FORMAT_ABGR2101010,
58020403
MK
7487 DRM_FORMAT_XRGB16161616,
7488 DRM_FORMAT_XBGR16161616,
7489 DRM_FORMAT_ARGB16161616,
7490 DRM_FORMAT_ABGR16161616,
bcd47f60
MR
7491 DRM_FORMAT_XBGR8888,
7492 DRM_FORMAT_ABGR8888,
46dd9ff7 7493 DRM_FORMAT_RGB565,
e7b07cee
HW
7494};
7495
0d579c7e
NK
7496static const uint32_t overlay_formats[] = {
7497 DRM_FORMAT_XRGB8888,
7498 DRM_FORMAT_ARGB8888,
7499 DRM_FORMAT_RGBA8888,
7500 DRM_FORMAT_XBGR8888,
7501 DRM_FORMAT_ABGR8888,
7267a1a9 7502 DRM_FORMAT_RGB565
e7b07cee
HW
7503};
7504
7505static const u32 cursor_formats[] = {
7506 DRM_FORMAT_ARGB8888
7507};
7508
37c6a93b
NK
7509static int get_plane_formats(const struct drm_plane *plane,
7510 const struct dc_plane_cap *plane_cap,
7511 uint32_t *formats, int max_formats)
e7b07cee 7512{
37c6a93b
NK
7513 int i, num_formats = 0;
7514
7515 /*
7516 * TODO: Query support for each group of formats directly from
7517 * DC plane caps. This will require adding more formats to the
7518 * caps list.
7519 */
e7b07cee 7520
f180b4bc 7521 switch (plane->type) {
e7b07cee 7522 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
7523 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7524 if (num_formats >= max_formats)
7525 break;
7526
7527 formats[num_formats++] = rgb_formats[i];
7528 }
7529
ea36ad34 7530 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 7531 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
7532 if (plane_cap && plane_cap->pixel_format_support.p010)
7533 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
7534 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7535 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7536 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
7537 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7538 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 7539 }
e7b07cee 7540 break;
37c6a93b 7541
e7b07cee 7542 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
7543 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7544 if (num_formats >= max_formats)
7545 break;
7546
7547 formats[num_formats++] = overlay_formats[i];
7548 }
e7b07cee 7549 break;
37c6a93b 7550
e7b07cee 7551 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
7552 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7553 if (num_formats >= max_formats)
7554 break;
7555
7556 formats[num_formats++] = cursor_formats[i];
7557 }
e7b07cee
HW
7558 break;
7559 }
7560
37c6a93b
NK
7561 return num_formats;
7562}
7563
7564static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7565 struct drm_plane *plane,
7566 unsigned long possible_crtcs,
7567 const struct dc_plane_cap *plane_cap)
7568{
7569 uint32_t formats[32];
7570 int num_formats;
7571 int res = -EPERM;
ecc874a6 7572 unsigned int supported_rotations;
faa37f54 7573 uint64_t *modifiers = NULL;
37c6a93b
NK
7574
7575 num_formats = get_plane_formats(plane, plane_cap, formats,
7576 ARRAY_SIZE(formats));
7577
faa37f54
BN
7578 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7579 if (res)
7580 return res;
7581
4a580877 7582 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7583 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
7584 modifiers, plane->type, NULL);
7585 kfree(modifiers);
37c6a93b
NK
7586 if (res)
7587 return res;
7588
cc1fec57
NK
7589 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7590 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
7591 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7592 BIT(DRM_MODE_BLEND_PREMULTI);
7593
7594 drm_plane_create_alpha_property(plane);
7595 drm_plane_create_blend_mode_property(plane, blend_caps);
7596 }
7597
fc8e5230 7598 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
7599 plane_cap &&
7600 (plane_cap->pixel_format_support.nv12 ||
7601 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
7602 /* This only affects YUV formats. */
7603 drm_plane_create_color_properties(
7604 plane,
7605 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
7606 BIT(DRM_COLOR_YCBCR_BT709) |
7607 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
7608 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7609 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7610 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7611 }
7612
ecc874a6
PLG
7613 supported_rotations =
7614 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7615 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7616
1347385f
SS
7617 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7618 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
7619 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7620 supported_rotations);
ecc874a6 7621
f180b4bc 7622 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7623
96719c54 7624 /* Create (reset) the plane state */
f180b4bc
HW
7625 if (plane->funcs->reset)
7626 plane->funcs->reset(plane);
96719c54 7627
37c6a93b 7628 return 0;
e7b07cee
HW
7629}
7630
7578ecda
AD
7631static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7632 struct drm_plane *plane,
7633 uint32_t crtc_index)
e7b07cee
HW
7634{
7635 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7636 struct drm_plane *cursor_plane;
e7b07cee
HW
7637
7638 int res = -ENOMEM;
7639
7640 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7641 if (!cursor_plane)
7642 goto fail;
7643
f180b4bc 7644 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7645 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
7646
7647 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7648 if (!acrtc)
7649 goto fail;
7650
7651 res = drm_crtc_init_with_planes(
7652 dm->ddev,
7653 &acrtc->base,
7654 plane,
f180b4bc 7655 cursor_plane,
e7b07cee
HW
7656 &amdgpu_dm_crtc_funcs, NULL);
7657
7658 if (res)
7659 goto fail;
7660
7661 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7662
96719c54
HW
7663 /* Create (reset) the plane state */
7664 if (acrtc->base.funcs->reset)
7665 acrtc->base.funcs->reset(&acrtc->base);
7666
e7b07cee
HW
7667 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7668 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7669
7670 acrtc->crtc_id = crtc_index;
7671 acrtc->base.enabled = false;
c37e2d29 7672 acrtc->otg_inst = -1;
e7b07cee
HW
7673
7674 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
7675 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7676 true, MAX_COLOR_LUT_ENTRIES);
086247a4 7677 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 7678
e7b07cee
HW
7679 return 0;
7680
7681fail:
b830ebc9
HW
7682 kfree(acrtc);
7683 kfree(cursor_plane);
e7b07cee
HW
7684 return res;
7685}
7686
7687
7688static int to_drm_connector_type(enum signal_type st)
7689{
7690 switch (st) {
7691 case SIGNAL_TYPE_HDMI_TYPE_A:
7692 return DRM_MODE_CONNECTOR_HDMIA;
7693 case SIGNAL_TYPE_EDP:
7694 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
7695 case SIGNAL_TYPE_LVDS:
7696 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
7697 case SIGNAL_TYPE_RGB:
7698 return DRM_MODE_CONNECTOR_VGA;
7699 case SIGNAL_TYPE_DISPLAY_PORT:
7700 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7701 return DRM_MODE_CONNECTOR_DisplayPort;
7702 case SIGNAL_TYPE_DVI_DUAL_LINK:
7703 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7704 return DRM_MODE_CONNECTOR_DVID;
7705 case SIGNAL_TYPE_VIRTUAL:
7706 return DRM_MODE_CONNECTOR_VIRTUAL;
7707
7708 default:
7709 return DRM_MODE_CONNECTOR_Unknown;
7710 }
7711}
7712
2b4c1c05
DV
7713static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7714{
62afb4ad
JRS
7715 struct drm_encoder *encoder;
7716
7717 /* There is only one encoder per connector */
7718 drm_connector_for_each_possible_encoder(connector, encoder)
7719 return encoder;
7720
7721 return NULL;
2b4c1c05
DV
7722}
7723
e7b07cee
HW
7724static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7725{
e7b07cee
HW
7726 struct drm_encoder *encoder;
7727 struct amdgpu_encoder *amdgpu_encoder;
7728
2b4c1c05 7729 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
7730
7731 if (encoder == NULL)
7732 return;
7733
7734 amdgpu_encoder = to_amdgpu_encoder(encoder);
7735
7736 amdgpu_encoder->native_mode.clock = 0;
7737
7738 if (!list_empty(&connector->probed_modes)) {
7739 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 7740
e7b07cee 7741 list_for_each_entry(preferred_mode,
b830ebc9
HW
7742 &connector->probed_modes,
7743 head) {
7744 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7745 amdgpu_encoder->native_mode = *preferred_mode;
7746
e7b07cee
HW
7747 break;
7748 }
7749
7750 }
7751}
7752
3ee6b26b
AD
7753static struct drm_display_mode *
7754amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7755 char *name,
7756 int hdisplay, int vdisplay)
e7b07cee
HW
7757{
7758 struct drm_device *dev = encoder->dev;
7759 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7760 struct drm_display_mode *mode = NULL;
7761 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7762
7763 mode = drm_mode_duplicate(dev, native_mode);
7764
b830ebc9 7765 if (mode == NULL)
e7b07cee
HW
7766 return NULL;
7767
7768 mode->hdisplay = hdisplay;
7769 mode->vdisplay = vdisplay;
7770 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 7771 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
7772
7773 return mode;
7774
7775}
7776
7777static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 7778 struct drm_connector *connector)
e7b07cee
HW
7779{
7780 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7781 struct drm_display_mode *mode = NULL;
7782 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
7783 struct amdgpu_dm_connector *amdgpu_dm_connector =
7784 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7785 int i;
7786 int n;
7787 struct mode_size {
7788 char name[DRM_DISPLAY_MODE_LEN];
7789 int w;
7790 int h;
b830ebc9 7791 } common_modes[] = {
e7b07cee
HW
7792 { "640x480", 640, 480},
7793 { "800x600", 800, 600},
7794 { "1024x768", 1024, 768},
7795 { "1280x720", 1280, 720},
7796 { "1280x800", 1280, 800},
7797 {"1280x1024", 1280, 1024},
7798 { "1440x900", 1440, 900},
7799 {"1680x1050", 1680, 1050},
7800 {"1600x1200", 1600, 1200},
7801 {"1920x1080", 1920, 1080},
7802 {"1920x1200", 1920, 1200}
7803 };
7804
b830ebc9 7805 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
7806
7807 for (i = 0; i < n; i++) {
7808 struct drm_display_mode *curmode = NULL;
7809 bool mode_existed = false;
7810
7811 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
7812 common_modes[i].h > native_mode->vdisplay ||
7813 (common_modes[i].w == native_mode->hdisplay &&
7814 common_modes[i].h == native_mode->vdisplay))
7815 continue;
e7b07cee
HW
7816
7817 list_for_each_entry(curmode, &connector->probed_modes, head) {
7818 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 7819 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
7820 mode_existed = true;
7821 break;
7822 }
7823 }
7824
7825 if (mode_existed)
7826 continue;
7827
7828 mode = amdgpu_dm_create_common_mode(encoder,
7829 common_modes[i].name, common_modes[i].w,
7830 common_modes[i].h);
7831 drm_mode_probed_add(connector, mode);
c84dec2f 7832 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
7833 }
7834}
7835
d77de788
SS
7836static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7837{
7838 struct drm_encoder *encoder;
7839 struct amdgpu_encoder *amdgpu_encoder;
7840 const struct drm_display_mode *native_mode;
7841
7842 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7843 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7844 return;
7845
7846 encoder = amdgpu_dm_connector_to_encoder(connector);
7847 if (!encoder)
7848 return;
7849
7850 amdgpu_encoder = to_amdgpu_encoder(encoder);
7851
7852 native_mode = &amdgpu_encoder->native_mode;
7853 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7854 return;
7855
7856 drm_connector_set_panel_orientation_with_quirk(connector,
7857 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7858 native_mode->hdisplay,
7859 native_mode->vdisplay);
7860}
7861
3ee6b26b
AD
7862static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7863 struct edid *edid)
e7b07cee 7864{
c84dec2f
HW
7865 struct amdgpu_dm_connector *amdgpu_dm_connector =
7866 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7867
7868 if (edid) {
7869 /* empty probed_modes */
7870 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 7871 amdgpu_dm_connector->num_modes =
e7b07cee
HW
7872 drm_add_edid_modes(connector, edid);
7873
f1e5e913
YMM
7874 /* Sort the probed modes before calling
7875 * amdgpu_dm_get_native_mode(), since an EDID can have
7876 * more than one preferred mode. The modes that are
7877 * later in the probed mode list could be of higher
7878 * and preferred resolution. For example, 3840x2160
7879 * resolution in base EDID preferred timing and 4096x2160
7880 * preferred resolution in DID extension block later.
7881 */
7882 drm_mode_sort(&connector->probed_modes);
e7b07cee 7883 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
7884
7885 /* Freesync capabilities are reset by calling
7886 * drm_add_edid_modes() and need to be
7887 * restored here.
7888 */
7889 amdgpu_dm_update_freesync_caps(connector, edid);
d77de788
SS
7890
7891 amdgpu_set_panel_orientation(connector);
a8d8d3dc 7892 } else {
c84dec2f 7893 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 7894 }
e7b07cee
HW
7895}
7896
a85ba005
NC
7897static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7898 struct drm_display_mode *mode)
7899{
7900 struct drm_display_mode *m;
7901
7902 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7903 if (drm_mode_equal(m, mode))
7904 return true;
7905 }
7906
7907 return false;
7908}
7909
7910static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7911{
7912 const struct drm_display_mode *m;
7913 struct drm_display_mode *new_mode;
7914 uint i;
7915 uint32_t new_modes_count = 0;
7916
7917 /* Standard FPS values
7918 *
7919 * 23.976 - TV/NTSC
7920 * 24 - Cinema
7921 * 25 - TV/PAL
7922 * 29.97 - TV/NTSC
7923 * 30 - TV/NTSC
7924 * 48 - Cinema HFR
7925 * 50 - TV/PAL
7926 * 60 - Commonly used
7927 * 48,72,96 - Multiples of 24
7928 */
9ce5ed6e
CIK
7929 static const uint32_t common_rates[] = {
7930 23976, 24000, 25000, 29970, 30000,
7931 48000, 50000, 60000, 72000, 96000
7932 };
a85ba005
NC
7933
7934 /*
7935 * Find mode with highest refresh rate with the same resolution
7936 * as the preferred mode. Some monitors report a preferred mode
7937 * with lower resolution than the highest refresh rate supported.
7938 */
7939
7940 m = get_highest_refresh_rate_mode(aconnector, true);
7941 if (!m)
7942 return 0;
7943
7944 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7945 uint64_t target_vtotal, target_vtotal_diff;
7946 uint64_t num, den;
7947
7948 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7949 continue;
7950
7951 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7952 common_rates[i] > aconnector->max_vfreq * 1000)
7953 continue;
7954
7955 num = (unsigned long long)m->clock * 1000 * 1000;
7956 den = common_rates[i] * (unsigned long long)m->htotal;
7957 target_vtotal = div_u64(num, den);
7958 target_vtotal_diff = target_vtotal - m->vtotal;
7959
7960 /* Check for illegal modes */
7961 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7962 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7963 m->vtotal + target_vtotal_diff < m->vsync_end)
7964 continue;
7965
7966 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7967 if (!new_mode)
7968 goto out;
7969
7970 new_mode->vtotal += (u16)target_vtotal_diff;
7971 new_mode->vsync_start += (u16)target_vtotal_diff;
7972 new_mode->vsync_end += (u16)target_vtotal_diff;
7973 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7974 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7975
7976 if (!is_duplicate_mode(aconnector, new_mode)) {
7977 drm_mode_probed_add(&aconnector->base, new_mode);
7978 new_modes_count += 1;
7979 } else
7980 drm_mode_destroy(aconnector->base.dev, new_mode);
7981 }
7982 out:
7983 return new_modes_count;
7984}
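/*
 * Illustrative sketch (not driver code): the vtotal-stretch math used by
 * add_fs_modes() above, with an assumed 1920x1080@60 CEA base mode
 * (148500 kHz clock, htotal 2200, vtotal 1125) retargeted to 48 Hz.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clock_khz = 148500, htotal = 2200, vtotal = 1125;
	uint64_t rate_mhz = 48000; /* 48.000 Hz in the table's millihertz units */

	/* Keep pixel clock and htotal fixed; grow vtotal to slow the refresh. */
	uint64_t target_vtotal = clock_khz * 1000 * 1000 / (rate_mhz * htotal);

	printf("target_vtotal = %llu, diff = %llu\n",
	       (unsigned long long)target_vtotal,
	       (unsigned long long)(target_vtotal - vtotal));
	/* -> target_vtotal = 1406, diff = 281 (about 48.01 Hz actual) */
	return 0;
}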
7985
7986static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7987 struct edid *edid)
7988{
7989 struct amdgpu_dm_connector *amdgpu_dm_connector =
7990 to_amdgpu_dm_connector(connector);
7991
7992 if (!(amdgpu_freesync_vid_mode && edid))
7993 return;
fe8858bb 7994
a85ba005
NC
7995 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7996 amdgpu_dm_connector->num_modes +=
7997 add_fs_modes(amdgpu_dm_connector);
7998}
7999
7578ecda 8000static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 8001{
c84dec2f
HW
8002 struct amdgpu_dm_connector *amdgpu_dm_connector =
8003 to_amdgpu_dm_connector(connector);
e7b07cee 8004 struct drm_encoder *encoder;
c84dec2f 8005 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 8006
2b4c1c05 8007 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 8008
5c0e6840 8009 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
8010 amdgpu_dm_connector->num_modes =
8011 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
8012 } else {
8013 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8014 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 8015 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 8016 }
3e332d3a 8017 amdgpu_dm_fbc_init(connector);
5099114b 8018
c84dec2f 8019 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
8020}
8021
3ee6b26b
AD
8022void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8023 struct amdgpu_dm_connector *aconnector,
8024 int connector_type,
8025 struct dc_link *link,
8026 int link_index)
e7b07cee 8027{
1348969a 8028 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 8029
f04bee34
NK
8030 /*
8031 * Some of the properties below require access to state, like bpc.
8032 * Allocate some default initial connector state with our reset helper.
8033 */
8034 if (aconnector->base.funcs->reset)
8035 aconnector->base.funcs->reset(&aconnector->base);
8036
e7b07cee
HW
8037 aconnector->connector_id = link_index;
8038 aconnector->dc_link = link;
8039 aconnector->base.interlace_allowed = false;
8040 aconnector->base.doublescan_allowed = false;
8041 aconnector->base.stereo_allowed = false;
8042 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8043 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 8044 aconnector->audio_inst = -1;
e7b07cee
HW
8045 mutex_init(&aconnector->hpd_lock);
8046
1f6010a9
DF
8047 /*
8048 * Configure HPD hot plug support: connector->polled defaults to 0,
b830ebc9
HW
8049 * which means HPD hot plug is not supported.
8050 */
e7b07cee
HW
8051 switch (connector_type) {
8052 case DRM_MODE_CONNECTOR_HDMIA:
8053 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 8054 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8055 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
8056 break;
8057 case DRM_MODE_CONNECTOR_DisplayPort:
8058 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 8059 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8060 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
8061 break;
8062 case DRM_MODE_CONNECTOR_DVID:
8063 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8064 break;
8065 default:
8066 break;
8067 }
8068
8069 drm_object_attach_property(&aconnector->base.base,
8070 dm->ddev->mode_config.scaling_mode_property,
8071 DRM_MODE_SCALE_NONE);
8072
8073 drm_object_attach_property(&aconnector->base.base,
8074 adev->mode_info.underscan_property,
8075 UNDERSCAN_OFF);
8076 drm_object_attach_property(&aconnector->base.base,
8077 adev->mode_info.underscan_hborder_property,
8078 0);
8079 drm_object_attach_property(&aconnector->base.base,
8080 adev->mode_info.underscan_vborder_property,
8081 0);
1825fd34 8082
8c61b31e
JFZ
8083 if (!aconnector->mst_port)
8084 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 8085
4a8ca46b
RL
8086 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8087 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8088 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 8089
c1ee92f9 8090 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 8091 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
8092 drm_object_attach_property(&aconnector->base.base,
8093 adev->mode_info.abm_level_property, 0);
8094 }
bb47de73
NK
8095
8096 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
8097 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8098 connector_type == DRM_MODE_CONNECTOR_eDP) {
e057b52c 8099 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
88694af9 8100
8c61b31e
JFZ
8101 if (!aconnector->mst_port)
8102 drm_connector_attach_vrr_capable_property(&aconnector->base);
8103
0c8620d6 8104#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 8105 if (adev->dm.hdcp_workqueue)
53e108aa 8106 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 8107#endif
bb47de73 8108 }
e7b07cee
HW
8109}
8110
7578ecda
AD
8111static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8112 struct i2c_msg *msgs, int num)
e7b07cee
HW
8113{
8114 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8115 struct ddc_service *ddc_service = i2c->ddc_service;
8116 struct i2c_command cmd;
8117 int i;
8118 int result = -EIO;
8119
b830ebc9 8120 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
8121
8122 if (!cmd.payloads)
8123 return result;
8124
8125 cmd.number_of_payloads = num;
8126 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8127 cmd.speed = 100;
8128
8129 for (i = 0; i < num; i++) {
8130 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8131 cmd.payloads[i].address = msgs[i].addr;
8132 cmd.payloads[i].length = msgs[i].len;
8133 cmd.payloads[i].data = msgs[i].buf;
8134 }
8135
c85e6e54
DF
8136 if (dc_submit_i2c(
8137 ddc_service->ctx->dc,
8138 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
8139 &cmd))
8140 result = num;
8141
8142 kfree(cmd.payloads);
8143 return result;
8144}
8145
7578ecda 8146static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
8147{
8148 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8149}
8150
8151static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8152 .master_xfer = amdgpu_dm_i2c_xfer,
8153 .functionality = amdgpu_dm_i2c_func,
8154};
8155
3ee6b26b
AD
8156static struct amdgpu_i2c_adapter *
8157create_i2c(struct ddc_service *ddc_service,
8158 int link_index,
8159 int *res)
e7b07cee
HW
8160{
8161 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8162 struct amdgpu_i2c_adapter *i2c;
8163
b830ebc9 8164 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
8165 if (!i2c)
8166 return NULL;
e7b07cee
HW
8167 i2c->base.owner = THIS_MODULE;
8168 i2c->base.class = I2C_CLASS_DDC;
8169 i2c->base.dev.parent = &adev->pdev->dev;
8170 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 8171 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
8172 i2c_set_adapdata(&i2c->base, i2c);
8173 i2c->ddc_service = ddc_service;
c85e6e54 8174 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
8175
8176 return i2c;
8177}
8178
89fc8d4e 8179
1f6010a9
DF
8180/*
8181 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
8182 * dc_link which will be represented by this aconnector.
8183 */
7578ecda
AD
8184static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8185 struct amdgpu_dm_connector *aconnector,
8186 uint32_t link_index,
8187 struct amdgpu_encoder *aencoder)
e7b07cee
HW
8188{
8189 int res = 0;
8190 int connector_type;
8191 struct dc *dc = dm->dc;
8192 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8193 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
8194
8195 link->priv = aconnector;
e7b07cee 8196
f1ad2f5e 8197 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
8198
8199 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
8200 if (!i2c) {
8201 DRM_ERROR("Failed to create i2c adapter data\n");
8202 return -ENOMEM;
8203 }
8204
e7b07cee
HW
8205 aconnector->i2c = i2c;
8206 res = i2c_add_adapter(&i2c->base);
8207
8208 if (res) {
8209 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8210 goto out_free;
8211 }
8212
8213 connector_type = to_drm_connector_type(link->connector_signal);
8214
17165de2 8215 res = drm_connector_init_with_ddc(
e7b07cee
HW
8216 dm->ddev,
8217 &aconnector->base,
8218 &amdgpu_dm_connector_funcs,
17165de2
AP
8219 connector_type,
8220 &i2c->base);
e7b07cee
HW
8221
8222 if (res) {
8223 DRM_ERROR("connector_init failed\n");
8224 aconnector->connector_id = -1;
8225 goto out_free;
8226 }
8227
8228 drm_connector_helper_add(
8229 &aconnector->base,
8230 &amdgpu_dm_connector_helper_funcs);
8231
8232 amdgpu_dm_connector_init_helper(
8233 dm,
8234 aconnector,
8235 connector_type,
8236 link,
8237 link_index);
8238
cde4c44d 8239 drm_connector_attach_encoder(
e7b07cee
HW
8240 &aconnector->base, &aencoder->base);
8241
e7b07cee
HW
8242 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8243 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 8244 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 8245
e7b07cee
HW
8246out_free:
8247 if (res) {
8248 kfree(i2c);
8249 aconnector->i2c = NULL;
8250 }
8251 return res;
8252}
8253
8254int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8255{
8256 switch (adev->mode_info.num_crtc) {
8257 case 1:
8258 return 0x1;
8259 case 2:
8260 return 0x3;
8261 case 3:
8262 return 0x7;
8263 case 4:
8264 return 0xf;
8265 case 5:
8266 return 0x1f;
8267 case 6:
8268 default:
8269 return 0x3f;
8270 }
8271}
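/*
 * Editor's note (illustrative sketch, not in the file): the switch above
 * is just a clamped low-bit mask. Assuming num_crtc >= 1, a hypothetical
 * equivalent:
 */
static inline u32 example_encoder_crtc_mask(int num_crtc)
{
	/* 1 CRTC -> 0x1, 2 -> 0x3, ..., 6 or more -> 0x3f */
	return (u32)GENMASK(min(num_crtc, 6) - 1, 0);
}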
8272
7578ecda
AD
8273static int amdgpu_dm_encoder_init(struct drm_device *dev,
8274 struct amdgpu_encoder *aencoder,
8275 uint32_t link_index)
e7b07cee 8276{
1348969a 8277 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8278
8279 int res = drm_encoder_init(dev,
8280 &aencoder->base,
8281 &amdgpu_dm_encoder_funcs,
8282 DRM_MODE_ENCODER_TMDS,
8283 NULL);
8284
8285 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8286
8287 if (!res)
8288 aencoder->encoder_id = link_index;
8289 else
8290 aencoder->encoder_id = -1;
8291
8292 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8293
8294 return res;
8295}
8296
3ee6b26b
AD
8297static void manage_dm_interrupts(struct amdgpu_device *adev,
8298 struct amdgpu_crtc *acrtc,
8299 bool enable)
e7b07cee
HW
8300{
8301 /*
8fe684e9
NK
8302 * We have no guarantee that the frontend index maps to the same
8303 * backend index - some even map to more than one.
8304 *
8305 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
8306 */
8307 int irq_type =
734dd01d 8308 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
8309 adev,
8310 acrtc->crtc_id);
8311
8312 if (enable) {
8313 drm_crtc_vblank_on(&acrtc->base);
8314 amdgpu_irq_get(
8315 adev,
8316 &adev->pageflip_irq,
8317 irq_type);
86bc2219
WL
8318#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8319 amdgpu_irq_get(
8320 adev,
8321 &adev->vline0_irq,
8322 irq_type);
8323#endif
e7b07cee 8324 } else {
86bc2219
WL
8325#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8326 amdgpu_irq_put(
8327 adev,
8328 &adev->vline0_irq,
8329 irq_type);
8330#endif
e7b07cee
HW
8331 amdgpu_irq_put(
8332 adev,
8333 &adev->pageflip_irq,
8334 irq_type);
8335 drm_crtc_vblank_off(&acrtc->base);
8336 }
8337}
8338
8fe684e9
NK
8339static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8340 struct amdgpu_crtc *acrtc)
8341{
8342 int irq_type =
8343 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8344
8345 /*
8346 * This reads the current state for the IRQ and force-reapplies
8347 * the setting to hardware.
8348 */
8349 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8350}
8351
3ee6b26b
AD
8352static bool
8353is_scaling_state_different(const struct dm_connector_state *dm_state,
8354 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
8355{
8356 if (dm_state->scaling != old_dm_state->scaling)
8357 return true;
8358 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8359 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8360 return true;
8361 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8362 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8363 return true;
b830ebc9
HW
8364 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8365 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8366 return true;
e7b07cee
HW
8367 return false;
8368}
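/*
 * Editor's example (illustrative): with scaling unchanged, a transition
 * from underscan enabled to disabled with old borders (0, 0) returns
 * false, while an unchanged enable with hborder going 16 -> 32 falls
 * through to the final comparison and returns true.
 */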
8369
0c8620d6
BL
8370#ifdef CONFIG_DRM_AMD_DC_HDCP
8371static bool is_content_protection_different(struct drm_connector_state *state,
8372 const struct drm_connector_state *old_state,
8373 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8374{
8375 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 8376 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 8377
31c0ed90 8378 /* Handle: Type0/1 change */
53e108aa
BL
8379 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8380 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8381 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8382 return true;
8383 }
8384
31c0ed90
BL
8385 /* CP is being re-enabled; ignore this transition.
8386 *
8387 * Handles: ENABLED -> DESIRED
8388 */
0c8620d6
BL
8389 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8390 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8391 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8392 return false;
8393 }
8394
31c0ed90
BL
8395 /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8396 *
8397 * Handles: UNDESIRED -> ENABLED
8398 */
0c8620d6
BL
8399 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8400 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8401 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8402
0d9a947b
QZ
8403 /* Stream removed and re-enabled
8404 *
8405 * Can sometimes overlap with the HPD case,
8406 * thus set update_hdcp to false to avoid
8407 * setting HDCP multiple times.
8408 *
8409 * Handles: DESIRED -> DESIRED (Special case)
8410 */
8411 if (!(old_state->crtc && old_state->crtc->enabled) &&
8412 state->crtc && state->crtc->enabled &&
8413 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8414 dm_con_state->update_hdcp = false;
8415 return true;
8416 }
8417
8418 /* Hot-plug, headless s3, dpms
8419 *
8420 * Only start HDCP if the display is connected/enabled.
8421 * update_hdcp flag will be set to false until the next
8422 * HPD comes in.
31c0ed90
BL
8423 *
8424 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 8425 */
97f6c917
BL
8426 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8427 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8428 dm_con_state->update_hdcp = false;
0c8620d6 8429 return true;
97f6c917 8430 }
0c8620d6 8431
31c0ed90
BL
8432 /*
8433 * Handles: UNDESIRED -> UNDESIRED
8434 * DESIRED -> DESIRED
8435 * ENABLED -> ENABLED
8436 */
0c8620d6
BL
8437 if (old_state->content_protection == state->content_protection)
8438 return false;
8439
31c0ed90
BL
8440 /*
8441 * Handles: UNDESIRED -> DESIRED
8442 * DESIRED -> UNDESIRED
8443 * ENABLED -> UNDESIRED
8444 */
97f6c917 8445 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
8446 return true;
8447
31c0ed90
BL
8448 /*
8449 * Handles: DESIRED -> ENABLED
8450 */
0c8620d6
BL
8451 return false;
8452}
8453
0c8620d6 8454#endif
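/*
 * Editor's summary (not part of the file): the net effect of
 * is_content_protection_different() on content_protection, assuming the
 * checks above run in order:
 *
 *   Type0/1 change (and not UNDESIRED)  -> demote to DESIRED, return true
 *   ENABLED -> DESIRED                  -> force back to ENABLED, return false
 *   UNDESIRED -> ENABLED (S3 resume)    -> demote to DESIRED, return true
 *   DESIRED -> DESIRED special cases    -> clear update_hdcp, return true
 *   value unchanged                     -> return false
 *   any other change to !ENABLED        -> return true
 *   DESIRED -> ENABLED                  -> return false (HDCP raises it)
 */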
3ee6b26b
AD
8455static void remove_stream(struct amdgpu_device *adev,
8456 struct amdgpu_crtc *acrtc,
8457 struct dc_stream_state *stream)
e7b07cee
HW
8458{
8459 /* This is the update mode case. */
e7b07cee
HW
8460
8461 acrtc->otg_inst = -1;
8462 acrtc->enabled = false;
8463}
8464
7578ecda
AD
8465static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8466 struct dc_cursor_position *position)
2a8f6ccb 8467{
f4c2cc43 8468 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
8469 int x, y;
8470 int xorigin = 0, yorigin = 0;
8471
e371e19c 8472 if (!crtc || !plane->state->fb)
2a8f6ccb 8473 return 0;
2a8f6ccb
HW
8474
8475 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8476 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8477 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8478 __func__,
8479 plane->state->crtc_w,
8480 plane->state->crtc_h);
8481 return -EINVAL;
8482 }
8483
8484 x = plane->state->crtc_x;
8485 y = plane->state->crtc_y;
c14a005c 8486
e371e19c
NK
8487 if (x <= -amdgpu_crtc->max_cursor_width ||
8488 y <= -amdgpu_crtc->max_cursor_height)
8489 return 0;
8490
2a8f6ccb
HW
8491 if (x < 0) {
8492 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8493 x = 0;
8494 }
8495 if (y < 0) {
8496 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8497 y = 0;
8498 }
8499 position->enable = true;
d243b6ff 8500 position->translate_by_source = true;
2a8f6ccb
HW
8501 position->x = x;
8502 position->y = y;
8503 position->x_hotspot = xorigin;
8504 position->y_hotspot = yorigin;
8505
8506 return 0;
8507}
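/*
 * Editor's example (illustrative only): for a 64x64 cursor placed at
 * crtc_x = -16, crtc_y = 8, the clamping above yields x = 0,
 * xorigin = 16, y = 8, yorigin = 0; the hotspot is shifted so the
 * visible part of the cursor stays aligned with its framebuffer.
 */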
8508
3ee6b26b
AD
8509static void handle_cursor_update(struct drm_plane *plane,
8510 struct drm_plane_state *old_plane_state)
e7b07cee 8511{
1348969a 8512 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
8513 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8514 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8515 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8516 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8517 uint64_t address = afb ? afb->address : 0;
6a30a929 8518 struct dc_cursor_position position = {0};
2a8f6ccb
HW
8519 struct dc_cursor_attributes attributes;
8520 int ret;
8521
e7b07cee
HW
8522 if (!plane->state->fb && !old_plane_state->fb)
8523 return;
8524
cb2318b7 8525 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
4711c033
LT
8526 __func__,
8527 amdgpu_crtc->crtc_id,
8528 plane->state->crtc_w,
8529 plane->state->crtc_h);
2a8f6ccb
HW
8530
8531 ret = get_cursor_position(plane, crtc, &position);
8532 if (ret)
8533 return;
8534
8535 if (!position.enable) {
8536 /* turn off cursor */
674e78ac
NK
8537 if (crtc_state && crtc_state->stream) {
8538 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
8539 dc_stream_set_cursor_position(crtc_state->stream,
8540 &position);
674e78ac
NK
8541 mutex_unlock(&adev->dm.dc_lock);
8542 }
2a8f6ccb 8543 return;
e7b07cee 8544 }
e7b07cee 8545
2a8f6ccb
HW
8546 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8547 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8548
c1cefe11 8549 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
8550 attributes.address.high_part = upper_32_bits(address);
8551 attributes.address.low_part = lower_32_bits(address);
8552 attributes.width = plane->state->crtc_w;
8553 attributes.height = plane->state->crtc_h;
8554 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8555 attributes.rotation_angle = 0;
8556 attributes.attribute_flags.value = 0;
8557
03a66367 8558 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 8559
886daac9 8560 if (crtc_state->stream) {
674e78ac 8561 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
8562 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8563 &attributes))
8564 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 8565
2a8f6ccb
HW
8566 if (!dc_stream_set_cursor_position(crtc_state->stream,
8567 &position))
8568 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 8569 mutex_unlock(&adev->dm.dc_lock);
886daac9 8570 }
2a8f6ccb 8571}
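/*
 * Editor's note (sketch): attributes.pitch above is expressed in pixels,
 * not bytes. E.g. a hypothetical 64x64 ARGB8888 cursor FB with
 * pitches[0] = 256 and cpp[0] = 4 gives pitch = 256 / 4 = 64 pixels.
 */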
e7b07cee
HW
8572
8573static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8574{
8575
8576 assert_spin_locked(&acrtc->base.dev->event_lock);
8577 WARN_ON(acrtc->event);
8578
8579 acrtc->event = acrtc->base.state->event;
8580
8581 /* Set the flip status */
8582 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8583
8584 /* Mark this event as consumed */
8585 acrtc->base.state->event = NULL;
8586
cb2318b7
VL
8587 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8588 acrtc->crtc_id);
e7b07cee
HW
8589}
8590
bb47de73
NK
8591static void update_freesync_state_on_stream(
8592 struct amdgpu_display_manager *dm,
8593 struct dm_crtc_state *new_crtc_state,
180db303
NK
8594 struct dc_stream_state *new_stream,
8595 struct dc_plane_state *surface,
8596 u32 flip_timestamp_in_us)
bb47de73 8597{
09aef2c4 8598 struct mod_vrr_params vrr_params;
bb47de73 8599 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 8600 struct amdgpu_device *adev = dm->adev;
585d450c 8601 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8602 unsigned long flags;
4cda3243 8603 bool pack_sdp_v1_3 = false;
bb47de73
NK
8604
8605 if (!new_stream)
8606 return;
8607
8608 /*
8609 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8610 * For now it's sufficient to just guard against these conditions.
8611 */
8612
8613 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8614 return;
8615
4a580877 8616 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8617 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8618
180db303
NK
8619 if (surface) {
8620 mod_freesync_handle_preflip(
8621 dm->freesync_module,
8622 surface,
8623 new_stream,
8624 flip_timestamp_in_us,
8625 &vrr_params);
09aef2c4
MK
8626
8627 if (adev->family < AMDGPU_FAMILY_AI &&
8628 amdgpu_dm_vrr_active(new_crtc_state)) {
8629 mod_freesync_handle_v_update(dm->freesync_module,
8630 new_stream, &vrr_params);
e63e2491
EB
8631
8632 /* Need to call this before the frame ends. */
8633 dc_stream_adjust_vmin_vmax(dm->dc,
8634 new_crtc_state->stream,
8635 &vrr_params.adjust);
09aef2c4 8636 }
180db303 8637 }
bb47de73
NK
8638
8639 mod_freesync_build_vrr_infopacket(
8640 dm->freesync_module,
8641 new_stream,
180db303 8642 &vrr_params,
ecd0136b
HT
8643 PACKET_TYPE_VRR,
8644 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
8645 &vrr_infopacket,
8646 pack_sdp_v1_3);
bb47de73 8647
8a48b44c 8648 new_crtc_state->freesync_timing_changed |=
585d450c 8649 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
8650 &vrr_params.adjust,
8651 sizeof(vrr_params.adjust)) != 0);
bb47de73 8652
8a48b44c 8653 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
8654 (memcmp(&new_crtc_state->vrr_infopacket,
8655 &vrr_infopacket,
8656 sizeof(vrr_infopacket)) != 0);
8657
585d450c 8658 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
8659 new_crtc_state->vrr_infopacket = vrr_infopacket;
8660
585d450c 8661 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
8662 new_stream->vrr_infopacket = vrr_infopacket;
8663
8664 if (new_crtc_state->freesync_vrr_info_changed)
8665 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8666 new_crtc_state->base.crtc->base.id,
8667 (int)new_crtc_state->base.vrr_enabled,
180db303 8668 (int)vrr_params.state);
09aef2c4 8669
4a580877 8670 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
8671}
8672
585d450c 8673static void update_stream_irq_parameters(
e854194c
MK
8674 struct amdgpu_display_manager *dm,
8675 struct dm_crtc_state *new_crtc_state)
8676{
8677 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 8678 struct mod_vrr_params vrr_params;
e854194c 8679 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 8680 struct amdgpu_device *adev = dm->adev;
585d450c 8681 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8682 unsigned long flags;
e854194c
MK
8683
8684 if (!new_stream)
8685 return;
8686
8687 /*
8688 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8689 * For now it's sufficient to just guard against these conditions.
8690 */
8691 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8692 return;
8693
4a580877 8694 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8695 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8696
e854194c
MK
8697 if (new_crtc_state->vrr_supported &&
8698 config.min_refresh_in_uhz &&
8699 config.max_refresh_in_uhz) {
a85ba005
NC
8700 /*
8701 * if freesync compatible mode was set, config.state will be set
8702 * in atomic check
8703 */
8704 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8705 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8706 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8707 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8708 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8709 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8710 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8711 } else {
8712 config.state = new_crtc_state->base.vrr_enabled ?
8713 VRR_STATE_ACTIVE_VARIABLE :
8714 VRR_STATE_INACTIVE;
8715 }
e854194c
MK
8716 } else {
8717 config.state = VRR_STATE_UNSUPPORTED;
8718 }
8719
8720 mod_freesync_build_vrr_params(dm->freesync_module,
8721 new_stream,
8722 &config, &vrr_params);
8723
8724 new_crtc_state->freesync_timing_changed |=
585d450c
AP
8725 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8726 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 8727
585d450c
AP
8728 new_crtc_state->freesync_config = config;
8729 /* Copy state for access from DM IRQ handler */
8730 acrtc->dm_irq_params.freesync_config = config;
8731 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8732 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 8733 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
8734}
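/*
 * Editor's note: the *_refresh_in_uhz fields above are in micro-Hz
 * (Hz * 1000000). A hypothetical 48-144 Hz FreeSync range is
 * config.min_refresh_in_uhz = 48000000 and
 * config.max_refresh_in_uhz = 144000000.
 */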
8735
66b0c973
MK
8736static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8737 struct dm_crtc_state *new_state)
8738{
8739 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8740 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8741
8742 if (!old_vrr_active && new_vrr_active) {
8743 /* Transition VRR inactive -> active:
8744 * While VRR is active, we must not disable the vblank irq, as a
8745 * reenable after a disable would compute bogus vblank/pflip
8746 * timestamps if the reenable happens inside the display front-porch.
d2574c33
MK
8747 *
8748 * We also need vupdate irq for the actual core vblank handling
8749 * at end of vblank.
66b0c973 8750 */
d2574c33 8751 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
8752 drm_crtc_vblank_get(new_state->base.crtc);
8753 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8754 __func__, new_state->base.crtc->base.id);
8755 } else if (old_vrr_active && !new_vrr_active) {
8756 /* Transition VRR active -> inactive:
8757 * Allow vblank irq disable again for fixed refresh rate.
8758 */
d2574c33 8759 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
8760 drm_crtc_vblank_put(new_state->base.crtc);
8761 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8762 __func__, new_state->base.crtc->base.id);
8763 }
8764}
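/*
 * Editor's trace (illustrative): the transitions above must stay
 * balanced per CRTC across commits:
 *
 *   VRR off -> on : dm_set_vupdate_irq(crtc, true),  drm_crtc_vblank_get()
 *   VRR on -> off : dm_set_vupdate_irq(crtc, false), drm_crtc_vblank_put()
 *
 * An unbalanced get would pin the vblank irq on forever; an unbalanced
 * put would underflow the reference count.
 */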
8765
8ad27806
NK
8766static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8767{
8768 struct drm_plane *plane;
5760dcb9 8769 struct drm_plane_state *old_plane_state;
8ad27806
NK
8770 int i;
8771
8772 /*
8773 * TODO: Make this per-stream so we don't issue redundant updates for
8774 * commits with multiple streams.
8775 */
5760dcb9 8776 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8ad27806
NK
8777 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8778 handle_cursor_update(plane, old_plane_state);
8779}
8780
3be5262e 8781static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 8782 struct dc_state *dc_state,
3ee6b26b
AD
8783 struct drm_device *dev,
8784 struct amdgpu_display_manager *dm,
8785 struct drm_crtc *pcrtc,
420cd472 8786 bool wait_for_vblank)
e7b07cee 8787{
efc8278e 8788 uint32_t i;
8a48b44c 8789 uint64_t timestamp_ns;
e7b07cee 8790 struct drm_plane *plane;
0bc9706d 8791 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 8792 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
8793 struct drm_crtc_state *new_pcrtc_state =
8794 drm_atomic_get_new_crtc_state(state, pcrtc);
8795 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
8796 struct dm_crtc_state *dm_old_crtc_state =
8797 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 8798 int planes_count = 0, vpos, hpos;
570c91d5 8799 long r;
e7b07cee 8800 unsigned long flags;
8a48b44c 8801 struct amdgpu_bo *abo;
fdd1fe57
MK
8802 uint32_t target_vblank, last_flip_vblank;
8803 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 8804 bool pflip_present = false;
bc7f670e
DF
8805 struct {
8806 struct dc_surface_update surface_updates[MAX_SURFACES];
8807 struct dc_plane_info plane_infos[MAX_SURFACES];
8808 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 8809 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 8810 struct dc_stream_update stream_update;
74aa7bd4 8811 } *bundle;
bc7f670e 8812
74aa7bd4 8813 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 8814
74aa7bd4
DF
8815 if (!bundle) {
8816 dm_error("Failed to allocate update bundle\n");
4b510503
NK
8817 goto cleanup;
8818 }
e7b07cee 8819
8ad27806
NK
8820 /*
8821 * Disable the cursor first if we're disabling all the planes.
8822 * It'll remain on the screen after the planes are re-enabled
8823 * if we don't.
8824 */
8825 if (acrtc_state->active_planes == 0)
8826 amdgpu_dm_commit_cursors(state);
8827
e7b07cee 8828 /* update planes when needed */
efc8278e 8829 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 8830 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 8831 struct drm_crtc_state *new_crtc_state;
0bc9706d 8832 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 8833 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 8834 bool plane_needs_flip;
c7af5f77 8835 struct dc_plane_state *dc_plane;
54d76575 8836 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 8837
80c218d5
NK
8838 /* Cursor plane is handled after stream updates */
8839 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 8840 continue;
e7b07cee 8841
f5ba60fe
DD
8842 if (!fb || !crtc || pcrtc != crtc)
8843 continue;
8844
8845 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8846 if (!new_crtc_state->active)
e7b07cee
HW
8847 continue;
8848
bc7f670e 8849 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 8850
74aa7bd4 8851 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 8852 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
8853 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8854 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 8855 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 8856 }
8a48b44c 8857
695af5f9
NK
8858 fill_dc_scaling_info(new_plane_state,
8859 &bundle->scaling_infos[planes_count]);
8a48b44c 8860
695af5f9
NK
8861 bundle->surface_updates[planes_count].scaling_info =
8862 &bundle->scaling_infos[planes_count];
8a48b44c 8863
f5031000 8864 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 8865
f5031000 8866 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 8867
f5031000
DF
8868 if (!plane_needs_flip) {
8869 planes_count += 1;
8870 continue;
8871 }
8a48b44c 8872
2fac0f53
CK
8873 abo = gem_to_amdgpu_bo(fb->obj[0]);
8874
f8308898
AG
8875 /*
8876 * Wait for all fences on this FB. Do limited wait to avoid
8877 * deadlock during GPU reset when this fence will not signal
8878 * but we hold reservation lock for the BO.
8879 */
d3fae3b3
CK
8880 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8881 msecs_to_jiffies(5000));
f8308898 8882 if (unlikely(r <= 0))
ed8a5fb2 8883 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 8884
695af5f9 8885 fill_dc_plane_info_and_addr(
8ce5d842 8886 dm->adev, new_plane_state,
6eed95b0 8887 afb->tiling_flags,
695af5f9 8888 &bundle->plane_infos[planes_count],
87b7ebc2 8889 &bundle->flip_addrs[planes_count].address,
6eed95b0 8890 afb->tmz_surface, false);
87b7ebc2 8891
4711c033 8892 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
8893 new_plane_state->plane->index,
8894 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
8895
8896 bundle->surface_updates[planes_count].plane_info =
8897 &bundle->plane_infos[planes_count];
8a48b44c 8898
caff0e66
NK
8899 /*
8900 * Only allow immediate flips for fast updates that don't
8901 * change FB pitch, DCC state, rotation or mirroring.
8902 */
f5031000 8903 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 8904 crtc->state->async_flip &&
caff0e66 8905 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 8906
f5031000
DF
8907 timestamp_ns = ktime_get_ns();
8908 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8909 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8910 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 8911
f5031000
DF
8912 if (!bundle->surface_updates[planes_count].surface) {
8913 DRM_ERROR("No surface for CRTC: id=%d\n",
8914 acrtc_attach->crtc_id);
8915 continue;
bc7f670e
DF
8916 }
8917
f5031000
DF
8918 if (plane == pcrtc->primary)
8919 update_freesync_state_on_stream(
8920 dm,
8921 acrtc_state,
8922 acrtc_state->stream,
8923 dc_plane,
8924 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 8925
4711c033 8926 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
8927 __func__,
8928 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8929 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
8930
8931 planes_count += 1;
8932
8a48b44c
DF
8933 }
8934
74aa7bd4 8935 if (pflip_present) {
634092b1
MK
8936 if (!vrr_active) {
8937 /* Use old throttling in non-vrr fixed refresh rate mode
8938 * to keep flip scheduling based on target vblank counts
8939 * working in a backwards compatible way, e.g., for
8940 * clients using the GLX_OML_sync_control extension or
8941 * DRI3/Present extension with defined target_msc.
8942 */
e3eff4b5 8943 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
8944 } else {
8946 /* For variable refresh rate mode only:
8947 * Get vblank of last completed flip to avoid > 1 vrr
8948 * flips per video frame by use of throttling, but allow
8949 * flip programming anywhere in the possibly large
8950 * variable vrr vblank interval for fine-grained flip
8951 * timing control and more opportunity to avoid stutter
8952 * on late submission of flips.
8953 */
8954 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 8955 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
8956 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8957 }
8958
fdd1fe57 8959 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
8960
8961 /*
8962 * Wait until we're out of the vertical blank period before the one
8963 * targeted by the flip
8964 */
8965 while ((acrtc_attach->enabled &&
8966 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8967 0, &vpos, &hpos, NULL,
8968 NULL, &pcrtc->hwmode)
8969 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8970 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8971 (int)(target_vblank -
e3eff4b5 8972 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
8973 usleep_range(1000, 1100);
8974 }
8975
8fe684e9
NK
8976 /**
8977 * Prepare the flip event for the pageflip interrupt to handle.
8978 *
8979 * This only works in the case where we've already turned on the
8980 * appropriate hardware blocks (eg. HUBP) so in the transition case
8981 * from 0 -> n planes we have to skip a hardware generated event
8982 * and rely on sending it from software.
8983 */
8984 if (acrtc_attach->base.state->event &&
035f5496
AP
8985 acrtc_state->active_planes > 0 &&
8986 !acrtc_state->force_dpms_off) {
8a48b44c
DF
8987 drm_crtc_vblank_get(pcrtc);
8988
8989 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8990
8991 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8992 prepare_flip_isr(acrtc_attach);
8993
8994 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8995 }
8996
8997 if (acrtc_state->stream) {
8a48b44c 8998 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 8999 bundle->stream_update.vrr_infopacket =
8a48b44c 9000 &acrtc_state->stream->vrr_infopacket;
e7b07cee 9001 }
e7b07cee
HW
9002 }
9003
bc92c065 9004 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
9005 if ((planes_count || acrtc_state->active_planes == 0) &&
9006 acrtc_state->stream) {
96160687 9007#if defined(CONFIG_DRM_AMD_DC_DCN)
58aa1c50
NK
9008 /*
9009 * If PSR or idle optimizations are enabled then flush out
9010 * any pending work before hardware programming.
9011 */
06dd1888
NK
9012 if (dm->vblank_control_workqueue)
9013 flush_workqueue(dm->vblank_control_workqueue);
96160687 9014#endif
58aa1c50 9015
b6e881c9 9016 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 9017 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
9018 bundle->stream_update.src = acrtc_state->stream->src;
9019 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
9020 }
9021
cf020d49
NK
9022 if (new_pcrtc_state->color_mgmt_changed) {
9023 /*
9024 * TODO: This isn't fully correct since we've actually
9025 * already modified the stream in place.
9026 */
9027 bundle->stream_update.gamut_remap =
9028 &acrtc_state->stream->gamut_remap_matrix;
9029 bundle->stream_update.output_csc_transform =
9030 &acrtc_state->stream->csc_color_matrix;
9031 bundle->stream_update.out_transfer_func =
9032 acrtc_state->stream->out_transfer_func;
9033 }
bc7f670e 9034
8a48b44c 9035 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 9036 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 9037 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 9038
e63e2491
EB
9039 /*
9040 * If FreeSync state on the stream has changed then we need to
9041 * re-adjust the min/max bounds now that DC doesn't handle this
9042 * as part of commit.
9043 */
a85ba005 9044 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
9045 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9046 dc_stream_adjust_vmin_vmax(
9047 dm->dc, acrtc_state->stream,
585d450c 9048 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
9049 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9050 }
bc7f670e 9051 mutex_lock(&dm->dc_lock);
8c322309 9052 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 9053 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
9054 amdgpu_dm_psr_disable(acrtc_state->stream);
9055
bc7f670e 9056 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 9057 bundle->surface_updates,
bc7f670e
DF
9058 planes_count,
9059 acrtc_state->stream,
efc8278e
AJ
9060 &bundle->stream_update,
9061 dc_state);
8c322309 9062
8fe684e9
NK
9063 /**
9064 * Enable or disable the interrupts on the backend.
9065 *
9066 * Most pipes are put into power gating when unused.
9067 *
9068 * When power gating is enabled on a pipe we lose the
9069 * interrupt enablement state when power gating is disabled.
9070 *
9071 * So we need to update the IRQ control state in hardware
9072 * whenever the pipe turns on (since it could be previously
9073 * power gated) or off (since some pipes can't be power gated
9074 * on some ASICs).
9075 */
9076 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
9077 dm_update_pflip_irq_state(drm_to_adev(dev),
9078 acrtc_attach);
8fe684e9 9079
8c322309 9080 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 9081 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 9082 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309 9083 amdgpu_dm_link_setup_psr(acrtc_state->stream);
58aa1c50
NK
9084
9085 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9086 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9087 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9088 struct amdgpu_dm_connector *aconn =
9089 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
1a365683
RL
9090
9091 if (aconn->psr_skip_count > 0)
9092 aconn->psr_skip_count--;
58aa1c50
NK
9093
9094 /* Allow PSR when skip count is 0. */
9095 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9096 } else {
9097 acrtc_attach->dm_irq_params.allow_psr_entry = false;
8c322309
RL
9098 }
9099
bc7f670e 9100 mutex_unlock(&dm->dc_lock);
e7b07cee 9101 }
4b510503 9102
8ad27806
NK
9103 /*
9104 * Update cursor state *after* programming all the planes.
9105 * This avoids redundant programming in the case where we're going
9106 * to be disabling a single plane - those pipes are being disabled.
9107 */
9108 if (acrtc_state->active_planes)
9109 amdgpu_dm_commit_cursors(state);
80c218d5 9110
4b510503 9111cleanup:
74aa7bd4 9112 kfree(bundle);
e7b07cee
HW
9113}
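/*
 * Editor's example (illustrative): the flip-throttle loop above spins
 * while the scanout is inside the vblank period and
 * (int)(target_vblank - current_count) > 0. With wait_for_vblank = 1
 * and last_flip_vblank = 1000, programming is held off until the
 * counter passes 1001, i.e. at most one flip completes per vblank in
 * fixed refresh mode.
 */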
9114
6ce8f316
NK
9115static void amdgpu_dm_commit_audio(struct drm_device *dev,
9116 struct drm_atomic_state *state)
9117{
1348969a 9118 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
9119 struct amdgpu_dm_connector *aconnector;
9120 struct drm_connector *connector;
9121 struct drm_connector_state *old_con_state, *new_con_state;
9122 struct drm_crtc_state *new_crtc_state;
9123 struct dm_crtc_state *new_dm_crtc_state;
9124 const struct dc_stream_status *status;
9125 int i, inst;
9126
9127 /* Notify device removals. */
9128 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9129 if (old_con_state->crtc != new_con_state->crtc) {
9130 /* CRTC changes require notification. */
9131 goto notify;
9132 }
9133
9134 if (!new_con_state->crtc)
9135 continue;
9136
9137 new_crtc_state = drm_atomic_get_new_crtc_state(
9138 state, new_con_state->crtc);
9139
9140 if (!new_crtc_state)
9141 continue;
9142
9143 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9144 continue;
9145
9146 notify:
9147 aconnector = to_amdgpu_dm_connector(connector);
9148
9149 mutex_lock(&adev->dm.audio_lock);
9150 inst = aconnector->audio_inst;
9151 aconnector->audio_inst = -1;
9152 mutex_unlock(&adev->dm.audio_lock);
9153
9154 amdgpu_dm_audio_eld_notify(adev, inst);
9155 }
9156
9157 /* Notify audio device additions. */
9158 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9159 if (!new_con_state->crtc)
9160 continue;
9161
9162 new_crtc_state = drm_atomic_get_new_crtc_state(
9163 state, new_con_state->crtc);
9164
9165 if (!new_crtc_state)
9166 continue;
9167
9168 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9169 continue;
9170
9171 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9172 if (!new_dm_crtc_state->stream)
9173 continue;
9174
9175 status = dc_stream_get_status(new_dm_crtc_state->stream);
9176 if (!status)
9177 continue;
9178
9179 aconnector = to_amdgpu_dm_connector(connector);
9180
9181 mutex_lock(&adev->dm.audio_lock);
9182 inst = status->audio_inst;
9183 aconnector->audio_inst = inst;
9184 mutex_unlock(&adev->dm.audio_lock);
9185
9186 amdgpu_dm_audio_eld_notify(adev, inst);
9187 }
9188}
9189
1f6010a9 9190/*
27b3f4fc
LSL
9191 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9192 * @crtc_state: the DRM CRTC state
9193 * @stream_state: the DC stream state.
9194 *
9195 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9196 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9197 */
9198static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9199 struct dc_stream_state *stream_state)
9200{
b9952f93 9201 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 9202}
e7b07cee 9203
b8592b48
LL
9204/**
9205 * amdgpu_dm_atomic_commit_tail() - AMDGPU DM's commit tail implementation.
9206 * @state: The atomic state to commit
9207 *
9208 * This will tell DC to commit the constructed DC state from atomic_check,
9209 * programming the hardware. Any failures here implies a hardware failure, since
9210 * atomic check should have filtered anything non-kosher.
9211 */
7578ecda 9212static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
9213{
9214 struct drm_device *dev = state->dev;
1348969a 9215 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
9216 struct amdgpu_display_manager *dm = &adev->dm;
9217 struct dm_atomic_state *dm_state;
eb3dc897 9218 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 9219 uint32_t i, j;
5cc6dcbd 9220 struct drm_crtc *crtc;
0bc9706d 9221 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
9222 unsigned long flags;
9223 bool wait_for_vblank = true;
9224 struct drm_connector *connector;
c2cea706 9225 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 9226 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 9227 int crtc_disable_count = 0;
6ee90e88 9228 bool mode_set_reset_required = false;
e7b07cee 9229
e8a98235
RS
9230 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9231
e7b07cee
HW
9232 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9233
eb3dc897
NK
9234 dm_state = dm_atomic_get_new_state(state);
9235 if (dm_state && dm_state->context) {
9236 dc_state = dm_state->context;
9237 } else {
9238 /* No state changes, retain current state. */
813d20dc 9239 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
9240 ASSERT(dc_state_temp);
9241 dc_state = dc_state_temp;
9242 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9243 }
e7b07cee 9244
6d90a208
AP
9245 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9246 new_crtc_state, i) {
9247 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9248
9249 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9250
9251 if (old_crtc_state->active &&
9252 (!new_crtc_state->active ||
9253 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9254 manage_dm_interrupts(adev, acrtc, false);
9255 dc_stream_release(dm_old_crtc_state->stream);
9256 }
9257 }
9258
8976f73b
RS
9259 drm_atomic_helper_calc_timestamping_constants(state);
9260
e7b07cee 9261 /* update changed items */
0bc9706d 9262 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 9263 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9264
54d76575
LSL
9265 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9266 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 9267
4711c033 9268 DRM_DEBUG_ATOMIC(
e7b07cee
HW
9269 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9270 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9271 "connectors_changed:%d\n",
9272 acrtc->crtc_id,
0bc9706d
LSL
9273 new_crtc_state->enable,
9274 new_crtc_state->active,
9275 new_crtc_state->planes_changed,
9276 new_crtc_state->mode_changed,
9277 new_crtc_state->active_changed,
9278 new_crtc_state->connectors_changed);
e7b07cee 9279
5c68c652
VL
9280 /* Disable cursor if disabling crtc */
9281 if (old_crtc_state->active && !new_crtc_state->active) {
9282 struct dc_cursor_position position;
9283
9284 memset(&position, 0, sizeof(position));
9285 mutex_lock(&dm->dc_lock);
9286 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9287 mutex_unlock(&dm->dc_lock);
9288 }
9289
27b3f4fc
LSL
9290 /* Copy all transient state flags into dc state */
9291 if (dm_new_crtc_state->stream) {
9292 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9293 dm_new_crtc_state->stream);
9294 }
9295
e7b07cee
HW
9296 /* handles headless hotplug case, updating new_state and
9297 * aconnector as needed
9298 */
9299
54d76575 9300 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 9301
4711c033 9302 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9303
54d76575 9304 if (!dm_new_crtc_state->stream) {
e7b07cee 9305 /*
b830ebc9
HW
9306 * This could happen because of issues with
9307 * userspace notification delivery.
9308 * In that case userspace tries to set a mode on a
1f6010a9
DF
9309 * display which is in fact disconnected.
9310 * dc_sink is NULL on the aconnector in this case.
b830ebc9
HW
9311 * We expect a mode reset to come soon.
9312 *
9313 * This can also happen when an unplug occurs
9314 * while the resume sequence is still running.
9315 *
9316 * In this case, we want to pretend we still
9317 * have a sink to keep the pipe running so that
9318 * hw state is consistent with the sw state.
9319 */
f1ad2f5e 9320 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
9321 __func__, acrtc->base.base.id);
9322 continue;
9323 }
9324
54d76575
LSL
9325 if (dm_old_crtc_state->stream)
9326 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 9327
97028037
LP
9328 pm_runtime_get_noresume(dev->dev);
9329
e7b07cee 9330 acrtc->enabled = true;
0bc9706d
LSL
9331 acrtc->hw_mode = new_crtc_state->mode;
9332 crtc->hwmode = new_crtc_state->mode;
6ee90e88 9333 mode_set_reset_required = true;
0bc9706d 9334 } else if (modereset_required(new_crtc_state)) {
4711c033 9335 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9336 /* i.e. reset mode */
6ee90e88 9337 if (dm_old_crtc_state->stream)
54d76575 9338 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 9339
6ee90e88 9340 mode_set_reset_required = true;
e7b07cee
HW
9341 }
9342 } /* for_each_crtc_in_state() */
9343
eb3dc897 9344 if (dc_state) {
6ee90e88 9345 /* if there was a mode set or reset, disable eDP PSR */
58aa1c50 9346 if (mode_set_reset_required) {
96160687 9347#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
9348 if (dm->vblank_control_workqueue)
9349 flush_workqueue(dm->vblank_control_workqueue);
96160687 9350#endif
6ee90e88 9351 amdgpu_dm_psr_disable_all(dm);
58aa1c50 9352 }
6ee90e88 9353
eb3dc897 9354 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 9355 mutex_lock(&dm->dc_lock);
eb3dc897 9356 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5af50b0b
BR
9357#if defined(CONFIG_DRM_AMD_DC_DCN)
9358 /* Allow idle optimization when vblank count is 0 for display off */
9359 if (dm->active_vblank_irq_count == 0)
9360 dc_allow_idle_optimizations(dm->dc, true);
9361#endif
674e78ac 9362 mutex_unlock(&dm->dc_lock);
fa2123db 9363 }
fe8858bb 9364
0bc9706d 9365 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9366 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9367
54d76575 9368 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9369
54d76575 9370 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 9371 const struct dc_stream_status *status =
54d76575 9372 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 9373
eb3dc897 9374 if (!status)
09f609c3
LL
9375 status = dc_stream_get_status_from_state(dc_state,
9376 dm_new_crtc_state->stream);
e7b07cee 9377 if (!status)
54d76575 9378 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
9379 else
9380 acrtc->otg_inst = status->primary_otg_inst;
9381 }
9382 }
0c8620d6
BL
9383#ifdef CONFIG_DRM_AMD_DC_HDCP
9384 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9385 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9386 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9387 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9388
9389 new_crtc_state = NULL;
9390
9391 if (acrtc)
9392 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9393
9394 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9395
9396 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9397 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9398 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9399 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 9400 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
9401 continue;
9402 }
9403
9404 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
9405 hdcp_update_display(
9406 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 9407 new_con_state->hdcp_content_type,
0e86d3d4 9408 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
0c8620d6
BL
9409 }
9410#endif
e7b07cee 9411
02d6a6fc 9412 /* Handle connector state changes */
c2cea706 9413 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9414 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9415 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9416 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 9417 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 9418 struct dc_stream_update stream_update;
b232d4ed 9419 struct dc_info_packet hdr_packet;
e7b07cee 9420 struct dc_stream_status *status = NULL;
b232d4ed 9421 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 9422
efc8278e 9423 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
9424 memset(&stream_update, 0, sizeof(stream_update));
9425
44d09c6a 9426 if (acrtc) {
0bc9706d 9427 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
9428 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9429 }
0bc9706d 9430
e7b07cee 9431 /* Skip any modesets/resets */
0bc9706d 9432 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
9433 continue;
9434
54d76575 9435 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
9436 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9437
b232d4ed
NK
9438 scaling_changed = is_scaling_state_different(dm_new_con_state,
9439 dm_old_con_state);
9440
9441 abm_changed = dm_new_crtc_state->abm_level !=
9442 dm_old_crtc_state->abm_level;
9443
9444 hdr_changed =
72921cdf 9445 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
b232d4ed
NK
9446
9447 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 9448 continue;
e7b07cee 9449
b6e881c9 9450 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 9451 if (scaling_changed) {
02d6a6fc 9452 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 9453 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 9454
02d6a6fc
DF
9455 stream_update.src = dm_new_crtc_state->stream->src;
9456 stream_update.dst = dm_new_crtc_state->stream->dst;
9457 }
9458
b232d4ed 9459 if (abm_changed) {
02d6a6fc
DF
9460 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9461
9462 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9463 }
70e8ffc5 9464
b232d4ed
NK
9465 if (hdr_changed) {
9466 fill_hdr_info_packet(new_con_state, &hdr_packet);
9467 stream_update.hdr_static_metadata = &hdr_packet;
9468 }
9469
54d76575 9470 status = dc_stream_get_status(dm_new_crtc_state->stream);
57738ae4
ND
9471
9472 if (WARN_ON(!status))
9473 continue;
9474
3be5262e 9475 WARN_ON(!status->plane_count);
e7b07cee 9476
02d6a6fc
DF
9477 /*
9478 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9479 * Here we create an empty update on each plane.
9480 * To fix this, DC should permit updating only stream properties.
9481 */
9482 for (j = 0; j < status->plane_count; j++)
efc8278e 9483 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
9484
9485
9486 mutex_lock(&dm->dc_lock);
9487 dc_commit_updates_for_stream(dm->dc,
efc8278e 9488 dummy_updates,
02d6a6fc
DF
9489 status->plane_count,
9490 dm_new_crtc_state->stream,
efc8278e
AJ
9491 &stream_update,
9492 dc_state);
02d6a6fc 9493 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
9494 }
9495
b5e83f6f 9496 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 9497 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 9498 new_crtc_state, i) {
fe2a1965
LP
9499 if (old_crtc_state->active && !new_crtc_state->active)
9500 crtc_disable_count++;
9501
54d76575 9502 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 9503 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 9504
585d450c
AP
9505 /* For freesync config update on crtc state and params for irq */
9506 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 9507
66b0c973
MK
9508 /* Handle vrr on->off / off->on transitions */
9509 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9510 dm_new_crtc_state);
e7b07cee
HW
9511 }
9512
8fe684e9
NK
9513 /**
9514 * Enable interrupts for CRTCs that are newly enabled or went through
9515 * a modeset. It was intentionally deferred until after the front end
9516 * state was modified to wait until the OTG was on and so the IRQ
9517 * handlers didn't access stale or invalid state.
9518 */
9519 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9520 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 9521#ifdef CONFIG_DEBUG_FS
86bc2219 9522 bool configure_crc = false;
8e7b6fee 9523 enum amdgpu_dm_pipe_crc_source cur_crc_src;
d98af272
WL
9524#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9525 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9526#endif
9527 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9528 cur_crc_src = acrtc->dm_irq_params.crc_src;
9529 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 9530#endif
585d450c
AP
9531 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9532
8fe684e9
NK
9533 if (new_crtc_state->active &&
9534 (!old_crtc_state->active ||
9535 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
9536 dc_stream_retain(dm_new_crtc_state->stream);
9537 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 9538 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 9539
24eb9374 9540#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
9541 /**
9542 * Frontend may have changed so reapply the CRC capture
9543 * settings for the stream.
9544 */
9545 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 9546
8e7b6fee 9547 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
9548 configure_crc = true;
9549#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
9550 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9551 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9552 acrtc->dm_irq_params.crc_window.update_win = true;
9553 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9554 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9555 crc_rd_wrk->crtc = crtc;
9556 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9557 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9558 }
86bc2219 9559#endif
e2881d6d 9560 }
c920888c 9561
86bc2219 9562 if (configure_crc)
bbc49fc0
WL
9563 if (amdgpu_dm_crtc_configure_crc_source(
9564 crtc, dm_new_crtc_state, cur_crc_src))
9565 DRM_DEBUG_DRIVER("Failed to configure crc source\n");
24eb9374 9566#endif
8fe684e9
NK
9567 }
9568 }
e7b07cee 9569
420cd472 9570 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 9571 if (new_crtc_state->async_flip)
420cd472
DF
9572 wait_for_vblank = false;
9573
e7b07cee 9574 /* update planes when needed per crtc*/
5cc6dcbd 9575 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 9576 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9577
54d76575 9578 if (dm_new_crtc_state->stream)
eb3dc897 9579 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 9580 dm, crtc, wait_for_vblank);
e7b07cee
HW
9581 }
9582
6ce8f316
NK
9583 /* Update audio instances for each connector. */
9584 amdgpu_dm_commit_audio(dev, state);
9585
7230362c
AD
9586#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9587 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9588 /* restore the backlight level */
7fd13bae
AD
9589 for (i = 0; i < dm->num_of_edps; i++) {
9590 if (dm->backlight_dev[i] &&
9591 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9592 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9593 }
7230362c 9594#endif
e7b07cee
HW
9595 /*
9596 * Send vblank events for all events not handled in flip and
9597 * mark them consumed for drm_atomic_helper_commit_hw_done.
9598 */
4a580877 9599 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 9600 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9601
0bc9706d
LSL
9602 if (new_crtc_state->event)
9603 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 9604
0bc9706d 9605 new_crtc_state->event = NULL;
e7b07cee 9606 }
4a580877 9607 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 9608
29c8f234
LL
9609 /* Signal HW programming completion */
9610 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
9611
9612 if (wait_for_vblank)
320a1274 9613 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
9614
9615 drm_atomic_helper_cleanup_planes(dev, state);
97028037 9616
5f6fab24
AD
9617 /* return the stolen VGA memory back to VRAM */
9618 if (!adev->mman.keep_stolen_vga_memory)
9619 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9620 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9621
1f6010a9
DF
9622 /*
9623 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
9624 * so we can put the GPU into runtime suspend if we're not driving any
9625 * displays anymore
9626 */
fe2a1965
LP
9627 for (i = 0; i < crtc_disable_count; i++)
9628 pm_runtime_put_autosuspend(dev->dev);
97028037 9629 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
9630
9631 if (dc_state_temp)
9632 dc_release_state(dc_state_temp);
e7b07cee
HW
9633}
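/*
 * Editor's outline (not part of the file): amdgpu_dm_atomic_commit_tail()
 * above runs roughly in these phases:
 *   1. disable interrupts and release streams for CRTCs being reset
 *   2. dc_commit_state() to program the new global stream topology
 *   3. HDCP updates and connector-only (scaling/ABM/HDR) stream updates
 *   4. per-CRTC interrupt re-enable, CRC reconfiguration, and plane
 *      flips via amdgpu_dm_commit_planes()
 *   5. audio notification, vblank event delivery, runtime-PM bookkeeping
 */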
9634
9635
9636static int dm_force_atomic_commit(struct drm_connector *connector)
9637{
9638 int ret = 0;
9639 struct drm_device *ddev = connector->dev;
9640 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9641 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9642 struct drm_plane *plane = disconnected_acrtc->base.primary;
9643 struct drm_connector_state *conn_state;
9644 struct drm_crtc_state *crtc_state;
9645 struct drm_plane_state *plane_state;
9646
9647 if (!state)
9648 return -ENOMEM;
9649
9650 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9651
9652 /* Construct an atomic state to restore the previous display settings */
9653
9654 /*
9655 * Attach connectors to drm_atomic_state
9656 */
9657 conn_state = drm_atomic_get_connector_state(state, connector);
9658
9659 ret = PTR_ERR_OR_ZERO(conn_state);
9660 if (ret)
2dc39051 9661 goto out;
e7b07cee
HW
9662
9663 /* Attach crtc to drm_atomic_state*/
9664 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9665
9666 ret = PTR_ERR_OR_ZERO(crtc_state);
9667 if (ret)
2dc39051 9668 goto out;
e7b07cee
HW
9669
9670 /* force a restore */
9671 crtc_state->mode_changed = true;
9672
9673 /* Attach plane to drm_atomic_state */
9674 plane_state = drm_atomic_get_plane_state(state, plane);
9675
9676 ret = PTR_ERR_OR_ZERO(plane_state);
9677 if (ret)
2dc39051 9678 goto out;
e7b07cee
HW
9679
9680 /* Call commit internally with the state we just constructed */
9681 ret = drm_atomic_commit(state);
e7b07cee 9682
2dc39051 9683out:
e7b07cee 9684 drm_atomic_state_put(state);
2dc39051
VL
9685 if (ret)
9686 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
9687
9688 return ret;
9689}
9690
9691/*
1f6010a9
DF
9692 * This function handles all cases when a set mode does not come upon hotplug.
9693 * This includes when a display is unplugged then plugged back into the
9694 * same port and when running without usermode desktop manager support.
e7b07cee 9695 */
3ee6b26b
AD
9696void dm_restore_drm_connector_state(struct drm_device *dev,
9697 struct drm_connector *connector)
e7b07cee 9698{
c84dec2f 9699 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
9700 struct amdgpu_crtc *disconnected_acrtc;
9701 struct dm_crtc_state *acrtc_state;
9702
9703 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9704 return;
9705
9706 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
9707 if (!disconnected_acrtc)
9708 return;
e7b07cee 9709
70e8ffc5
HW
9710 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9711 if (!acrtc_state->stream)
e7b07cee
HW
9712 return;
9713
9714 /*
9715 * If the previous sink is not released and different from the current,
9716 * we deduce we are in a state where we cannot rely on a usermode call
9717 * to turn on the display, so we do it here
9718 */
9719 if (acrtc_state->stream->sink != aconnector->dc_sink)
9720 dm_force_atomic_commit(&aconnector->base);
9721}
9722
1f6010a9 9723/*
e7b07cee
HW
9724 * Grabs all modesetting locks to serialize against any blocking commits,
9725 * and waits for completion of all non-blocking commits.
9726 */
3ee6b26b
AD
9727static int do_aquire_global_lock(struct drm_device *dev,
9728 struct drm_atomic_state *state)
e7b07cee
HW
9729{
9730 struct drm_crtc *crtc;
9731 struct drm_crtc_commit *commit;
9732 long ret;
9733
1f6010a9
DF
9734 /*
9735 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
9736 * ensure that when the framework releases it, the
9737 * extra locks we are taking here will get released too
9738 */
9739 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9740 if (ret)
9741 return ret;
9742
9743 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9744 spin_lock(&crtc->commit_lock);
9745 commit = list_first_entry_or_null(&crtc->commit_list,
9746 struct drm_crtc_commit, commit_entry);
9747 if (commit)
9748 drm_crtc_commit_get(commit);
9749 spin_unlock(&crtc->commit_lock);
9750
9751 if (!commit)
9752 continue;
9753
1f6010a9
DF
9754 /*
9755 * Make sure all pending HW programming completed and
e7b07cee
HW
9756 * page flips done
9757 */
9758 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9759
9760 if (ret > 0)
9761 ret = wait_for_completion_interruptible_timeout(
9762 &commit->flip_done, 10*HZ);
9763
9764 if (ret == 0)
9765 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 9766 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
9767
9768 drm_crtc_commit_put(commit);
9769 }
9770
9771 return ret < 0 ? ret : 0;
9772}
9773
bb47de73
NK
9774static void get_freesync_config_for_crtc(
9775 struct dm_crtc_state *new_crtc_state,
9776 struct dm_connector_state *new_con_state)
98e6436d
AK
9777{
9778 struct mod_freesync_config config = {0};
98e6436d
AK
9779 struct amdgpu_dm_connector *aconnector =
9780 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 9781 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 9782 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 9783 bool fs_vid_mode = false;
98e6436d 9784
a057ec46 9785 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
9786 vrefresh >= aconnector->min_vfreq &&
9787 vrefresh <= aconnector->max_vfreq;
bb47de73 9788
a057ec46
IB
9789 if (new_crtc_state->vrr_supported) {
9790 new_crtc_state->stream->ignore_msa_timing_param = true;
a85ba005
NC
9791 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9792
9793 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9794 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
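 /* Illustrative values (assumed, not from the driver): a panel
  * advertising a 48-144 Hz FreeSync range yields
  * 48,000,000-144,000,000 uHz here. */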
69ff8845 9795 config.vsif_supported = true;
180db303 9796 config.btr = true;
98e6436d 9797
a85ba005
NC
9798 if (fs_vid_mode) {
9799 config.state = VRR_STATE_ACTIVE_FIXED;
9800 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9801 goto out;
9802 } else if (new_crtc_state->base.vrr_enabled) {
9803 config.state = VRR_STATE_ACTIVE_VARIABLE;
9804 } else {
9805 config.state = VRR_STATE_INACTIVE;
9806 }
9807 }
9808out:
bb47de73
NK
9809 new_crtc_state->freesync_config = config;
9810}
98e6436d 9811
bb47de73
NK
9812static void reset_freesync_config_for_crtc(
9813 struct dm_crtc_state *new_crtc_state)
9814{
9815 new_crtc_state->vrr_supported = false;
98e6436d 9816
bb47de73
NK
9817 memset(&new_crtc_state->vrr_infopacket, 0,
9818 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
9819}
9820
a85ba005
NC
9821static bool
9822is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9823 struct drm_crtc_state *new_crtc_state)
9824{
9825 struct drm_display_mode old_mode, new_mode;
9826
9827 if (!old_crtc_state || !new_crtc_state)
9828 return false;
9829
9830 old_mode = old_crtc_state->mode;
9831 new_mode = new_crtc_state->mode;
9832
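 /*
  * Note: the '!=' checks below are intentional. A freesync video mode
  * switch only stretches the vertical front porch, so vtotal,
  * vsync_start and vsync_end are expected to differ, while every other
  * timing parameter (including the vsync pulse width, i.e.
  * vsync_end - vsync_start) must stay equal.
  */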
9833 if (old_mode.clock == new_mode.clock &&
9834 old_mode.hdisplay == new_mode.hdisplay &&
9835 old_mode.vdisplay == new_mode.vdisplay &&
9836 old_mode.htotal == new_mode.htotal &&
9837 old_mode.vtotal != new_mode.vtotal &&
9838 old_mode.hsync_start == new_mode.hsync_start &&
9839 old_mode.vsync_start != new_mode.vsync_start &&
9840 old_mode.hsync_end == new_mode.hsync_end &&
9841 old_mode.vsync_end != new_mode.vsync_end &&
9842 old_mode.hskew == new_mode.hskew &&
9843 old_mode.vscan == new_mode.vscan &&
9844 (old_mode.vsync_end - old_mode.vsync_start) ==
9845 (new_mode.vsync_end - new_mode.vsync_start))
9846 return true;
9847
9848 return false;
9849}
9850
9851static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9852 uint64_t num, den, res;
9853 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9854
9855 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9856
9857 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9858 den = (unsigned long long)new_crtc_state->mode.htotal *
9859 (unsigned long long)new_crtc_state->mode.vtotal;
9860
9861 res = div_u64(num, den);
9862 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
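 /*
  * Worked example (illustrative numbers, not taken from the driver):
  * for a CEA 1080p60 mode with clock = 148500 kHz, htotal = 2200 and
  * vtotal = 1125, num = 148500 * 1000 * 1000000 and den = 2200 * 1125,
  * so res = 60,000,000 uHz, i.e. a fixed 60 Hz refresh rate.
  */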
9863}
9864
4b9674e5
LL
9865static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9866 struct drm_atomic_state *state,
9867 struct drm_crtc *crtc,
9868 struct drm_crtc_state *old_crtc_state,
9869 struct drm_crtc_state *new_crtc_state,
9870 bool enable,
9871 bool *lock_and_validation_needed)
e7b07cee 9872{
eb3dc897 9873 struct dm_atomic_state *dm_state = NULL;
54d76575 9874 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 9875 struct dc_stream_state *new_stream;
62f55537 9876 int ret = 0;
d4d4a645 9877
1f6010a9
DF
9878 /*
9879 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9880 * update changed items
9881 */
4b9674e5
LL
9882 struct amdgpu_crtc *acrtc = NULL;
9883 struct amdgpu_dm_connector *aconnector = NULL;
9884 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9885 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 9886
4b9674e5 9887 new_stream = NULL;
9635b754 9888
4b9674e5
LL
9889 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9890 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9891 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 9892 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 9893
4b9674e5
LL
9894 /* TODO This hack should go away */
9895 if (aconnector && enable) {
9896 /* Make sure fake sink is created in plug-in scenario */
9897 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9898 &aconnector->base);
9899 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9900 &aconnector->base);
19f89e23 9901
4b9674e5
LL
9902 if (IS_ERR(drm_new_conn_state)) {
9903 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9904 goto fail;
9905 }
19f89e23 9906
4b9674e5
LL
9907 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9908 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 9909
02d35a67
JFZ
9910 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9911 goto skip_modeset;
9912
cbd14ae7
SW
9913 new_stream = create_validate_stream_for_sink(aconnector,
9914 &new_crtc_state->mode,
9915 dm_new_conn_state,
9916 dm_old_crtc_state->stream);
19f89e23 9917
4b9674e5
LL
9918 /*
9919 * we can have no stream on ACTION_SET if a display
9920 * was disconnected during S3, in this case it is not an
9921 * error, the OS will be updated after detection, and
9922 * will do the right thing on next atomic commit
9923 */
19f89e23 9924
4b9674e5
LL
9925 if (!new_stream) {
9926 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9927 __func__, acrtc->base.base.id);
9928 ret = -ENOMEM;
9929 goto fail;
9930 }
e7b07cee 9931
3d4e52d0
VL
9932 /*
9933 * TODO: Check VSDB bits to decide whether this should
9934 * be enabled or not.
9935 */
9936 new_stream->triggered_crtc_reset.enabled =
9937 dm->force_timing_sync;
9938
4b9674e5 9939 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 9940
88694af9
NK
9941 ret = fill_hdr_info_packet(drm_new_conn_state,
9942 &new_stream->hdr_static_metadata);
9943 if (ret)
9944 goto fail;
9945
7e930949
NK
9946 /*
9947 * If we already removed the old stream from the context
9948 * (and set the new stream to NULL) then we can't reuse
9949 * the old stream even if the stream and scaling are unchanged.
9950 * We'll hit the BUG_ON and black screen.
9951 *
9952 * TODO: Refactor this function to allow this check to work
9953 * in all conditions.
9954 */
a85ba005
NC
9955 if (amdgpu_freesync_vid_mode &&
9956 dm_new_crtc_state->stream &&
9957 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9958 goto skip_modeset;
9959
7e930949
NK
9960 if (dm_new_crtc_state->stream &&
9961 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
9962 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9963 new_crtc_state->mode_changed = false;
9964 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9965 new_crtc_state->mode_changed);
62f55537 9966 }
4b9674e5 9967 }
b830ebc9 9968
02d35a67 9969 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
9970 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9971 goto skip_modeset;
e7b07cee 9972
4711c033 9973 DRM_DEBUG_ATOMIC(
4b9674e5
LL
9974 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9975 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9976 "connectors_changed:%d\n",
9977 acrtc->crtc_id,
9978 new_crtc_state->enable,
9979 new_crtc_state->active,
9980 new_crtc_state->planes_changed,
9981 new_crtc_state->mode_changed,
9982 new_crtc_state->active_changed,
9983 new_crtc_state->connectors_changed);
62f55537 9984
4b9674e5
LL
9985 /* Remove stream for any changed/disabled CRTC */
9986 if (!enable) {
62f55537 9987
4b9674e5
LL
9988 if (!dm_old_crtc_state->stream)
9989 goto skip_modeset;
eb3dc897 9990
a85ba005
NC
9991 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9992 is_timing_unchanged_for_freesync(new_crtc_state,
9993 old_crtc_state)) {
9994 new_crtc_state->mode_changed = false;
9995 DRM_DEBUG_DRIVER(
9996 "Mode change not required for front porch change, "
9997 "setting mode_changed to %d",
9998 new_crtc_state->mode_changed);
9999
10000 set_freesync_fixed_config(dm_new_crtc_state);
10001
10002 goto skip_modeset;
10003 } else if (amdgpu_freesync_vid_mode && aconnector &&
10004 is_freesync_video_mode(&new_crtc_state->mode,
10005 aconnector)) {
e88ebd83
SC
10006 struct drm_display_mode *high_mode;
10007
10008 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10009 if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
10010 set_freesync_fixed_config(dm_new_crtc_state);
a85ba005
NC
10012 }
10013
4b9674e5
LL
10014 ret = dm_atomic_get_state(state, &dm_state);
10015 if (ret)
10016 goto fail;
e7b07cee 10017
4b9674e5
LL
10018 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10019 crtc->base.id);
62f55537 10020
4b9674e5
LL
10021 /* i.e. reset mode */
10022 if (dc_remove_stream_from_ctx(
10023 dm->dc,
10024 dm_state->context,
10025 dm_old_crtc_state->stream) != DC_OK) {
10026 ret = -EINVAL;
10027 goto fail;
10028 }
62f55537 10029
4b9674e5
LL
10030 dc_stream_release(dm_old_crtc_state->stream);
10031 dm_new_crtc_state->stream = NULL;
bb47de73 10032
4b9674e5 10033 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 10034
4b9674e5 10035 *lock_and_validation_needed = true;
62f55537 10036
4b9674e5
LL
10037 } else {/* Add stream for any updated/enabled CRTC */
10038 /*
10039 * Quick fix to prevent a NULL pointer dereference on new_stream when
10040 * newly added MST connectors are not found in the existing crtc_state in chained mode.
10041 * TODO: need to dig out the root cause of that
10042 */
10043 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10044 goto skip_modeset;
62f55537 10045
4b9674e5
LL
10046 if (modereset_required(new_crtc_state))
10047 goto skip_modeset;
62f55537 10048
4b9674e5
LL
10049 if (modeset_required(new_crtc_state, new_stream,
10050 dm_old_crtc_state->stream)) {
62f55537 10051
4b9674e5 10052 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 10053
4b9674e5
LL
10054 ret = dm_atomic_get_state(state, &dm_state);
10055 if (ret)
10056 goto fail;
27b3f4fc 10057
4b9674e5 10058 dm_new_crtc_state->stream = new_stream;
62f55537 10059
4b9674e5 10060 dc_stream_retain(new_stream);
1dc90497 10061
4711c033
LT
10062 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10063 crtc->base.id);
1dc90497 10064
4b9674e5
LL
10065 if (dc_add_stream_to_ctx(
10066 dm->dc,
10067 dm_state->context,
10068 dm_new_crtc_state->stream) != DC_OK) {
10069 ret = -EINVAL;
10070 goto fail;
9b690ef3
BL
10071 }
10072
4b9674e5
LL
10073 *lock_and_validation_needed = true;
10074 }
10075 }
e277adc5 10076
4b9674e5
LL
10077skip_modeset:
10078 /* Release extra reference */
10079 if (new_stream)
10080 dc_stream_release(new_stream);
e277adc5 10081
4b9674e5
LL
10082 /*
10083 * We want to do dc stream updates that do not require a
10084 * full modeset below.
10085 */
2afda735 10086 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
10087 return 0;
10088 /*
10089 * Given above conditions, the dc state cannot be NULL because:
10090 * 1. We're in the process of enabling CRTCs (just been added
10091 * to the dc context, or it is already in the context)
10092 * 2. Has a valid connector attached, and
10093 * 3. Is currently active and enabled.
10094 * => The dc stream state currently exists.
10095 */
10096 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 10097
4b9674e5 10098 /* Scaling or underscan settings */
c521fc31
RL
10099 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10100 drm_atomic_crtc_needs_modeset(new_crtc_state))
4b9674e5
LL
10101 update_stream_scaling_settings(
10102 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 10103
b05e2c5e
DF
10104 /* ABM settings */
10105 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10106
4b9674e5
LL
10107 /*
10108 * Color management settings. We also update color properties
10109 * when a modeset is needed, to ensure it gets reprogrammed.
10110 */
10111 if (dm_new_crtc_state->base.color_mgmt_changed ||
10112 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 10113 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
10114 if (ret)
10115 goto fail;
62f55537 10116 }
e7b07cee 10117
4b9674e5
LL
10118 /* Update Freesync settings. */
10119 get_freesync_config_for_crtc(dm_new_crtc_state,
10120 dm_new_conn_state);
10121
62f55537 10122 return ret;
9635b754
DS
10123
10124fail:
10125 if (new_stream)
10126 dc_stream_release(new_stream);
10127 return ret;
62f55537 10128}
9b690ef3 10129
f6ff2a08
NK
10130static bool should_reset_plane(struct drm_atomic_state *state,
10131 struct drm_plane *plane,
10132 struct drm_plane_state *old_plane_state,
10133 struct drm_plane_state *new_plane_state)
10134{
10135 struct drm_plane *other;
10136 struct drm_plane_state *old_other_state, *new_other_state;
10137 struct drm_crtc_state *new_crtc_state;
10138 int i;
10139
70a1efac
NK
10140 /*
10141 * TODO: Remove this hack once the checks below are sufficient
10142 * to determine when we need to reset all the planes on
10143 * the stream.
10144 */
10145 if (state->allow_modeset)
10146 return true;
10147
f6ff2a08
NK
10148 /* Exit early if we know that we're adding or removing the plane. */
10149 if (old_plane_state->crtc != new_plane_state->crtc)
10150 return true;
10151
10152 /* old crtc == new_crtc == NULL, plane not in context. */
10153 if (!new_plane_state->crtc)
10154 return false;
10155
10156 new_crtc_state =
10157 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10158
10159 if (!new_crtc_state)
10160 return true;
10161
7316c4ad
NK
10162 /* CRTC Degamma changes currently require us to recreate planes. */
10163 if (new_crtc_state->color_mgmt_changed)
10164 return true;
10165
f6ff2a08
NK
10166 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10167 return true;
10168
10169 /*
10170 * If there are any new primary or overlay planes being added or
10171 * removed then the z-order can potentially change. To ensure
10172 * correct z-order and pipe acquisition the current DC architecture
10173 * requires us to remove and recreate all existing planes.
10174 *
10175 * TODO: Come up with a more elegant solution for this.
10176 */
10177 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 10178 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
10179 if (other->type == DRM_PLANE_TYPE_CURSOR)
10180 continue;
10181
10182 if (old_other_state->crtc != new_plane_state->crtc &&
10183 new_other_state->crtc != new_plane_state->crtc)
10184 continue;
10185
10186 if (old_other_state->crtc != new_other_state->crtc)
10187 return true;
10188
dc4cb30d
NK
10189 /* Src/dst size and scaling updates. */
10190 if (old_other_state->src_w != new_other_state->src_w ||
10191 old_other_state->src_h != new_other_state->src_h ||
10192 old_other_state->crtc_w != new_other_state->crtc_w ||
10193 old_other_state->crtc_h != new_other_state->crtc_h)
10194 return true;
10195
10196 /* Rotation / mirroring updates. */
10197 if (old_other_state->rotation != new_other_state->rotation)
10198 return true;
10199
10200 /* Blending updates. */
10201 if (old_other_state->pixel_blend_mode !=
10202 new_other_state->pixel_blend_mode)
10203 return true;
10204
10205 /* Alpha updates. */
10206 if (old_other_state->alpha != new_other_state->alpha)
10207 return true;
10208
10209 /* Colorspace changes. */
10210 if (old_other_state->color_range != new_other_state->color_range ||
10211 old_other_state->color_encoding != new_other_state->color_encoding)
10212 return true;
10213
9a81cc60
NK
10214 /* Framebuffer checks fall at the end. */
10215 if (!old_other_state->fb || !new_other_state->fb)
10216 continue;
10217
10218 /* Pixel format changes can require bandwidth updates. */
10219 if (old_other_state->fb->format != new_other_state->fb->format)
10220 return true;
10221
6eed95b0
BN
10222 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10223 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
10224
10225 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
10226 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10227 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
10228 return true;
10229 }
10230
10231 return false;
10232}
10233
b0455fda
SS
10234static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10235 struct drm_plane_state *new_plane_state,
10236 struct drm_framebuffer *fb)
10237{
e72868c4
SS
10238 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10239 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 10240 unsigned int pitch;
e72868c4 10241 bool linear;
b0455fda
SS
10242
10243 if (fb->width > new_acrtc->max_cursor_width ||
10244 fb->height > new_acrtc->max_cursor_height) {
10245 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10246 new_plane_state->fb->width,
10247 new_plane_state->fb->height);
10248 return -EINVAL;
10249 }
10250 if (new_plane_state->src_w != fb->width << 16 ||
10251 new_plane_state->src_h != fb->height << 16) {
10252 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10253 return -EINVAL;
10254 }
10255
10256 /* Pitch in pixels */
10257 pitch = fb->pitches[0] / fb->format->cpp[0];
10258
10259 if (fb->width != pitch) {
10260 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10261 fb->width, pitch);
10262 return -EINVAL;
10263 }
10264
10265 switch (pitch) {
10266 case 64:
10267 case 128:
10268 case 256:
10269 /* FB pitch is supported by cursor plane */
10270 break;
10271 default:
10272 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10273 return -EINVAL;
10274 }
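 /*
  * Worked example (assumed ARGB8888 format): a 64x64 cursor FB has
  * pitches[0] = 256 bytes and cpp[0] = 4, so pitch = 64 pixels, which
  * equals fb->width and is one of the supported pitches above.
  */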
10275
e72868c4
SS
10276 /* Core DRM takes care of checking FB modifiers, so we only need to
10277 * check tiling flags when the FB doesn't have a modifier.
 */
10278 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10279 if (adev->family < AMDGPU_FAMILY_AI) {
10280 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10281 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10282 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10283 } else {
10284 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10285 }
10286 if (!linear) {
10287 DRM_DEBUG_ATOMIC("Cursor FB not linear");
10288 return -EINVAL;
10289 }
10290 }
10291
b0455fda
SS
10292 return 0;
10293}
10294
9e869063
LL
10295static int dm_update_plane_state(struct dc *dc,
10296 struct drm_atomic_state *state,
10297 struct drm_plane *plane,
10298 struct drm_plane_state *old_plane_state,
10299 struct drm_plane_state *new_plane_state,
10300 bool enable,
10301 bool *lock_and_validation_needed)
62f55537 10302{
eb3dc897
NK
10303
10304 struct dm_atomic_state *dm_state = NULL;
62f55537 10305 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 10306 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 10307 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 10308 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 10309 struct amdgpu_crtc *new_acrtc;
f6ff2a08 10310 bool needs_reset;
62f55537 10311 int ret = 0;
e7b07cee 10312
9b690ef3 10313
9e869063
LL
10314 new_plane_crtc = new_plane_state->crtc;
10315 old_plane_crtc = old_plane_state->crtc;
10316 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10317 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 10318
626bf90f
SS
10319 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10320 if (!enable || !new_plane_crtc ||
10321 drm_atomic_plane_disabling(plane->state, new_plane_state))
10322 return 0;
10323
10324 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10325
5f581248
SS
10326 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10327 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10328 return -EINVAL;
10329 }
10330
24f99d2b 10331 if (new_plane_state->fb) {
b0455fda
SS
10332 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10333 new_plane_state->fb);
10334 if (ret)
10335 return ret;
24f99d2b
SS
10336 }
10337
9e869063 10338 return 0;
626bf90f 10339 }
9b690ef3 10340
f6ff2a08
NK
10341 needs_reset = should_reset_plane(state, plane, old_plane_state,
10342 new_plane_state);
10343
9e869063
LL
10344 /* Remove any changed/removed planes */
10345 if (!enable) {
f6ff2a08 10346 if (!needs_reset)
9e869063 10347 return 0;
a7b06724 10348
9e869063
LL
10349 if (!old_plane_crtc)
10350 return 0;
62f55537 10351
9e869063
LL
10352 old_crtc_state = drm_atomic_get_old_crtc_state(
10353 state, old_plane_crtc);
10354 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 10355
9e869063
LL
10356 if (!dm_old_crtc_state->stream)
10357 return 0;
62f55537 10358
9e869063
LL
10359 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10360 plane->base.id, old_plane_crtc->base.id);
9b690ef3 10361
9e869063
LL
10362 ret = dm_atomic_get_state(state, &dm_state);
10363 if (ret)
10364 return ret;
eb3dc897 10365
9e869063
LL
10366 if (!dc_remove_plane_from_context(
10367 dc,
10368 dm_old_crtc_state->stream,
10369 dm_old_plane_state->dc_state,
10370 dm_state->context)) {
62f55537 10371
c3537613 10372 return -EINVAL;
9e869063 10373 }
e7b07cee 10374
9b690ef3 10375
9e869063
LL
10376 dc_plane_state_release(dm_old_plane_state->dc_state);
10377 dm_new_plane_state->dc_state = NULL;
1dc90497 10378
9e869063 10379 *lock_and_validation_needed = true;
1dc90497 10380
9e869063
LL
10381 } else { /* Add new planes */
10382 struct dc_plane_state *dc_new_plane_state;
1dc90497 10383
9e869063
LL
10384 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10385 return 0;
e7b07cee 10386
9e869063
LL
10387 if (!new_plane_crtc)
10388 return 0;
e7b07cee 10389
9e869063
LL
10390 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10391 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 10392
9e869063
LL
10393 if (!dm_new_crtc_state->stream)
10394 return 0;
62f55537 10395
f6ff2a08 10396 if (!needs_reset)
9e869063 10397 return 0;
62f55537 10398
8c44515b
AP
10399 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10400 if (ret)
10401 return ret;
10402
9e869063 10403 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 10404
9e869063
LL
10405 dc_new_plane_state = dc_create_plane_state(dc);
10406 if (!dc_new_plane_state)
10407 return -ENOMEM;
62f55537 10408
4711c033
LT
10409 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10410 plane->base.id, new_plane_crtc->base.id);
8c45c5db 10411
695af5f9 10412 ret = fill_dc_plane_attributes(
1348969a 10413 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
10414 dc_new_plane_state,
10415 new_plane_state,
10416 new_crtc_state);
10417 if (ret) {
10418 dc_plane_state_release(dc_new_plane_state);
10419 return ret;
10420 }
62f55537 10421
9e869063
LL
10422 ret = dm_atomic_get_state(state, &dm_state);
10423 if (ret) {
10424 dc_plane_state_release(dc_new_plane_state);
10425 return ret;
10426 }
eb3dc897 10427
9e869063
LL
10428 /*
10429 * Any atomic check errors that occur after this will
10430 * not need a release. The plane state will be attached
10431 * to the stream, and therefore part of the atomic
10432 * state. It'll be released when the atomic state is
10433 * cleaned.
10434 */
10435 if (!dc_add_plane_to_context(
10436 dc,
10437 dm_new_crtc_state->stream,
10438 dc_new_plane_state,
10439 dm_state->context)) {
62f55537 10440
9e869063
LL
10441 dc_plane_state_release(dc_new_plane_state);
10442 return -EINVAL;
10443 }
8c45c5db 10444
9e869063 10445 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 10446
9e869063
LL
10447 /* Tell DC to do a full surface update every time there
10448 * is a plane change. Inefficient, but works for now.
10449 */
10450 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10451
10452 *lock_and_validation_needed = true;
62f55537 10453 }
e7b07cee
HW
10454
10455
62f55537
AG
10456 return ret;
10457}
a87fa993 10458
12f4849a
SS
10459static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10460 struct drm_crtc *crtc,
10461 struct drm_crtc_state *new_crtc_state)
10462{
10463 struct drm_plane_state *new_cursor_state, *new_primary_state;
10464 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10465
10466 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10467 * cursor per pipe but it's going to inherit the scaling and
10468 * positioning from the underlying pipe. Check that the cursor plane's
10469 * scaling matches the primary plane's.
 */
10470
10471 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10472 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
7df4ceb6
SE
10473 if (!new_cursor_state || !new_primary_state ||
10474 !new_cursor_state->fb || !new_primary_state->fb) {
12f4849a
SS
10475 return 0;
10476 }
10477
10478 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10479 (new_cursor_state->src_w >> 16);
10480 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10481 (new_cursor_state->src_h >> 16);
10482
10483 primary_scale_w = new_primary_state->crtc_w * 1000 /
10484 (new_primary_state->src_w >> 16);
10485 primary_scale_h = new_primary_state->crtc_h * 1000 /
10486 (new_primary_state->src_h >> 16);
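 /*
  * Illustrative numbers (assumed): an unscaled 64x64 cursor
  * (src_w = 64 << 16, crtc_w = 64) gives cursor_scale_w = 1000, while
  * a primary plane upscaled from 1920 to 3840 pixels gives
  * primary_scale_w = 2000; the mismatch check below would then reject
  * the commit with -EINVAL.
  */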
10487
10488 if (cursor_scale_w != primary_scale_w ||
10489 cursor_scale_h != primary_scale_h) {
8333388b 10490 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
12f4849a
SS
10491 return -EINVAL;
10492 }
10493
10494 return 0;
10495}
10496
e10517b3 10497#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
10498static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10499{
10500 struct drm_connector *connector;
10501 struct drm_connector_state *conn_state;
10502 struct amdgpu_dm_connector *aconnector = NULL;
10503 int i;
10504 for_each_new_connector_in_state(state, connector, conn_state, i) {
10505 if (conn_state->crtc != crtc)
10506 continue;
10507
10508 aconnector = to_amdgpu_dm_connector(connector);
10509 if (!aconnector->port || !aconnector->mst_port)
10510 aconnector = NULL;
10511 else
10512 break;
10513 }
10514
10515 if (!aconnector)
10516 return 0;
10517
10518 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10519}
e10517b3 10520#endif
44be939f 10521
16e9b3e5
RS
10522static int validate_overlay(struct drm_atomic_state *state)
10523{
10524 int i;
10525 struct drm_plane *plane;
ed509955 10526 struct drm_plane_state *new_plane_state;
e7d9560a 10527 struct drm_plane_state *primary_state, *overlay_state = NULL;
16e9b3e5
RS
10528
10529 /* Check if primary plane is contained inside overlay */
a6c3c37b 10530 for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
16e9b3e5
RS
10531 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10532 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10533 return 0;
10534
10535 overlay_state = new_plane_state;
10536 continue;
10537 }
10538 }
10539
10540 /* check if we're making changes to the overlay plane */
10541 if (!overlay_state)
10542 return 0;
10543
10544 /* check if overlay plane is enabled */
10545 if (!overlay_state->crtc)
10546 return 0;
10547
10548 /* find the primary plane for the CRTC that the overlay is enabled on */
10549 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10550 if (IS_ERR(primary_state))
10551 return PTR_ERR(primary_state);
10552
10553 /* check if primary plane is enabled */
10554 if (!primary_state->crtc)
10555 return 0;
10556
10557 /* Perform the bounds check to ensure the overlay plane covers the primary */
10558 if (primary_state->crtc_x < overlay_state->crtc_x ||
10559 primary_state->crtc_y < overlay_state->crtc_y ||
10560 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10561 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10562 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10563 return -EINVAL;
10564 }
10565
10566 return 0;
10567}
10568
b8592b48
LL
10569/**
10570 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10571 * @dev: The DRM device
10572 * @state: The atomic state to commit
10573 *
10574 * Validate that the given atomic state is programmable by DC into hardware.
10575 * This involves constructing a &struct dc_state reflecting the new hardware
10576 * state we wish to commit, then querying DC to see if it is programmable. It's
10577 * important not to modify the existing DC state. Otherwise, atomic_check
10578 * may unexpectedly commit hardware changes.
10579 *
10580 * When validating the DC state, it's important that the right locks are
10581 * acquired. For full updates case which removes/adds/updates streams on one
10582 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10583 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 10584 * flip using DRMs synchronization events.
b8592b48
LL
10585 *
10586 * Note that DM adds the affected connectors for all CRTCs in state, when that
10587 * might not seem necessary. This is because DC stream creation requires the
10588 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10589 * be possible but non-trivial - a possible TODO item.
10590 *
10591 * Return: -Error code if validation failed.
10592 */
7578ecda
AD
10593static int amdgpu_dm_atomic_check(struct drm_device *dev,
10594 struct drm_atomic_state *state)
62f55537 10595{
1348969a 10596 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 10597 struct dm_atomic_state *dm_state = NULL;
62f55537 10598 struct dc *dc = adev->dm.dc;
62f55537 10599 struct drm_connector *connector;
c2cea706 10600 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 10601 struct drm_crtc *crtc;
fc9e9920 10602 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
10603 struct drm_plane *plane;
10604 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 10605 enum dc_status status;
1e88ad0a 10606 int ret, i;
62f55537 10607 bool lock_and_validation_needed = false;
886876ec 10608 struct dm_crtc_state *dm_old_crtc_state;
6513104b
HW
10609#if defined(CONFIG_DRM_AMD_DC_DCN)
10610 struct dsc_mst_fairness_vars vars[MAX_PIPES];
10611#endif
62f55537 10612
e8a98235 10613 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 10614
62f55537 10615 ret = drm_atomic_helper_check_modeset(dev, state);
01e28f9c
MD
10616 if (ret)
10617 goto fail;
62f55537 10618
c5892a10
SW
10619 /* Check connector changes */
10620 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10621 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10622 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10623
10624 /* Skip connectors that are disabled or part of modeset already. */
10625 if (!old_con_state->crtc && !new_con_state->crtc)
10626 continue;
10627
10628 if (!new_con_state->crtc)
10629 continue;
10630
10631 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10632 if (IS_ERR(new_crtc_state)) {
10633 ret = PTR_ERR(new_crtc_state);
10634 goto fail;
10635 }
10636
10637 if (dm_old_con_state->abm_level !=
10638 dm_new_con_state->abm_level)
10639 new_crtc_state->connectors_changed = true;
10640 }
10641
e10517b3 10642#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 10643 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
10644 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10645 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10646 ret = add_affected_mst_dsc_crtcs(state, crtc);
10647 if (ret)
10648 goto fail;
10649 }
10650 }
10651 }
e10517b3 10652#endif
1e88ad0a 10653 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
10654 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10655
1e88ad0a 10656 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 10657 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
10658 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10659 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 10660 continue;
7bef1af3 10661
03fc4cf4
MY
10662 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10663 if (ret)
10664 goto fail;
10665
1e88ad0a
S
10666 if (!new_crtc_state->enable)
10667 continue;
fc9e9920 10668
1e88ad0a
S
10669 ret = drm_atomic_add_affected_connectors(state, crtc);
10670 if (ret)
10671 return ret;
fc9e9920 10672
1e88ad0a
S
10673 ret = drm_atomic_add_affected_planes(state, crtc);
10674 if (ret)
10675 goto fail;
115a385c 10676
cbac53f7 10677 if (dm_old_crtc_state->dsc_force_changed)
115a385c 10678 new_crtc_state->mode_changed = true;
e7b07cee
HW
10679 }
10680
2d9e6431
NK
10681 /*
10682 * Add all primary and overlay planes on the CRTC to the state
10683 * whenever a plane is enabled to maintain correct z-ordering
10684 * and to enable fast surface updates.
10685 */
10686 drm_for_each_crtc(crtc, dev) {
10687 bool modified = false;
10688
10689 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10690 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10691 continue;
10692
10693 if (new_plane_state->crtc == crtc ||
10694 old_plane_state->crtc == crtc) {
10695 modified = true;
10696 break;
10697 }
10698 }
10699
10700 if (!modified)
10701 continue;
10702
10703 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10704 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10705 continue;
10706
10707 new_plane_state =
10708 drm_atomic_get_plane_state(state, plane);
10709
10710 if (IS_ERR(new_plane_state)) {
10711 ret = PTR_ERR(new_plane_state);
10712 goto fail;
10713 }
10714 }
10715 }
10716
62f55537 10717 /* Remove exiting planes if they are modified */
9e869063
LL
10718 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10719 ret = dm_update_plane_state(dc, state, plane,
10720 old_plane_state,
10721 new_plane_state,
10722 false,
10723 &lock_and_validation_needed);
10724 if (ret)
10725 goto fail;
62f55537
AG
10726 }
10727
10728 /* Disable all crtcs which require disable */
4b9674e5
LL
10729 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10730 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10731 old_crtc_state,
10732 new_crtc_state,
10733 false,
10734 &lock_and_validation_needed);
10735 if (ret)
10736 goto fail;
62f55537
AG
10737 }
10738
10739 /* Enable all crtcs which require enable */
4b9674e5
LL
10740 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10741 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10742 old_crtc_state,
10743 new_crtc_state,
10744 true,
10745 &lock_and_validation_needed);
10746 if (ret)
10747 goto fail;
62f55537
AG
10748 }
10749
16e9b3e5
RS
10750 ret = validate_overlay(state);
10751 if (ret)
10752 goto fail;
10753
62f55537 10754 /* Add new/modified planes */
9e869063
LL
10755 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10756 ret = dm_update_plane_state(dc, state, plane,
10757 old_plane_state,
10758 new_plane_state,
10759 true,
10760 &lock_and_validation_needed);
10761 if (ret)
10762 goto fail;
62f55537
AG
10763 }
10764
b349f76e
ES
10765 /* Run this here since we want to validate the streams we created */
10766 ret = drm_atomic_helper_check_planes(dev, state);
10767 if (ret)
10768 goto fail;
62f55537 10769
12f4849a
SS
10770 /* Check cursor planes scaling */
10771 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10772 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10773 if (ret)
10774 goto fail;
10775 }
10776
43d10d30
NK
10777 if (state->legacy_cursor_update) {
10778 /*
10779 * This is a fast cursor update coming from the plane update
10780 * helper, check if it can be done asynchronously for better
10781 * performance.
10782 */
10783 state->async_update =
10784 !drm_atomic_helper_async_check(dev, state);
10785
10786 /*
10787 * Skip the remaining global validation if this is an async
10788 * update. Cursor updates can be done without affecting
10789 * state or bandwidth calcs and this avoids the performance
10790 * penalty of locking the private state object and
10791 * allocating a new dc_state.
10792 */
10793 if (state->async_update)
10794 return 0;
10795 }
10796
ebdd27e1 10797 /* Check scaling and underscan changes*/
1f6010a9 10798 /* TODO Removed scaling changes validation due to inability to commit
e7b07cee
HW
10799 * new stream into context w\o causing full reset. Need to
10800 * decide how to handle.
10801 */
c2cea706 10802 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
10803 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10804 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10805 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
10806
10807 /* Skip any modesets/resets */
0bc9706d
LSL
10808 if (!acrtc || drm_atomic_crtc_needs_modeset(
10809 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
10810 continue;
10811
b830ebc9 10812 /* Skip any thing not scale or underscan changes */
54d76575 10813 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
10814 continue;
10815
10816 lock_and_validation_needed = true;
10817 }
10818
f6d7c7fa
NK
10819 /*
10820 * Streams and planes are reset when there are changes that affect
10821 * bandwidth. Anything that affects bandwidth needs to go through
10822 * DC global validation to ensure that the configuration can be applied
10823 * to hardware.
10824 *
10825 * We have to currently stall out here in atomic_check for outstanding
10826 * commits to finish in this case because our IRQ handlers reference
10827 * DRM state directly - we can end up disabling interrupts too early
10828 * if we don't.
10829 *
10830 * TODO: Remove this stall and drop DM state private objects.
a87fa993 10831 */
f6d7c7fa 10832 if (lock_and_validation_needed) {
eb3dc897
NK
10833 ret = dm_atomic_get_state(state, &dm_state);
10834 if (ret)
10835 goto fail;
e7b07cee
HW
10836
10837 ret = do_aquire_global_lock(dev, state);
10838 if (ret)
10839 goto fail;
1dc90497 10840
d9fe1a4c 10841#if defined(CONFIG_DRM_AMD_DC_DCN)
6513104b 10842 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
8c20a1ed
DF
10843 goto fail;
10844
6513104b 10845 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
29b9ba74
ML
10846 if (ret)
10847 goto fail;
d9fe1a4c 10848#endif
29b9ba74 10849
ded58c7b
ZL
10850 /*
10851 * Perform validation of MST topology in the state:
10852 * We need to perform MST atomic check before calling
10853 * dc_validate_global_state(), or there is a chance
10854 * to get stuck in an infinite loop and hang eventually.
10855 */
10856 ret = drm_dp_mst_atomic_check(state);
10857 if (ret)
10858 goto fail;
74a16675
RS
10859 status = dc_validate_global_state(dc, dm_state->context, false);
10860 if (status != DC_OK) {
a906331c
SS
10861 drm_dbg_atomic(dev,
10862 "DC global validation failure: %s (%d)",
74a16675 10863 dc_status_to_str(status), status);
e7b07cee
HW
10864 ret = -EINVAL;
10865 goto fail;
10866 }
bd200d19 10867 } else {
674e78ac 10868 /*
bd200d19
NK
10869 * The commit is a fast update. Fast updates shouldn't change
10870 * the DC context, affect global validation, and can have their
10871 * commit work done in parallel with other commits not touching
10872 * the same resource. If we have a new DC context as part of
10873 * the DM atomic state from validation we need to free it and
10874 * retain the existing one instead.
fde9f39a
MR
10875 *
10876 * Furthermore, since the DM atomic state only contains the DC
10877 * context and can safely be annulled, we can free the state
10878 * and clear the associated private object now to free
10879 * some memory and avoid a possible use-after-free later.
674e78ac 10880 */
bd200d19 10881
fde9f39a
MR
10882 for (i = 0; i < state->num_private_objs; i++) {
10883 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 10884
fde9f39a
MR
10885 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10886 int j = state->num_private_objs-1;
bd200d19 10887
fde9f39a
MR
10888 dm_atomic_destroy_state(obj,
10889 state->private_objs[i].state);
10890
10891 /* If i is not at the end of the array then the
10892 * last element needs to be moved to where i was
10893 * before the array can safely be truncated.
10894 */
10895 if (i != j)
10896 state->private_objs[i] =
10897 state->private_objs[j];
bd200d19 10898
fde9f39a
MR
10899 state->private_objs[j].ptr = NULL;
10900 state->private_objs[j].state = NULL;
10901 state->private_objs[j].old_state = NULL;
10902 state->private_objs[j].new_state = NULL;
10903
10904 state->num_private_objs = j;
10905 break;
10906 }
bd200d19 10907 }
e7b07cee
HW
10908 }
10909
caff0e66
NK
10910 /* Store the overall update type for use later in atomic check. */
10911 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10912 struct dm_crtc_state *dm_new_crtc_state =
10913 to_dm_crtc_state(new_crtc_state);
10914
f6d7c7fa
NK
10915 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10916 UPDATE_TYPE_FULL :
10917 UPDATE_TYPE_FAST;
e7b07cee
HW
10918 }
10919
10920 /* Must be success */
10921 WARN_ON(ret);
e8a98235
RS
10922
10923 trace_amdgpu_dm_atomic_check_finish(state, ret);
10924
e7b07cee
HW
10925 return ret;
10926
10927fail:
10928 if (ret == -EDEADLK)
01e28f9c 10929 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 10930 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 10931 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 10932 else
01e28f9c 10933 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
e7b07cee 10934
e8a98235
RS
10935 trace_amdgpu_dm_atomic_check_finish(state, ret);
10936
e7b07cee
HW
10937 return ret;
10938}
10939
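/*
 * Queries the DP_DOWN_STREAM_PORT_COUNT DPCD register and reports whether
 * the sink sets DP_MSA_TIMING_PAR_IGNORED, i.e. whether it can ignore the
 * MSA timing parameters. DM uses this as the prerequisite for parsing the
 * EDID range descriptor and enabling variable refresh on DP/eDP sinks.
 */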
3ee6b26b
AD
10940static bool is_dp_capable_without_timing_msa(struct dc *dc,
10941 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
10942{
10943 uint8_t dpcd_data;
10944 bool capable = false;
10945
c84dec2f 10946 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
10947 dm_helpers_dp_read_dpcd(
10948 NULL,
c84dec2f 10949 amdgpu_dm_connector->dc_link,
e7b07cee
HW
10950 DP_DOWN_STREAM_PORT_COUNT,
10951 &dpcd_data,
10952 sizeof(dpcd_data))) {
10953 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10954 }
10955
10956 return capable;
10957}
f9b4f20c 10958
46db138d
SW
10959static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10960 unsigned int offset,
10961 unsigned int total_length,
10962 uint8_t *data,
10963 unsigned int length,
10964 struct amdgpu_hdmi_vsdb_info *vsdb)
10965{
10966 bool res;
10967 union dmub_rb_cmd cmd;
10968 struct dmub_cmd_send_edid_cea *input;
10969 struct dmub_cmd_edid_cea_output *output;
10970
10971 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10972 return false;
10973
10974 memset(&cmd, 0, sizeof(cmd));
10975
10976 input = &cmd.edid_cea.data.input;
10977
10978 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10979 cmd.edid_cea.header.sub_type = 0;
10980 cmd.edid_cea.header.payload_bytes =
10981 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10982 input->offset = offset;
10983 input->length = length;
10984 input->total_length = total_length;
10985 memcpy(input->payload, data, length);
10986
10987 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
10988 if (!res) {
10989 DRM_ERROR("EDID CEA parser failed\n");
10990 return false;
10991 }
10992
10993 output = &cmd.edid_cea.data.output;
10994
10995 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10996 if (!output->ack.success) {
10997 DRM_ERROR("EDID CEA ack failed at offset %d\n",
10998 output->ack.offset);
10999 }
11000 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11001 if (!output->amd_vsdb.vsdb_found)
11002 return false;
11003
11004 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11005 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11006 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11007 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11008 } else {
b76a8062 11009 DRM_WARN("Unknown EDID CEA parser results\n");
46db138d
SW
11010 return false;
11011 }
11012
11013 return true;
11014}
11015
11016static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
f9b4f20c
SW
11017 uint8_t *edid_ext, int len,
11018 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11019{
11020 int i;
f9b4f20c
SW
11021
11022 /* send extension block to DMCU for parsing */
11023 for (i = 0; i < len; i += 8) {
11024 bool res;
11025 int offset;
11026
11027 /* send 8 bytes at a time */
46db138d 11028 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
f9b4f20c
SW
11029 return false;
11030
11031 if (i+8 == len) {
11032 /* EDID block sent completed, expect result */
11033 int version, min_rate, max_rate;
11034
46db138d 11035 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
f9b4f20c
SW
11036 if (res) {
11037 /* amd vsdb found */
11038 vsdb_info->freesync_supported = 1;
11039 vsdb_info->amd_vsdb_version = version;
11040 vsdb_info->min_refresh_rate_hz = min_rate;
11041 vsdb_info->max_refresh_rate_hz = max_rate;
11042 return true;
11043 }
11044 /* not amd vsdb */
11045 return false;
11046 }
11047
11048 /* check for ack */
46db138d 11049 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
f9b4f20c
SW
11050 if (!res)
11051 return false;
11052 }
11053
11054 return false;
11055}
11056
46db138d
SW
11057static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11058 uint8_t *edid_ext, int len,
11059 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11060{
11061 int i;
11062
11063 /* send extension block to DMUB for parsing */
11064 for (i = 0; i < len; i += 8) {
11065 /* send 8 bytes at a time */
11066 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11067 return false;
11068 }
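 /*
  * Illustrative sizing note: a standard 128-byte CEA extension block is
  * pushed to the DMUB in 16 chunks of 8 bytes; vsdb_info is only filled
  * in once the firmware replies with an AMD VSDB result, typically on
  * the final chunk.
  */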
11069
11070 return vsdb_info->freesync_supported;
11071}
11072
11073static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11074 uint8_t *edid_ext, int len,
11075 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11076{
11077 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11078
11079 if (adev->dm.dmub_srv)
11080 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11081 else
11082 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11083}
11084
7c7dd774 11085static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
f9b4f20c
SW
11086 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11087{
11088 uint8_t *edid_ext = NULL;
11089 int i;
11090 bool valid_vsdb_found = false;
11091
11092 /*----- drm_find_cea_extension() -----*/
11093 /* No EDID or EDID extensions */
11094 if (edid == NULL || edid->extensions == 0)
7c7dd774 11095 return -ENODEV;
f9b4f20c
SW
11096
11097 /* Find CEA extension */
11098 for (i = 0; i < edid->extensions; i++) {
11099 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11100 if (edid_ext[0] == CEA_EXT)
11101 break;
11102 }
11103
11104 if (i == edid->extensions)
7c7dd774 11105 return -ENODEV;
f9b4f20c
SW
11106
11107 /*----- cea_db_offsets() -----*/
11108 if (edid_ext[0] != CEA_EXT)
7c7dd774 11109 return -ENODEV;
f9b4f20c
SW
11110
11111 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
7c7dd774
AB
11112
11113 return valid_vsdb_found ? i : -ENODEV;
f9b4f20c
SW
11114}
11115
98e6436d
AK
11116void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11117 struct edid *edid)
e7b07cee 11118{
eb0709ba 11119 int i = 0;
e7b07cee
HW
11120 struct detailed_timing *timing;
11121 struct detailed_non_pixel *data;
11122 struct detailed_data_monitor_range *range;
c84dec2f
HW
11123 struct amdgpu_dm_connector *amdgpu_dm_connector =
11124 to_amdgpu_dm_connector(connector);
bb47de73 11125 struct dm_connector_state *dm_con_state = NULL;
9ad54467 11126 struct dc_sink *sink;
e7b07cee
HW
11127
11128 struct drm_device *dev = connector->dev;
1348969a 11129 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 11130 bool freesync_capable = false;
f9b4f20c 11131 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
b830ebc9 11132
8218d7f1
HW
11133 if (!connector->state) {
11134 DRM_ERROR("%s - Connector has no state", __func__);
bb47de73 11135 goto update;
8218d7f1
HW
11136 }
11137
9b2fdc33
AP
11138 sink = amdgpu_dm_connector->dc_sink ?
11139 amdgpu_dm_connector->dc_sink :
11140 amdgpu_dm_connector->dc_em_sink;
11141
11142 if (!edid || !sink) {
98e6436d
AK
11143 dm_con_state = to_dm_connector_state(connector->state);
11144
11145 amdgpu_dm_connector->min_vfreq = 0;
11146 amdgpu_dm_connector->max_vfreq = 0;
11147 amdgpu_dm_connector->pixel_clock_mhz = 0;
9b2fdc33
AP
11148 connector->display_info.monitor_range.min_vfreq = 0;
11149 connector->display_info.monitor_range.max_vfreq = 0;
11150 freesync_capable = false;
98e6436d 11151
bb47de73 11152 goto update;
98e6436d
AK
11153 }
11154
8218d7f1
HW
11155 dm_con_state = to_dm_connector_state(connector->state);
11156
e7b07cee 11157 if (!adev->dm.freesync_module)
bb47de73 11158 goto update;
f9b4f20c
SW
11159
11160
9b2fdc33
AP
11161 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11162 || sink->sink_signal == SIGNAL_TYPE_EDP) {
f9b4f20c
SW
11163 bool edid_check_required = false;
11164
11165 if (edid) {
e7b07cee
HW
11166 edid_check_required = is_dp_capable_without_timing_msa(
11167 adev->dm.dc,
c84dec2f 11168 amdgpu_dm_connector);
e7b07cee 11169 }
e7b07cee 11170
f9b4f20c
SW
11171 if (edid_check_required && (edid->version > 1 ||
11172 (edid->version == 1 && edid->revision > 1))) {
11173 for (i = 0; i < 4; i++) {
e7b07cee 11174
f9b4f20c
SW
11175 timing = &edid->detailed_timings[i];
11176 data = &timing->data.other_data;
11177 range = &data->data.range;
11178 /*
11179 * Check if monitor has continuous frequency mode
11180 */
11181 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11182 continue;
11183 /*
11184 * Check for flag range limits only. If flag == 1 then
11185 * no additional timing information provided.
11186 * Default GTF, GTF Secondary curve and CVT are not
11187 * supported
11188 */
11189 if (range->flags != 1)
11190 continue;
a0ffc3fd 11191
f9b4f20c
SW
11192 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11193 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11194 amdgpu_dm_connector->pixel_clock_mhz =
11195 range->pixel_clock_mhz * 10;
a0ffc3fd 11196
f9b4f20c
SW
11197 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11198 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
e7b07cee 11199
f9b4f20c
SW
11200 break;
11201 }
98e6436d 11202
f9b4f20c
SW
11203 if (amdgpu_dm_connector->max_vfreq -
11204 amdgpu_dm_connector->min_vfreq > 10) {
98e6436d 11205
f9b4f20c
SW
11206 freesync_capable = true;
11207 }
11208 }
9b2fdc33 11209 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
7c7dd774
AB
11210 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11211 if (i >= 0 && vsdb_info.freesync_supported) {
f9b4f20c
SW
11212 timing = &edid->detailed_timings[i];
11213 data = &timing->data.other_data;
11214
11215 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11216 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11217 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11218 freesync_capable = true;
11219
11220 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11221 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
e7b07cee
HW
11222 }
11223 }
bb47de73
NK
11224
11225update:
11226 if (dm_con_state)
11227 dm_con_state->freesync_capable = freesync_capable;
11228
11229 if (connector->vrr_capable_property)
11230 drm_connector_set_vrr_capable_property(connector,
11231 freesync_capable);
e7b07cee
HW
11232}
11233
3d4e52d0
VL
11234void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11235{
1348969a 11236 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
11237 struct dc *dc = adev->dm.dc;
11238 int i;
11239
11240 mutex_lock(&adev->dm.dc_lock);
11241 if (dc->current_state) {
11242 for (i = 0; i < dc->current_state->stream_count; ++i)
11243 dc->current_state->streams[i]
11244 ->triggered_crtc_reset.enabled =
11245 adev->dm.force_timing_sync;
11246
11247 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11248 dc_trigger_sync(dc, dc->current_state);
11249 }
11250 mutex_unlock(&adev->dm.dc_lock);
11251}
9d83722d
RS
11252
11253void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11254 uint32_t value, const char *func_name)
11255{
11256#ifdef DM_CHECK_ADDR_0
11257 if (address == 0) {
11258 DC_ERR("invalid register write. address = 0");
11259 return;
11260 }
11261#endif
11262 cgs_write_register(ctx->cgs_device, address, value);
11263 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11264}
11265
11266uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11267 const char *func_name)
11268{
11269 uint32_t value;
11270#ifdef DM_CHECK_ADDR_0
11271 if (address == 0) {
11272 DC_ERR("invalid register read; address = 0\n");
11273 return 0;
11274 }
11275#endif
11276
11277 if (ctx->dmub_srv &&
11278 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11279 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11280 ASSERT(false);
11281 return 0;
11282 }
11283
11284 value = cgs_read_register(ctx->cgs_device, address);
11285
11286 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11287
11288 return value;
11289}
81927e28
JS
11290
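/*
 * Kicks off a DMUB-assisted AUX transfer and blocks (up to 10 s) on the
 * dmub_aux_transfer_done completion signalled from the DMUB notification
 * path. On success the reply code, and for reads the payload data, are
 * copied back from adev->dm.dmub_notify; on timeout *operation_result is
 * set to AUX_RET_ERROR_TIMEOUT and -1 is returned.
 */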
11291int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
11292 struct aux_payload *payload, enum aux_return_code_type *operation_result)
11293{
11294 struct amdgpu_device *adev = ctx->driver_context;
11295 int ret = 0;
11296
11297 dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
11298 ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
11299 if (ret == 0) {
11300 *operation_result = AUX_RET_ERROR_TIMEOUT;
11301 return -1;
11302 }
11303 *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
11304
11305 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11306 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
11307
11308 // For read case, Copy data to payload
11309 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11310 (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
11311 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11312 adev->dm.dmub_notify->aux_reply.length);
11313 }
11314
11315 return adev->dm.dmub_notify->aux_reply.length;
11316}