/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

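		/* Pack the values into the legacy register layout: the
		 * vertical component in the low 16 bits, the horizontal
		 * component in the high 16 bits.
		 */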
		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

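/*
 * Look up the amdgpu_crtc driven by the given OTG (output timing generator)
 * instance. Warns and falls back to the first CRTC when the interrupt source
 * reports an invalid (-1) instance; returns NULL if no CRTC matches.
 */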
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

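/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Runs after the end of the front porch. In VRR mode, core vblank handling
 * (and BTR processing on pre-DCE12 ASICs) is deferred to this point, since
 * vblank timestamps only give valid results once scanout has left the
 * front porch.
 */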
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, to be read by the AUX-command
 * issuing thread, and signals the event to wake up that thread.
 */
void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Looks up the connector for the
 * reported link index and calls the matching HPD helper to do the
 * processing.
 */
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev = adev->dm.ddev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets the indicator whether callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
				   dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
										      dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by processing pending DMUB notifications
 * and draining the DMUB trace log buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

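	/* Drain the DMCUB outbox0 trace buffer, bounded per interrupt by
	 * DMUB_TRACE_MAX_READ, forwarding each entry to the amdgpu trace
	 * ring.
	 */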
	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

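	/* Size the compressor buffer for the largest advertised mode,
	 * assuming a worst case of 4 bytes per pixel.
	 */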
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

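/*
 * Find the connector backing the requested audio pin and copy out its ELD
 * (EDID-like data) for the paired audio component driver.
 */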
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

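/*
 * Bring up the DMCUB microcontroller: copy the firmware and VBIOS images
 * into the pre-allocated framebuffer regions, program and start the DMUB
 * service, wait for the firmware to auto-load, and hand the service off to
 * DC. Returns 0 without doing anything when the ASIC has no DMUB.
 */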
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;
	struct dc *dc = adev->dm.dc;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

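	/* The firmware image packs the PSP-wrapped instruction/constant
	 * section first, followed by the BSS/data section; derive the
	 * pointer and size of each from the header, skipping the PSP
	 * header and footer around the former.
	 */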
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->asic_type) {
	case CHIP_YELLOW_CARP:
		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
			hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
#endif
		}
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

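	/* The intermediate values are kept at coarse granularity and
	 * widened back below: >> 18 gives 256KB units for the system
	 * aperture, >> 24 gives 16MB units for the AGP aperture, and the
	 * page table addresses are treated as 4KB-aligned (>> 12) values.
	 */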
	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

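	/* Idle optimizations (e.g. MALL) may only be enabled once no CRTC
	 * is left waiting on vblank interrupts.
	 */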
	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

#endif
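
/*
 * Deferred handler for HPD RX interrupts that were offloaded to a
 * workqueue: re-detects the sink under the connector's hpd_lock and,
 * unless a GPU reset is in flight, services automated-test requests or
 * recovers from link loss while holding dc_lock.
 */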
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
		 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
		 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			return NULL;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 1, 0):
			init_data.flags.gpu_vm_support = true;
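			/* For this DCN 2.1 path, keep DMCU enabled only for
			 * DMCUB firmware versions known to be compatible;
			 * newer or unknown versions run with DMCU disabled.
			 */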
			switch (adev->dm.dmcub_fw_version) {
			case 0: /* development */
			case 0x1: /* linux-firmware.git hash 6d9f399 */
			case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
				init_data.flags.disable_dmcu = false;
				break;
			default:
				init_data.flags.disable_dmcu = true;
			}
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
			init_data.flags.gpu_vm_support = true;
			break;
		case IP_VERSION(2, 0, 3):
			init_data.flags.disable_dmcu = true;
			break;
		default:
			break;
		}
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
		adev->dm.dc->debug.disable_dsc = true;
		adev->dm.dc->debug.disable_dsc_edp = true;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
	if (!adev->dm.hpd_rx_offload_wq) {
		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
		goto error;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
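	/* When the ASIC routes AUX/HPD traffic through the DMUB outbox,
	 * allocate the shared notification buffer, create the delayed HPD
	 * workqueue, and register the callbacks that service outbox events.
	 */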
81927e28
JS
1548 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1549 init_completion(&adev->dm.dmub_aux_transfer_done);
1550 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1551 if (!adev->dm.dmub_notify) {
1552 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1553 goto error;
1554 }
e27c41d5
JS
1555
1556 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1557 if (!adev->dm.delayed_hpd_wq) {
1558 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1559 goto error;
1560 }
1561
81927e28 1562 amdgpu_dm_outbox_init(adev);
e27c41d5
JS
1563#if defined(CONFIG_DRM_AMD_DC_DCN)
1564 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1565 dmub_aux_setconfig_callback, false)) {
1566 DRM_ERROR("amdgpu: fail to register dmub aux callback");
1567 goto error;
1568 }
1569 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1570 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1571 goto error;
1572 }
c40a09e5
NK
1573 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1574 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1575 goto error;
1576 }
433e5dec 1577#endif /* CONFIG_DRM_AMD_DC_DCN */
81927e28
JS
1578 }
1579
4562236b
HW
1580 if (amdgpu_dm_initialize_drm_device(adev)) {
1581 DRM_ERROR(
1582 "amdgpu: failed to initialize sw for display support.\n");
1583 goto error;
1584 }
1585
f74367e4
AD
1586 /* create fake encoders for MST */
1587 dm_dp_create_fake_mst_encoders(adev);
1588
4562236b
HW
1589 /* TODO: Add_display_info? */
1590
1591 /* TODO use dynamic cursor width */
4a580877
LT
1592 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1593 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
4562236b 1594
92020e81
AD
1595 /* Disable vblank IRQs aggressively for power-saving */
1596 adev_to_drm(adev)->vblank_disable_immediate = true;
1597
4a580877 1598 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
4562236b
HW
1599 DRM_ERROR(
1600 "amdgpu: failed to initialize vblank support.\n");
1601 goto error;
1602 }
1603
c0fb85ae 1604
f1ad2f5e 1605 DRM_DEBUG_DRIVER("KMS initialized.\n");
4562236b
HW
1606
1607 return 0;
1608error:
1609 amdgpu_dm_fini(adev);
1610
59d0f396 1611 return -EINVAL;
4562236b
HW
1612}
1613
e9669fb7
AG
1614static int amdgpu_dm_early_fini(void *handle)
1615{
1616 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1617
1618 amdgpu_dm_audio_fini(adev);
1619
1620 return 0;
1621}
1622
7578ecda 1623static void amdgpu_dm_fini(struct amdgpu_device *adev)
4562236b 1624{
f74367e4
AD
1625 int i;
1626
09a5df6c
NK
1627#if defined(CONFIG_DRM_AMD_DC_DCN)
1628 if (adev->dm.vblank_control_workqueue) {
1629 destroy_workqueue(adev->dm.vblank_control_workqueue);
1630 adev->dm.vblank_control_workqueue = NULL;
1631 }
1632#endif
1633
f74367e4
AD
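 /* Tear down the fake encoders that were created for MST at init time. */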
1634 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1635 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1636 }
1637
4562236b 1638 amdgpu_dm_destroy_drm_device(&adev->dm);
c8bdf2b6 1639
9a65df19
WL
1640#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1641 if (adev->dm.crc_rd_wrk) {
1642 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1643 kfree(adev->dm.crc_rd_wrk);
1644 adev->dm.crc_rd_wrk = NULL;
1645 }
1646#endif
52704fca
BL
1647#ifdef CONFIG_DRM_AMD_DC_HDCP
1648 if (adev->dm.hdcp_workqueue) {
e96b1b29 1649 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
52704fca
BL
1650 adev->dm.hdcp_workqueue = NULL;
1651 }
1652
1653 if (adev->dm.dc)
1654 dc_deinit_callbacks(adev->dm.dc);
1655#endif
51ba6912 1656
3beac533 1657 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
9a71c7d3 1658
81927e28
JS
1659 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1660 kfree(adev->dm.dmub_notify);
1661 adev->dm.dmub_notify = NULL;
e27c41d5
JS
1662 destroy_workqueue(adev->dm.delayed_hpd_wq);
1663 adev->dm.delayed_hpd_wq = NULL;
81927e28
JS
1664 }
1665
743b9786
NK
1666 if (adev->dm.dmub_bo)
1667 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1668 &adev->dm.dmub_bo_gpu_addr,
1669 &adev->dm.dmub_bo_cpu_addr);
52704fca 1670
006c26a0
AG
1671 if (adev->dm.hpd_rx_offload_wq) {
1672 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1673 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1674 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1675 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1676 }
1677 }
1678
1679 kfree(adev->dm.hpd_rx_offload_wq);
1680 adev->dm.hpd_rx_offload_wq = NULL;
1681 }
1682
c8bdf2b6
ED
1683 /* DC Destroy TODO: Replace destroy DAL */
1684 if (adev->dm.dc)
1685 dc_destroy(&adev->dm.dc);
4562236b
HW
1686 /*
1687 * TODO: pageflip, vblank interrupt
1688 *
1689 * amdgpu_dm_irq_fini(adev);
1690 */
1691
1692 if (adev->dm.cgs_device) {
1693 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1694 adev->dm.cgs_device = NULL;
1695 }
1696 if (adev->dm.freesync_module) {
1697 mod_freesync_destroy(adev->dm.freesync_module);
1698 adev->dm.freesync_module = NULL;
1699 }
674e78ac 1700
6ce8f316 1701 mutex_destroy(&adev->dm.audio_lock);
674e78ac
NK
1702 mutex_destroy(&adev->dm.dc_lock);
1703
4562236b
HW
1704 return;
1705}
1706
a94d5569 1707static int load_dmcu_fw(struct amdgpu_device *adev)
4562236b 1708{
a7669aff 1709 const char *fw_name_dmcu = NULL;
a94d5569
DF
1710 int r;
1711 const struct dmcu_firmware_header_v1_0 *hdr;
1712
1713 switch (adev->asic_type) {
55e56389
MR
1714#if defined(CONFIG_DRM_AMD_DC_SI)
1715 case CHIP_TAHITI:
1716 case CHIP_PITCAIRN:
1717 case CHIP_VERDE:
1718 case CHIP_OLAND:
1719#endif
a94d5569
DF
1720 case CHIP_BONAIRE:
1721 case CHIP_HAWAII:
1722 case CHIP_KAVERI:
1723 case CHIP_KABINI:
1724 case CHIP_MULLINS:
1725 case CHIP_TONGA:
1726 case CHIP_FIJI:
1727 case CHIP_CARRIZO:
1728 case CHIP_STONEY:
1729 case CHIP_POLARIS11:
1730 case CHIP_POLARIS10:
1731 case CHIP_POLARIS12:
1732 case CHIP_VEGAM:
1733 case CHIP_VEGA10:
1734 case CHIP_VEGA12:
1735 case CHIP_VEGA20:
1736 return 0;
5ea23931
RL
1737 case CHIP_NAVI12:
1738 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1739 break;
a94d5569 1740 case CHIP_RAVEN:
a7669aff
HW
1741 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1742 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1743 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1744 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1745 else
a7669aff 1746 return 0;
a94d5569
DF
1747 break;
1748 default:
1d789535 1749 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
1750 case IP_VERSION(2, 0, 2):
1751 case IP_VERSION(2, 0, 3):
1752 case IP_VERSION(2, 0, 0):
1753 case IP_VERSION(2, 1, 0):
1754 case IP_VERSION(3, 0, 0):
1755 case IP_VERSION(3, 0, 2):
1756 case IP_VERSION(3, 0, 3):
1757 case IP_VERSION(3, 0, 1):
1758 case IP_VERSION(3, 1, 2):
1759 case IP_VERSION(3, 1, 3):
1760 return 0;
1761 default:
1762 break;
1763 }
a94d5569 1764 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
59d0f396 1765 return -EINVAL;
a94d5569
DF
1766 }
1767
1768 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1769 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1770 return 0;
1771 }
1772
1773 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1774 if (r == -ENOENT) {
1775 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1776 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1777 adev->dm.fw_dmcu = NULL;
1778 return 0;
1779 }
1780 if (r) {
1781 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1782 fw_name_dmcu);
1783 return r;
1784 }
1785
1786 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1787 if (r) {
1788 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1789 fw_name_dmcu);
1790 release_firmware(adev->dm.fw_dmcu);
1791 adev->dm.fw_dmcu = NULL;
1792 return r;
1793 }
1794
1795 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1796 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1797 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1798 adev->firmware.fw_size +=
1799 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1800
1801 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1802 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1803 adev->firmware.fw_size +=
1804 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1805
ee6e89c0
DF
1806 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1807
a94d5569
DF
1808 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1809
4562236b
HW
1810 return 0;
1811}
1812
743b9786
NK
1813static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1814{
1815 struct amdgpu_device *adev = ctx;
1816
1817 return dm_read_reg(adev->dm.dc->ctx, address);
1818}
1819
1820static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1821 uint32_t value)
1822{
1823 struct amdgpu_device *adev = ctx;
1824
1825 return dm_write_reg(adev->dm.dc->ctx, address, value);
1826}
1827
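/*
 * dm_dmub_sw_init() - software-side DMUB setup: select the DMUB firmware
 * for this DCN revision, create the dmub_srv service, size its firmware
 * regions, back them with a VRAM allocation, and record the per-region
 * CPU/GPU addresses in adev->dm.dmub_fb_info for later hardware init.
 */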
1828static int dm_dmub_sw_init(struct amdgpu_device *adev)
1829{
1830 struct dmub_srv_create_params create_params;
8c7aea40
NK
1831 struct dmub_srv_region_params region_params;
1832 struct dmub_srv_region_info region_info;
1833 struct dmub_srv_fb_params fb_params;
1834 struct dmub_srv_fb_info *fb_info;
1835 struct dmub_srv *dmub_srv;
743b9786
NK
1836 const struct dmcub_firmware_header_v1_0 *hdr;
1837 const char *fw_name_dmub;
1838 enum dmub_asic dmub_asic;
1839 enum dmub_status status;
1840 int r;
1841
1d789535 1842 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2 1843 case IP_VERSION(2, 1, 0):
743b9786
NK
1844 dmub_asic = DMUB_ASIC_DCN21;
1845 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
71c0fd92
RL
1846 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1847 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
743b9786 1848 break;
c08182f2 1849 case IP_VERSION(3, 0, 0):
1d789535 1850 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
c08182f2
AD
1851 dmub_asic = DMUB_ASIC_DCN30;
1852 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1853 } else {
1854 dmub_asic = DMUB_ASIC_DCN30;
1855 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1856 }
79037324 1857 break;
c08182f2 1858 case IP_VERSION(3, 0, 1):
469989ca
RL
1859 dmub_asic = DMUB_ASIC_DCN301;
1860 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1861 break;
c08182f2 1862 case IP_VERSION(3, 0, 2):
2a411205
BL
1863 dmub_asic = DMUB_ASIC_DCN302;
1864 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1865 break;
c08182f2 1866 case IP_VERSION(3, 0, 3):
656fe9b6
AP
1867 dmub_asic = DMUB_ASIC_DCN303;
1868 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1869 break;
c08182f2
AD
1870 case IP_VERSION(3, 1, 2):
1871 case IP_VERSION(3, 1, 3):
3137f792 1872 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1ebcaebd
NK
1873 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1874 break;
743b9786
NK
1875
1876 default:
1877 /* ASIC doesn't support DMUB. */
1878 return 0;
1879 }
1880
743b9786
NK
1881 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1882 if (r) {
1883 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1884 return 0;
1885 }
1886
1887 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1888 if (r) {
1889 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1890 return 0;
1891 }
1892
743b9786 1893 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
72a74a18 1894 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
743b9786 1895
9a6ed547
NK
1896 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1897 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1898 AMDGPU_UCODE_ID_DMCUB;
1899 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1900 adev->dm.dmub_fw;
1901 adev->firmware.fw_size +=
1902 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
743b9786 1903
9a6ed547
NK
1904 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1905 adev->dm.dmcub_fw_version);
1906 }
1907
743b9786 1908
8c7aea40
NK
1909 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1910 dmub_srv = adev->dm.dmub_srv;
1911
1912 if (!dmub_srv) {
1913 DRM_ERROR("Failed to allocate DMUB service!\n");
1914 return -ENOMEM;
1915 }
1916
1917 memset(&create_params, 0, sizeof(create_params));
1918 create_params.user_ctx = adev;
1919 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1920 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1921 create_params.asic = dmub_asic;
1922
1923 /* Create the DMUB service. */
1924 status = dmub_srv_create(dmub_srv, &create_params);
1925 if (status != DMUB_STATUS_OK) {
1926 DRM_ERROR("Error creating DMUB service: %d\n", status);
1927 return -EINVAL;
1928 }
1929
1930 /* Calculate the size of all the regions for the DMUB service. */
1931 memset(&region_params, 0, sizeof(region_params));
1932
1933 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1934 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1935 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1936 region_params.vbios_size = adev->bios_size;
0922b899 1937 region_params.fw_bss_data = region_params.bss_data_size ?
1f0674fd
NK
1938 adev->dm.dmub_fw->data +
1939 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
0922b899 1940 le32_to_cpu(hdr->inst_const_bytes) : NULL;
a576b345
NK
1941 region_params.fw_inst_const =
1942 adev->dm.dmub_fw->data +
1943 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1944 PSP_HEADER_BYTES;
8c7aea40
NK
1945
1946 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1947 &region_info);
1948
1949 if (status != DMUB_STATUS_OK) {
1950 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1951 return -EINVAL;
1952 }
1953
1954 /*
1955 * Allocate a framebuffer based on the total size of all the regions.
1956 * TODO: Move this into GART.
1957 */
1958 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1959 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1960 &adev->dm.dmub_bo_gpu_addr,
1961 &adev->dm.dmub_bo_cpu_addr);
1962 if (r)
1963 return r;
1964
1965 /* Rebase the regions on the framebuffer address. */
1966 memset(&fb_params, 0, sizeof(fb_params));
1967 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1968 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1969 fb_params.region_info = &region_info;
1970
1971 adev->dm.dmub_fb_info =
1972 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1973 fb_info = adev->dm.dmub_fb_info;
1974
1975 if (!fb_info) {
1976 DRM_ERROR(
1977 "Failed to allocate framebuffer info for DMUB service!\n");
1978 return -ENOMEM;
1979 }
1980
1981 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1982 if (status != DMUB_STATUS_OK) {
1983 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1984 return -EINVAL;
1985 }
1986
743b9786
NK
1987 return 0;
1988}
1989
a94d5569
DF
1990static int dm_sw_init(void *handle)
1991{
1992 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
743b9786
NK
1993 int r;
1994
1995 r = dm_dmub_sw_init(adev);
1996 if (r)
1997 return r;
a94d5569
DF
1998
1999 return load_dmcu_fw(adev);
2000}
2001
4562236b
HW
2002static int dm_sw_fini(void *handle)
2003{
a94d5569
DF
2004 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2005
8c7aea40
NK
2006 kfree(adev->dm.dmub_fb_info);
2007 adev->dm.dmub_fb_info = NULL;
2008
743b9786
NK
2009 if (adev->dm.dmub_srv) {
2010 dmub_srv_destroy(adev->dm.dmub_srv);
2011 adev->dm.dmub_srv = NULL;
2012 }
2013
75e1658e
ND
2014 release_firmware(adev->dm.dmub_fw);
2015 adev->dm.dmub_fw = NULL;
743b9786 2016
75e1658e
ND
2017 release_firmware(adev->dm.fw_dmcu);
2018 adev->dm.fw_dmcu = NULL;
a94d5569 2019
4562236b
HW
2020 return 0;
2021}
2022
7abcf6b5 2023static int detect_mst_link_for_all_connectors(struct drm_device *dev)
4562236b 2024{
c84dec2f 2025 struct amdgpu_dm_connector *aconnector;
4562236b 2026 struct drm_connector *connector;
f8d2d39e 2027 struct drm_connector_list_iter iter;
7abcf6b5 2028 int ret = 0;
4562236b 2029
f8d2d39e
LP
2030 drm_connector_list_iter_begin(dev, &iter);
2031 drm_for_each_connector_iter(connector, &iter) {
b349f76e 2032 aconnector = to_amdgpu_dm_connector(connector);
30ec2b97
JFZ
2033 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2034 aconnector->mst_mgr.aux) {
f1ad2f5e 2035 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
f8d2d39e
LP
2036 aconnector,
2037 aconnector->base.base.id);
7abcf6b5
AG
2038
2039 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2040 if (ret < 0) {
2041 DRM_ERROR("DM_MST: Failed to start MST\n");
f8d2d39e
LP
2042 aconnector->dc_link->type =
2043 dc_connection_single;
2044 break;
7abcf6b5 2045 }
f8d2d39e 2046 }
4562236b 2047 }
f8d2d39e 2048 drm_connector_list_iter_end(&iter);
4562236b 2049
7abcf6b5
AG
2050 return ret;
2051}
2052
2053static int dm_late_init(void *handle)
2054{
42e67c3b 2055 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7abcf6b5 2056
bbf854dc
DF
2057 struct dmcu_iram_parameters params;
2058 unsigned int linear_lut[16];
2059 int i;
17bdb4a8 2060 struct dmcu *dmcu = NULL;
bbf854dc 2061
17bdb4a8
JFZ
2062 dmcu = adev->dm.dc->res_pool->dmcu;
2063
bbf854dc
DF
2064 for (i = 0; i < 16; i++)
2065 linear_lut[i] = 0xFFFF * i / 15;
2066
2067 params.set = 0;
75068994 2068 params.backlight_ramping_override = false;
bbf854dc
DF
2069 params.backlight_ramping_start = 0xCCCC;
2070 params.backlight_ramping_reduction = 0xCCCCCCCC;
2071 params.backlight_lut_array_size = 16;
2072 params.backlight_lut_array = linear_lut;
2073
2ad0cdf9
AK
2074 /* Min backlight level after ABM reduction; don't allow below 1%:
2075 * 0xFFFF x 0.01 = 0x28F
2076 */
2077 params.min_abm_backlight = 0x28F;
5cb32419 2078 /* In the case where abm is implemented on dmcub,
6e568e43
JW
2079 * the dmcu object will be NULL.
2080 * ABM 2.4 and up are implemented on dmcub.
2081 */
2082 if (dmcu) {
2083 if (!dmcu_load_iram(dmcu, params))
2084 return -EINVAL;
2085 } else if (adev->dm.dc->ctx->dmub_srv) {
2086 struct dc_link *edp_links[MAX_NUM_EDP];
2087 int edp_num;
bbf854dc 2088
6e568e43
JW
2089 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2090 for (i = 0; i < edp_num; i++) {
2091 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2092 return -EINVAL;
2093 }
2094 }
bbf854dc 2095
4a580877 2096 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
4562236b
HW
2097}
2098
2099static void s3_handle_mst(struct drm_device *dev, bool suspend)
2100{
c84dec2f 2101 struct amdgpu_dm_connector *aconnector;
4562236b 2102 struct drm_connector *connector;
f8d2d39e 2103 struct drm_connector_list_iter iter;
fe7553be
LP
2104 struct drm_dp_mst_topology_mgr *mgr;
2105 int ret;
2106 bool need_hotplug = false;
4562236b 2107
f8d2d39e
LP
2108 drm_connector_list_iter_begin(dev, &iter);
2109 drm_for_each_connector_iter(connector, &iter) {
fe7553be
LP
2110 aconnector = to_amdgpu_dm_connector(connector);
2111 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2112 aconnector->mst_port)
2113 continue;
2114
2115 mgr = &aconnector->mst_mgr;
2116
2117 if (suspend) {
2118 drm_dp_mst_topology_mgr_suspend(mgr);
2119 } else {
6f85f738 2120 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
fe7553be
LP
2121 if (ret < 0) {
2122 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2123 need_hotplug = true;
2124 }
2125 }
4562236b 2126 }
f8d2d39e 2127 drm_connector_list_iter_end(&iter);
fe7553be
LP
2128
2129 if (need_hotplug)
2130 drm_kms_helper_hotplug_event(dev);
4562236b
HW
2131}
2132
9340dfd3
HW
2133static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2134{
2135 struct smu_context *smu = &adev->smu;
2136 int ret = 0;
2137
2138 if (!is_support_sw_smu(adev))
2139 return 0;
2140
2141 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2142 * on the Windows driver dc implementation.
2143 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2144 * should be passed to smu during boot up and resume from s3.
2145 * boot up: dc calculates dcn watermark clock settings within dc_create,
2146 * dcn20_resource_construct
2147 * then call pplib functions below to pass the settings to smu:
2148 * smu_set_watermarks_for_clock_ranges
2149 * smu_set_watermarks_table
2150 * navi10_set_watermarks_table
2151 * smu_write_watermarks_table
2152 *
2153 * For Renoir, clock settings of dcn watermark are also fixed values.
2154 * dc has implemented different flow for window driver:
2155 * dc_hardware_init / dc_set_power_state
2156 * dcn10_init_hw
2157 * notify_wm_ranges
2158 * set_wm_ranges
2159 * -- Linux
2160 * smu_set_watermarks_for_clock_ranges
2161 * renoir_set_watermarks_table
2162 * smu_write_watermarks_table
2163 *
2164 * For Linux,
2165 * dc_hardware_init -> amdgpu_dm_init
2166 * dc_set_power_state --> dm_resume
2167 *
2168 * therefore, this function applies to navi10/12/14 but not Renoir.
2169 *
2170 */
1d789535 2171 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
2172 case IP_VERSION(2, 0, 2):
2173 case IP_VERSION(2, 0, 0):
9340dfd3
HW
2174 break;
2175 default:
2176 return 0;
2177 }
2178
e7a95eea
EQ
2179 ret = smu_write_watermarks_table(smu);
2180 if (ret) {
2181 DRM_ERROR("Failed to update WMTABLE!\n");
2182 return ret;
9340dfd3
HW
2183 }
2184
9340dfd3
HW
2185 return 0;
2186}
2187
b8592b48
LL
2188/**
2189 * dm_hw_init() - Initialize DC device
28d687ea 2190 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
2191 *
2192 * Initialize the &struct amdgpu_display_manager device. This involves calling
2193 * the initializers of each DM component, then populating the struct with them.
2194 *
2195 * Although the function implies hardware initialization, both hardware and
2196 * software are initialized here. Splitting them out to their relevant init
2197 * hooks is a future TODO item.
2198 *
2199 * Some notable things that are initialized here:
2200 *
2201 * - Display Core, both software and hardware
2202 * - DC modules that we need (freesync and color management)
2203 * - DRM software states
2204 * - Interrupt sources and handlers
2205 * - Vblank support
2206 * - Debug FS entries, if enabled
2207 */
4562236b
HW
2208static int dm_hw_init(void *handle)
2209{
2210 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2211 /* Create DAL display manager */
2212 amdgpu_dm_init(adev);
4562236b
HW
2213 amdgpu_dm_hpd_init(adev);
2214
4562236b
HW
2215 return 0;
2216}
2217
b8592b48
LL
2218/**
2219 * dm_hw_fini() - Teardown DC device
28d687ea 2220 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
2221 *
2222 * Teardown components within &struct amdgpu_display_manager that require
2223 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2224 * were loaded. Also flush IRQ workqueues and disable them.
2225 */
4562236b
HW
2226static int dm_hw_fini(void *handle)
2227{
2228 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2229
2230 amdgpu_dm_hpd_fini(adev);
2231
2232 amdgpu_dm_irq_fini(adev);
21de3396 2233 amdgpu_dm_fini(adev);
4562236b
HW
2234 return 0;
2235}
2236
cdaae837
BL
2237
2238static int dm_enable_vblank(struct drm_crtc *crtc);
2239static void dm_disable_vblank(struct drm_crtc *crtc);
2240
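/*
 * Enable or disable the pageflip and vblank interrupts for every active
 * stream in the given dc_state; used to quiesce and restore IRQs around
 * GPU reset (see dm_suspend()/dm_resume()).
 */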
2241static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2242 struct dc_state *state, bool enable)
2243{
2244 enum dc_irq_source irq_source;
2245 struct amdgpu_crtc *acrtc;
2246 int rc = -EBUSY;
2247 int i = 0;
2248
2249 for (i = 0; i < state->stream_count; i++) {
2250 acrtc = get_crtc_by_otg_inst(
2251 adev, state->stream_status[i].primary_otg_inst);
2252
2253 if (acrtc && state->stream_status[i].plane_count != 0) {
2254 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2255 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4711c033
LT
2256 DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2257 acrtc->crtc_id, enable ? "en" : "dis", rc);
cdaae837
BL
2258 if (rc)
2259 DRM_WARN("Failed to %s pflip interrupts\n",
2260 enable ? "enable" : "disable");
2261
2262 if (enable) {
2263 rc = dm_enable_vblank(&acrtc->base);
2264 if (rc)
2265 DRM_WARN("Failed to enable vblank interrupts\n");
2266 } else {
2267 dm_disable_vblank(&acrtc->base);
2268 }
2269
2270 }
2271 }
2272
2273}
2274
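/*
 * Commit a copy of the current dc_state with every plane and stream
 * removed, leaving the hardware driving nothing; called on the GPU reset
 * suspend path.
 */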
dfd84d90 2275static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
2276{
2277 struct dc_state *context = NULL;
2278 enum dc_status res = DC_ERROR_UNEXPECTED;
2279 int i;
2280 struct dc_stream_state *del_streams[MAX_PIPES];
2281 int del_streams_count = 0;
2282
2283 memset(del_streams, 0, sizeof(del_streams));
2284
2285 context = dc_create_state(dc);
2286 if (context == NULL)
2287 goto context_alloc_fail;
2288
2289 dc_resource_state_copy_construct_current(dc, context);
2290
2291 /* First remove from context all streams */
2292 for (i = 0; i < context->stream_count; i++) {
2293 struct dc_stream_state *stream = context->streams[i];
2294
2295 del_streams[del_streams_count++] = stream;
2296 }
2297
2298 /* Remove all planes for removed streams and then remove the streams */
2299 for (i = 0; i < del_streams_count; i++) {
2300 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2301 res = DC_FAIL_DETACH_SURFACES;
2302 goto fail;
2303 }
2304
2305 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2306 if (res != DC_OK)
2307 goto fail;
2308 }
2309
2310
2311 res = dc_validate_global_state(dc, context, false);
2312
2313 if (res != DC_OK) {
2314 DRM_ERROR("%s: resource validation failed, dc_status:%d\n", __func__, res);
2315 goto fail;
2316 }
2317
2318 res = dc_commit_state(dc, context);
2319
2320fail:
2321 dc_release_state(context);
2322
2323context_alloc_fail:
2324 return res;
2325}
2326
8e794421
WL
2327static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2328{
2329 int i;
2330
2331 if (dm->hpd_rx_offload_wq) {
2332 for (i = 0; i < dm->dc->caps.max_links; i++)
2333 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2334 }
2335}
2336
4562236b
HW
2337static int dm_suspend(void *handle)
2338{
2339 struct amdgpu_device *adev = handle;
2340 struct amdgpu_display_manager *dm = &adev->dm;
2341 int ret = 0;
4562236b 2342
53b3f8f4 2343 if (amdgpu_in_reset(adev)) {
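 /* Held across the whole GPU reset; the matching unlock is in dm_resume(). */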
cdaae837 2344 mutex_lock(&dm->dc_lock);
98ab5f35
BL
2345
2346#if defined(CONFIG_DRM_AMD_DC_DCN)
2347 dc_allow_idle_optimizations(adev->dm.dc, false);
2348#endif
2349
cdaae837
BL
2350 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2351
2352 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2353
2354 amdgpu_dm_commit_zero_streams(dm->dc);
2355
2356 amdgpu_dm_irq_suspend(adev);
2357
8e794421
WL
2358 hpd_rx_irq_work_suspend(dm);
2359
cdaae837
BL
2360 return ret;
2361 }
4562236b 2362
d2f0b53b 2363 WARN_ON(adev->dm.cached_state);
4a580877 2364 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 2365
4a580877 2366 s3_handle_mst(adev_to_drm(adev), true);
4562236b 2367
4562236b
HW
2368 amdgpu_dm_irq_suspend(adev);
2369
8e794421
WL
2370 hpd_rx_irq_work_suspend(dm);
2371
32f5062d 2372 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 2373
1c2075d4 2374 return 0;
4562236b
HW
2375}
2376
1daf8c63
AD
2377static struct amdgpu_dm_connector *
2378amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2379 struct drm_crtc *crtc)
4562236b
HW
2380{
2381 uint32_t i;
c2cea706 2382 struct drm_connector_state *new_con_state;
4562236b
HW
2383 struct drm_connector *connector;
2384 struct drm_crtc *crtc_from_state;
2385
c2cea706
LSL
2386 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2387 crtc_from_state = new_con_state->crtc;
4562236b
HW
2388
2389 if (crtc_from_state == crtc)
c84dec2f 2390 return to_amdgpu_dm_connector(connector);
4562236b
HW
2391 }
2392
2393 return NULL;
2394}
2395
fbbdadf2
BL
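/*
 * For connectors forced on from userspace while no physical sink is
 * attached, fabricate a sink of the matching signal type so that a mode
 * can still be set.
 */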
2396static void emulated_link_detect(struct dc_link *link)
2397{
2398 struct dc_sink_init_data sink_init_data = { 0 };
2399 struct display_sink_capability sink_caps = { 0 };
2400 enum dc_edid_status edid_status;
2401 struct dc_context *dc_ctx = link->ctx;
2402 struct dc_sink *sink = NULL;
2403 struct dc_sink *prev_sink = NULL;
2404
2405 link->type = dc_connection_none;
2406 prev_sink = link->local_sink;
2407
30164a16
VL
2408 if (prev_sink)
2409 dc_sink_release(prev_sink);
fbbdadf2
BL
2410
2411 switch (link->connector_signal) {
2412 case SIGNAL_TYPE_HDMI_TYPE_A: {
2413 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2414 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2415 break;
2416 }
2417
2418 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2419 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2420 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2421 break;
2422 }
2423
2424 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2425 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2426 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2427 break;
2428 }
2429
2430 case SIGNAL_TYPE_LVDS: {
2431 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2432 sink_caps.signal = SIGNAL_TYPE_LVDS;
2433 break;
2434 }
2435
2436 case SIGNAL_TYPE_EDP: {
2437 sink_caps.transaction_type =
2438 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2439 sink_caps.signal = SIGNAL_TYPE_EDP;
2440 break;
2441 }
2442
2443 case SIGNAL_TYPE_DISPLAY_PORT: {
2444 sink_caps.transaction_type =
2445 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2446 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2447 break;
2448 }
2449
2450 default:
2451 DC_ERROR("Invalid connector type! signal:%d\n",
2452 link->connector_signal);
2453 return;
2454 }
2455
2456 sink_init_data.link = link;
2457 sink_init_data.sink_signal = sink_caps.signal;
2458
2459 sink = dc_sink_create(&sink_init_data);
2460 if (!sink) {
2461 DC_ERROR("Failed to create sink!\n");
2462 return;
2463 }
2464
dcd5fb82 2465 /* dc_sink_create returns a new reference */
fbbdadf2
BL
2466 link->local_sink = sink;
2467
2468 edid_status = dm_helpers_read_local_edid(
2469 link->ctx,
2470 link,
2471 sink);
2472
2473 if (edid_status != EDID_OK)
2474 DC_ERROR("Failed to read EDID");
2475
2476}
2477
cdaae837
BL
2478static void dm_gpureset_commit_state(struct dc_state *dc_state,
2479 struct amdgpu_display_manager *dm)
2480{
2481 struct {
2482 struct dc_surface_update surface_updates[MAX_SURFACES];
2483 struct dc_plane_info plane_infos[MAX_SURFACES];
2484 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2485 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2486 struct dc_stream_update stream_update;
2487 } *bundle;
2488 int k, m;
2489
2490 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2491
2492 if (!bundle) {
2493 dm_error("Failed to allocate update bundle\n");
2494 goto cleanup;
2495 }
2496
2497 for (k = 0; k < dc_state->stream_count; k++) {
2498 bundle->stream_update.stream = dc_state->streams[k];
2499
2500 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2501 bundle->surface_updates[m].surface =
2502 dc_state->stream_status->plane_states[m];
2503 bundle->surface_updates[m].surface->force_full_update =
2504 true;
2505 }
2506 dc_commit_updates_for_stream(
2507 dm->dc, bundle->surface_updates,
2508 dc_state->stream_status->plane_count,
efc8278e 2509 dc_state->streams[k], &bundle->stream_update, dc_state);
cdaae837
BL
2510 }
2511
2512cleanup:
2513 kfree(bundle);
2514
2515 return;
2516}
2517
035f5496 2518static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
3c4d55c9
AP
2519{
2520 struct dc_stream_state *stream_state;
2521 struct amdgpu_dm_connector *aconnector = link->priv;
2522 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2523 struct dc_stream_update stream_update;
2524 bool dpms_off = true;
2525
2526 memset(&stream_update, 0, sizeof(stream_update));
2527 stream_update.dpms_off = &dpms_off;
2528
2529 mutex_lock(&adev->dm.dc_lock);
2530 stream_state = dc_stream_find_from_link(link);
2531
2532 if (stream_state == NULL) {
2533 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2534 mutex_unlock(&adev->dm.dc_lock);
2535 return;
2536 }
2537
2538 stream_update.stream = stream_state;
035f5496 2539 acrtc_state->force_dpms_off = true;
3c4d55c9 2540 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
efc8278e
AJ
2541 stream_state, &stream_update,
2542 stream_state->ctx->dc->current_state);
3c4d55c9
AP
2543 mutex_unlock(&adev->dm.dc_lock);
2544}
2545
4562236b
HW
2546static int dm_resume(void *handle)
2547{
2548 struct amdgpu_device *adev = handle;
4a580877 2549 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2550 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2551 struct amdgpu_dm_connector *aconnector;
4562236b 2552 struct drm_connector *connector;
f8d2d39e 2553 struct drm_connector_list_iter iter;
4562236b 2554 struct drm_crtc *crtc;
c2cea706 2555 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
2556 struct dm_crtc_state *dm_new_crtc_state;
2557 struct drm_plane *plane;
2558 struct drm_plane_state *new_plane_state;
2559 struct dm_plane_state *dm_new_plane_state;
113b7a01 2560 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2561 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
2562 struct dc_state *dc_state;
2563 int i, r, j;
4562236b 2564
53b3f8f4 2565 if (amdgpu_in_reset(adev)) {
cdaae837
BL
2566 dc_state = dm->cached_dc_state;
2567
524a0ba6
NK
2568 amdgpu_dm_outbox_init(adev);
2569
cdaae837
BL
2570 r = dm_dmub_hw_init(adev);
2571 if (r)
2572 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2573
2574 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2575 dc_resume(dm->dc);
2576
2577 amdgpu_dm_irq_resume_early(adev);
2578
2579 for (i = 0; i < dc_state->stream_count; i++) {
2580 dc_state->streams[i]->mode_changed = true;
2581 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2582 dc_state->stream_status->plane_states[j]->update_flags.raw
2583 = 0xffffffff;
2584 }
2585 }
8fe44c08 2586#if defined(CONFIG_DRM_AMD_DC_DCN)
1ebcaebd
NK
2587 /*
2588 * Resource allocation happens for link encoders for newer ASIC in
2589 * dc_validate_global_state, so we need to revalidate it.
2590 *
2591 * This shouldn't fail (it passed once before), so warn if it does.
2592 */
2593 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2594#endif
cdaae837
BL
2595
2596 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2597
cdaae837
BL
2598 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2599
2600 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2601
2602 dc_release_state(dm->cached_dc_state);
2603 dm->cached_dc_state = NULL;
2604
2605 amdgpu_dm_irq_resume_late(adev);
2606
2607 mutex_unlock(&dm->dc_lock);
2608
2609 return 0;
2610 }
113b7a01
LL
2611 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2612 dc_release_state(dm_state->context);
2613 dm_state->context = dc_create_state(dm->dc);
2614 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2615 dc_resource_state_construct(dm->dc, dm_state->context);
2616
8c7aea40
NK
2617 /* Before powering on DC we need to re-initialize DMUB. */
2618 r = dm_dmub_hw_init(adev);
2619 if (r)
2620 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2621
a80aa93d
ML
2622 /* power on hardware */
2623 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2624
4562236b
HW
2625 /* program HPD filter */
2626 dc_resume(dm->dc);
2627
4562236b
HW
2628 /*
2629 * early enable HPD Rx IRQ, should be done before set mode as short
2630 * pulse interrupts are used for MST
2631 */
2632 amdgpu_dm_irq_resume_early(adev);
2633
d20ebea8 2634 /* On resume we need to rewrite the MSTM control bits to enable MST*/
684cd480
LP
2635 s3_handle_mst(ddev, false);
2636
4562236b 2637 /* Do detection */
f8d2d39e
LP
2638 drm_connector_list_iter_begin(ddev, &iter);
2639 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2640 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2641
2642 /*
2643 * this is the case when traversing through already created
2644 * MST connectors, should be skipped
2645 */
2646 if (aconnector->mst_port)
2647 continue;
2648
03ea364c 2649 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2650 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2651 DRM_ERROR("KMS: Failed to detect connector\n");
2652
2653 if (aconnector->base.force && new_connection_type == dc_connection_none)
2654 emulated_link_detect(aconnector->dc_link);
2655 else
2656 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2657
2658 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2659 aconnector->fake_enable = false;
2660
dcd5fb82
MF
2661 if (aconnector->dc_sink)
2662 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2663 aconnector->dc_sink = NULL;
2664 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2665 mutex_unlock(&aconnector->hpd_lock);
4562236b 2666 }
f8d2d39e 2667 drm_connector_list_iter_end(&iter);
4562236b 2668
1f6010a9 2669 /* Force mode set in atomic commit */
a80aa93d 2670 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2671 new_crtc_state->active_changed = true;
4f346e65 2672
fcb4019e
LSL
2673 /*
2674 * atomic_check is expected to create the dc states. We need to release
2675 * them here, since they were duplicated as part of the suspend
2676 * procedure.
2677 */
a80aa93d 2678 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2679 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2680 if (dm_new_crtc_state->stream) {
2681 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2682 dc_stream_release(dm_new_crtc_state->stream);
2683 dm_new_crtc_state->stream = NULL;
2684 }
2685 }
2686
a80aa93d 2687 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2688 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2689 if (dm_new_plane_state->dc_state) {
2690 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2691 dc_plane_state_release(dm_new_plane_state->dc_state);
2692 dm_new_plane_state->dc_state = NULL;
2693 }
2694 }
2695
2d1af6a1 2696 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2697
a80aa93d 2698 dm->cached_state = NULL;
0a214e2f 2699
9faa4237 2700 amdgpu_dm_irq_resume_late(adev);
4562236b 2701
9340dfd3
HW
2702 amdgpu_dm_smu_write_watermarks_table(adev);
2703
2d1af6a1 2704 return 0;
4562236b
HW
2705}
2706
b8592b48
LL
2707/**
2708 * DOC: DM Lifecycle
2709 *
2710 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2711 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2712 * the base driver's device list to be initialized and torn down accordingly.
2713 *
2714 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2715 */
2716
4562236b
HW
2717static const struct amd_ip_funcs amdgpu_dm_funcs = {
2718 .name = "dm",
2719 .early_init = dm_early_init,
7abcf6b5 2720 .late_init = dm_late_init,
4562236b
HW
2721 .sw_init = dm_sw_init,
2722 .sw_fini = dm_sw_fini,
e9669fb7 2723 .early_fini = amdgpu_dm_early_fini,
4562236b
HW
2724 .hw_init = dm_hw_init,
2725 .hw_fini = dm_hw_fini,
2726 .suspend = dm_suspend,
2727 .resume = dm_resume,
2728 .is_idle = dm_is_idle,
2729 .wait_for_idle = dm_wait_for_idle,
2730 .check_soft_reset = dm_check_soft_reset,
2731 .soft_reset = dm_soft_reset,
2732 .set_clockgating_state = dm_set_clockgating_state,
2733 .set_powergating_state = dm_set_powergating_state,
2734};
2735
2736const struct amdgpu_ip_block_version dm_ip_block =
2737{
2738 .type = AMD_IP_BLOCK_TYPE_DCE,
2739 .major = 1,
2740 .minor = 0,
2741 .rev = 0,
2742 .funcs = &amdgpu_dm_funcs,
2743};
2744
ca3268c4 2745
b8592b48
LL
2746/**
2747 * DOC: atomic
2748 *
2749 * *WIP*
2750 */
0a323b84 2751
b3663f70 2752static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2753 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2754 .get_format_info = amd_get_format_info,
366c1baa 2755 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2756 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2757 .atomic_commit = drm_atomic_helper_commit,
54f5499a
AG
2758};
2759
2760static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2761 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2762};
2763
94562810
RS
2764static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2765{
2766 u32 max_cll, min_cll, max, min, q, r;
2767 struct amdgpu_dm_backlight_caps *caps;
2768 struct amdgpu_display_manager *dm;
2769 struct drm_connector *conn_base;
2770 struct amdgpu_device *adev;
ec11fe37 2771 struct dc_link *link = NULL;
94562810
RS
2772 static const u8 pre_computed_values[] = {
2773 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2774 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
7fd13bae 2775 int i;
94562810
RS
2776
2777 if (!aconnector || !aconnector->dc_link)
2778 return;
2779
ec11fe37 2780 link = aconnector->dc_link;
2781 if (link->connector_signal != SIGNAL_TYPE_EDP)
2782 return;
2783
94562810 2784 conn_base = &aconnector->base;
1348969a 2785 adev = drm_to_adev(conn_base->dev);
94562810 2786 dm = &adev->dm;
7fd13bae
AD
2787 for (i = 0; i < dm->num_of_edps; i++) {
2788 if (link == dm->backlight_link[i])
2789 break;
2790 }
2791 if (i >= dm->num_of_edps)
2792 return;
2793 caps = &dm->backlight_caps[i];
94562810
RS
2794 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2795 caps->aux_support = false;
2796 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2797 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2798
d0ae0b64 2799 if (caps->ext_caps->bits.oled == 1 /*||
94562810 2800 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
d0ae0b64 2801 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
94562810
RS
2802 caps->aux_support = true;
2803
7a46f05e
TI
2804 if (amdgpu_backlight == 0)
2805 caps->aux_support = false;
2806 else if (amdgpu_backlight == 1)
2807 caps->aux_support = true;
2808
94562810
RS
2809 /* From the specification (CTA-861-G), for calculating the maximum
2810 * luminance we need to use:
2811 * Luminance = 50*2**(CV/32)
2812 * Where CV is a one-byte value.
2813 * For calculating this expression we may need floating point precision;
2814 * to avoid this complexity level, we take advantage that CV is divided
2815 * by a constant. From Euclid's division algorithm, we know that CV
2816 * can be written as: CV = 32*q + r. Next, we replace CV in the
2817 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2818 * need to pre-compute the value of r/32. For pre-computing the values
2819 * we just used the following Ruby line:
2820 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2821 * The results of the above expressions can be verified at
2822 * pre_computed_values.
2823 */
2824 q = max_cll >> 5;
2825 r = max_cll % 32;
2826 max = (1 << q) * pre_computed_values[r];
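 /*
  * Worked example (hypothetical max_cll = 70): q = 70 >> 5 = 2 and
  * r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6] =
  * 4 * 57 = 228 nits, which matches 50*2**(70/32) ~= 227.8 from the
  * CTA-861-G formula above.
  */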
2827
2828 // min luminance: maxLum * (CV/255)^2 / 100
2829 q = DIV_ROUND_CLOSEST(min_cll, 255);
2830 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2831
2832 caps->aux_max_input_signal = max;
2833 caps->aux_min_input_signal = min;
2834}
2835
97e51c16
HW
2836void amdgpu_dm_update_connector_after_detect(
2837 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2838{
2839 struct drm_connector *connector = &aconnector->base;
2840 struct drm_device *dev = connector->dev;
b73a22d3 2841 struct dc_sink *sink;
4562236b
HW
2842
2843 /* MST handled by drm_mst framework */
2844 if (aconnector->mst_mgr.mst_state == true)
2845 return;
2846
4562236b 2847 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2848 if (sink)
2849 dc_sink_retain(sink);
4562236b 2850
1f6010a9
DF
2851 /*
2852 * Edid mgmt connector gets its first update only in the mode_valid hook, and then
4562236b 2853 * the connector sink is set to either a fake or a physical sink depending on link status.
1f6010a9 2854 * Skip if already done during boot.
4562236b
HW
2855 */
2856 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2857 && aconnector->dc_em_sink) {
2858
1f6010a9
DF
2859 /*
2860 * For S3 resume with headless, use the em_sink to fake a stream
2861 * because on resume connector->sink is set to NULL
4562236b
HW
2862 */
2863 mutex_lock(&dev->mode_config.mutex);
2864
2865 if (sink) {
922aa1e1 2866 if (aconnector->dc_sink) {
98e6436d 2867 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2868 /*
2869 * retain and release below are used to
2870 * bump up the refcount for the sink because the link no longer points
2871 * to it after disconnect; otherwise, on the next crtc-to-connector
922aa1e1
AG
2872 * reshuffle by UMD we would get an unwanted dc_sink release
2873 */
dcd5fb82 2874 dc_sink_release(aconnector->dc_sink);
922aa1e1 2875 }
4562236b 2876 aconnector->dc_sink = sink;
dcd5fb82 2877 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2878 amdgpu_dm_update_freesync_caps(connector,
2879 aconnector->edid);
4562236b 2880 } else {
98e6436d 2881 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2882 if (!aconnector->dc_sink) {
4562236b 2883 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2884 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2885 }
4562236b
HW
2886 }
2887
2888 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2889
2890 if (sink)
2891 dc_sink_release(sink);
4562236b
HW
2892 return;
2893 }
2894
2895 /*
2896 * TODO: temporary guard to look for proper fix
2897 * if this sink is an MST sink, we should not do anything
2898 */
dcd5fb82
MF
2899 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2900 dc_sink_release(sink);
4562236b 2901 return;
dcd5fb82 2902 }
4562236b
HW
2903
2904 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2905 /*
2906 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2907 * Do nothing!!
2908 */
f1ad2f5e 2909 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2910 aconnector->connector_id);
dcd5fb82
MF
2911 if (sink)
2912 dc_sink_release(sink);
4562236b
HW
2913 return;
2914 }
2915
f1ad2f5e 2916 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2917 aconnector->connector_id, aconnector->dc_sink, sink);
2918
2919 mutex_lock(&dev->mode_config.mutex);
2920
1f6010a9
DF
2921 /*
2922 * 1. Update status of the drm connector
2923 * 2. Send an event and let userspace tell us what to do
2924 */
4562236b 2925 if (sink) {
1f6010a9
DF
2926 /*
2927 * TODO: check if we still need the S3 mode update workaround.
2928 * If yes, put it here.
2929 */
c64b0d6b 2930 if (aconnector->dc_sink) {
98e6436d 2931 amdgpu_dm_update_freesync_caps(connector, NULL);
c64b0d6b
VL
2932 dc_sink_release(aconnector->dc_sink);
2933 }
4562236b
HW
2934
2935 aconnector->dc_sink = sink;
dcd5fb82 2936 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2937 if (sink->dc_edid.length == 0) {
4562236b 2938 aconnector->edid = NULL;
e6142dd5
AP
2939 if (aconnector->dc_link->aux_mode) {
2940 drm_dp_cec_unset_edid(
2941 &aconnector->dm_dp_aux.aux);
2942 }
900b3cb1 2943 } else {
4562236b 2944 aconnector->edid =
e6142dd5 2945 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2946
c555f023 2947 drm_connector_update_edid_property(connector,
e6142dd5 2948 aconnector->edid);
e6142dd5
AP
2949 if (aconnector->dc_link->aux_mode)
2950 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2951 aconnector->edid);
4562236b 2952 }
e6142dd5 2953
98e6436d 2954 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2955 update_connector_ext_caps(aconnector);
4562236b 2956 } else {
e86e8947 2957 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2958 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2959 drm_connector_update_edid_property(connector, NULL);
4562236b 2960 aconnector->num_modes = 0;
dcd5fb82 2961 dc_sink_release(aconnector->dc_sink);
4562236b 2962 aconnector->dc_sink = NULL;
5326c452 2963 aconnector->edid = NULL;
0c8620d6
BL
2964#ifdef CONFIG_DRM_AMD_DC_HDCP
2965 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2966 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2967 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2968#endif
4562236b
HW
2969 }
2970
2971 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 2972
0f877894
OV
2973 update_subconnector_property(aconnector);
2974
dcd5fb82
MF
2975 if (sink)
2976 dc_sink_release(sink);
4562236b
HW
2977}
2978
e27c41d5 2979static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
4562236b 2980{
4562236b
HW
2981 struct drm_connector *connector = &aconnector->base;
2982 struct drm_device *dev = connector->dev;
fbbdadf2 2983 enum dc_connection_type new_connection_type = dc_connection_none;
1348969a 2984 struct amdgpu_device *adev = drm_to_adev(dev);
97f6c917 2985 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
035f5496 2986 struct dm_crtc_state *dm_crtc_state = NULL;
4562236b 2987
b972b4f9
HW
2988 if (adev->dm.disable_hpd_irq)
2989 return;
2990
035f5496
AP
2991 if (dm_con_state->base.state && dm_con_state->base.crtc)
2992 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2993 dm_con_state->base.state,
2994 dm_con_state->base.crtc));
1f6010a9
DF
2995 /*
2996 * In case of failure or MST, there is no need to update the connector status or notify the OS
2997 * since (in the MST case) MST does this in its own context.
4562236b
HW
2998 */
2999 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 3000
0c8620d6 3001#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 3002 if (adev->dm.hdcp_workqueue) {
96a3b32e 3003 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
97f6c917
BL
3004 dm_con_state->update_hdcp = true;
3005 }
0c8620d6 3006#endif
2e0ac3d6
HW
3007 if (aconnector->fake_enable)
3008 aconnector->fake_enable = false;
3009
fbbdadf2
BL
3010 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3011 DRM_ERROR("KMS: Failed to detect connector\n");
3012
3013 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3014 emulated_link_detect(aconnector->dc_link);
3015
fbbdadf2
BL
3016 drm_modeset_lock_all(dev);
3017 dm_restore_drm_connector_state(dev, connector);
3018 drm_modeset_unlock_all(dev);
3019
3020 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3021 drm_kms_helper_hotplug_event(dev);
3022
3023 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9 3024 if (new_connection_type == dc_connection_none &&
035f5496
AP
3025 aconnector->dc_link->type == dc_connection_none &&
3026 dm_crtc_state)
3027 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
4562236b 3028
3c4d55c9 3029 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b
HW
3030
3031 drm_modeset_lock_all(dev);
3032 dm_restore_drm_connector_state(dev, connector);
3033 drm_modeset_unlock_all(dev);
3034
3035 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3036 drm_kms_helper_hotplug_event(dev);
3037 }
3038 mutex_unlock(&aconnector->hpd_lock);
3039
3040}
3041
e27c41d5
JS
3042static void handle_hpd_irq(void *param)
3043{
3044 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3045
3046 handle_hpd_irq_helper(aconnector);
3047
3048}
3049
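/*
 * Service MST sideband messages on a DP short pulse: read the sink's DPCD
 * IRQ vector (ESI registers on DPCD rev 1.2+), let the MST manager process
 * it, ACK it back to the sink, and repeat until no new IRQ is raised
 * (bounded by max_process_count).
 */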
8e794421 3050static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
4562236b
HW
3051{
3052 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3053 uint8_t dret;
3054 bool new_irq_handled = false;
3055 int dpcd_addr;
3056 int dpcd_bytes_to_read;
3057
3058 const int max_process_count = 30;
3059 int process_count = 0;
3060
3061 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3062
3063 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3064 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3065 /* DPCD 0x200 - 0x201 for downstream IRQ */
3066 dpcd_addr = DP_SINK_COUNT;
3067 } else {
3068 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3069 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3070 dpcd_addr = DP_SINK_COUNT_ESI;
3071 }
3072
3073 dret = drm_dp_dpcd_read(
3074 &aconnector->dm_dp_aux.aux,
3075 dpcd_addr,
3076 esi,
3077 dpcd_bytes_to_read);
3078
3079 while (dret == dpcd_bytes_to_read &&
3080 process_count < max_process_count) {
3081 uint8_t retry;
3082 dret = 0;
3083
3084 process_count++;
3085
f1ad2f5e 3086 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
3087 /* handle HPD short pulse irq */
3088 if (aconnector->mst_mgr.mst_state)
3089 drm_dp_mst_hpd_irq(
3090 &aconnector->mst_mgr,
3091 esi,
3092 &new_irq_handled);
4562236b
HW
3093
3094 if (new_irq_handled) {
3095 /* ACK at DPCD to notify down stream */
3096 const int ack_dpcd_bytes_to_write =
3097 dpcd_bytes_to_read - 1;
3098
3099 for (retry = 0; retry < 3; retry++) {
3100 uint8_t wret;
3101
3102 wret = drm_dp_dpcd_write(
3103 &aconnector->dm_dp_aux.aux,
3104 dpcd_addr + 1,
3105 &esi[1],
3106 ack_dpcd_bytes_to_write);
3107 if (wret == ack_dpcd_bytes_to_write)
3108 break;
3109 }
3110
1f6010a9 3111 /* check if there is a new irq to be handled */
4562236b
HW
3112 dret = drm_dp_dpcd_read(
3113 &aconnector->dm_dp_aux.aux,
3114 dpcd_addr,
3115 esi,
3116 dpcd_bytes_to_read);
3117
3118 new_irq_handled = false;
d4a6e8a9 3119 } else {
4562236b 3120 break;
d4a6e8a9 3121 }
4562236b
HW
3122 }
3123
3124 if (process_count == max_process_count)
f1ad2f5e 3125 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
3126}
3127
8e794421
WL
3128static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3129 union hpd_irq_data hpd_irq_data)
3130{
3131 struct hpd_rx_irq_offload_work *offload_work =
3132 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3133
3134 if (!offload_work) {
3135 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3136 return;
3137 }
3138
3139 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3140 offload_work->data = hpd_irq_data;
3141 offload_work->offload_wq = offload_wq;
3142
3143 queue_work(offload_wq->wq, &offload_work->work);
3144 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3145}
3146
4562236b
HW
3147static void handle_hpd_rx_irq(void *param)
3148{
c84dec2f 3149 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
3150 struct drm_connector *connector = &aconnector->base;
3151 struct drm_device *dev = connector->dev;
53cbf65c 3152 struct dc_link *dc_link = aconnector->dc_link;
4562236b 3153 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 3154 bool result = false;
fbbdadf2 3155 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 3156 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 3157 union hpd_irq_data hpd_irq_data;
8e794421
WL
3158 bool link_loss = false;
3159 bool has_left_work = false;
3160 int idx = aconnector->base.index;
3161 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
2a0f9270
BL
3162
3163 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 3164
b972b4f9
HW
3165 if (adev->dm.disable_hpd_irq)
3166 return;
3167
1f6010a9
DF
3168 /*
3169 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
4562236b
HW
3170 * conflict; after the i2c helper is implemented, this mutex should be
3171 * retired.
3172 */
b86e7eef 3173 mutex_lock(&aconnector->hpd_lock);
4562236b 3174
8e794421
WL
3175 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3176 &link_loss, true, &has_left_work);
3083a984 3177
8e794421
WL
3178 if (!has_left_work)
3179 goto out;
3180
3181 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3182 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3183 goto out;
3184 }
3185
3186 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3187 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3188 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3189 dm_handle_mst_sideband_msg(aconnector);
3083a984
QZ
3190 goto out;
3191 }
3083a984 3192
8e794421
WL
3193 if (link_loss) {
3194 bool skip = false;
d2aa1356 3195
8e794421
WL
3196 spin_lock(&offload_wq->offload_lock);
3197 skip = offload_wq->is_handling_link_loss;
3198
3199 if (!skip)
3200 offload_wq->is_handling_link_loss = true;
3201
3202 spin_unlock(&offload_wq->offload_lock);
3203
3204 if (!skip)
3205 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3206
3207 goto out;
3208 }
3209 }
c8ea79a8 3210
3083a984 3211out:
c8ea79a8 3212 if (result && !is_mst_root_connector) {
4562236b 3213 /* Downstream Port status changed. */
fbbdadf2
BL
3214 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3215 DRM_ERROR("KMS: Failed to detect connector\n");
3216
3217 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3218 emulated_link_detect(dc_link);
3219
3220 if (aconnector->fake_enable)
3221 aconnector->fake_enable = false;
3222
3223 amdgpu_dm_update_connector_after_detect(aconnector);
3224
3225
3226 drm_modeset_lock_all(dev);
3227 dm_restore_drm_connector_state(dev, connector);
3228 drm_modeset_unlock_all(dev);
3229
3230 drm_kms_helper_hotplug_event(dev);
3231 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
3232
3233 if (aconnector->fake_enable)
3234 aconnector->fake_enable = false;
3235
4562236b
HW
3236 amdgpu_dm_update_connector_after_detect(aconnector);
3237
3238
3239 drm_modeset_lock_all(dev);
3240 dm_restore_drm_connector_state(dev, connector);
3241 drm_modeset_unlock_all(dev);
3242
3243 drm_kms_helper_hotplug_event(dev);
3244 }
3245 }
2a0f9270 3246#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
3247 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3248 if (adev->dm.hdcp_workqueue)
3249 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3250 }
2a0f9270 3251#endif
4562236b 3252
b86e7eef 3253 if (dc_link->type != dc_connection_mst_branch)
e86e8947 3254 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
b86e7eef
NC
3255
3256 mutex_unlock(&aconnector->hpd_lock);
4562236b
HW
3257}
3258
3259static void register_hpd_handlers(struct amdgpu_device *adev)
3260{
4a580877 3261 struct drm_device *dev = adev_to_drm(adev);
4562236b 3262 struct drm_connector *connector;
c84dec2f 3263 struct amdgpu_dm_connector *aconnector;
4562236b
HW
3264 const struct dc_link *dc_link;
3265 struct dc_interrupt_params int_params = {0};
3266
3267 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3268 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3269
3270 list_for_each_entry(connector,
3271 &dev->mode_config.connector_list, head) {
3272
c84dec2f 3273 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
3274 dc_link = aconnector->dc_link;
3275
3276 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3277 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3278 int_params.irq_source = dc_link->irq_source_hpd;
3279
3280 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3281 handle_hpd_irq,
3282 (void *) aconnector);
3283 }
3284
3285 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3286
3287 /* Also register for DP short pulse (hpd_rx). */
3288 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3289 int_params.irq_source = dc_link->irq_source_hpd_rx;
3290
3291 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3292 handle_hpd_rx_irq,
3293 (void *) aconnector);
8e794421
WL
3294
3295 if (adev->dm.hpd_rx_offload_wq)
3296 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3297 aconnector;
4562236b
HW
3298 }
3299 }
3300}
3301
55e56389
MR
3302#if defined(CONFIG_DRM_AMD_DC_SI)
3303/* Register IRQ sources and initialize IRQ callbacks */
3304static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3305{
3306 struct dc *dc = adev->dm.dc;
3307 struct common_irq_params *c_irq_params;
3308 struct dc_interrupt_params int_params = {0};
3309 int r;
3310 int i;
3311 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3312
3313 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3314 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3315
3316 /*
3317 * Actions of amdgpu_irq_add_id():
3318 * 1. Register a set() function with base driver.
3319 * Base driver will call set() function to enable/disable an
3320 * interrupt in DC hardware.
3321 * 2. Register amdgpu_dm_irq_handler().
3322 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3323 * coming from DC hardware.
3324 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3325 * for acknowledging and handling. */
3326
3327 /* Use VBLANK interrupt */
3328 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3329 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3330 if (r) {
3331 DRM_ERROR("Failed to add crtc irq id!\n");
3332 return r;
3333 }
3334
3335 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3336 int_params.irq_source =
3337 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3338
3339 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3340
3341 c_irq_params->adev = adev;
3342 c_irq_params->irq_src = int_params.irq_source;
3343
3344 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3345 dm_crtc_high_irq, c_irq_params);
3346 }
3347
3348 /* Use GRPH_PFLIP interrupt */
3349 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3350 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3351 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3352 if (r) {
3353 DRM_ERROR("Failed to add page flip irq id!\n");
3354 return r;
3355 }
3356
3357 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3358 int_params.irq_source =
3359 dc_interrupt_to_irq_source(dc, i, 0);
3360
3361 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3362
3363 c_irq_params->adev = adev;
3364 c_irq_params->irq_src = int_params.irq_source;
3365
3366 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3367 dm_pflip_high_irq, c_irq_params);
3368
3369 }
3370
3371 /* HPD */
3372 r = amdgpu_irq_add_id(adev, client_id,
3373 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3374 if (r) {
3375 DRM_ERROR("Failed to add hpd irq id!\n");
3376 return r;
3377 }
3378
3379 register_hpd_handlers(adev);
3380
3381 return 0;
3382}
3383#endif
3384
4562236b
HW
3385/* Register IRQ sources and initialize IRQ callbacks */
3386static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3387{
3388 struct dc *dc = adev->dm.dc;
3389 struct common_irq_params *c_irq_params;
3390 struct dc_interrupt_params int_params = {0};
3391 int r;
3392 int i;
1ffdeca6 3393 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 3394
c08182f2 3395 if (adev->family >= AMDGPU_FAMILY_AI)
3760f76c 3396 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
3397
3398 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3399 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3400
1f6010a9
DF
3401 /*
3402 * Actions of amdgpu_irq_add_id():
4562236b
HW
3403 * 1. Register a set() function with base driver.
3404 * Base driver will call set() function to enable/disable an
3405 * interrupt in DC hardware.
3406 * 2. Register amdgpu_dm_irq_handler().
3407 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3408 * coming from DC hardware.
3409 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3410 * for acknowledging and handling. */
3411
b57de80a 3412 /* Use VBLANK interrupt */
e9029155 3413 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 3414 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
3415 if (r) {
3416 DRM_ERROR("Failed to add crtc irq id!\n");
3417 return r;
3418 }
3419
3420 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3421 int_params.irq_source =
3d761e79 3422 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 3423
b57de80a 3424 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
3425
3426 c_irq_params->adev = adev;
3427 c_irq_params->irq_src = int_params.irq_source;
3428
3429 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3430 dm_crtc_high_irq, c_irq_params);
3431 }
3432
d2574c33
MK
3433 /* Use VUPDATE interrupt */
3434 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3435 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3436 if (r) {
3437 DRM_ERROR("Failed to add vupdate irq id!\n");
3438 return r;
3439 }
3440
3441 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3442 int_params.irq_source =
3443 dc_interrupt_to_irq_source(dc, i, 0);
3444
3445 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3446
3447 c_irq_params->adev = adev;
3448 c_irq_params->irq_src = int_params.irq_source;
3449
3450 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3451 dm_vupdate_high_irq, c_irq_params);
3452 }
3453
3d761e79 3454 /* Use GRPH_PFLIP interrupt */
4562236b
HW
3455 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3456 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 3457 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
3458 if (r) {
3459 DRM_ERROR("Failed to add page flip irq id!\n");
3460 return r;
3461 }
3462
3463 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3464 int_params.irq_source =
3465 dc_interrupt_to_irq_source(dc, i, 0);
3466
3467 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3468
3469 c_irq_params->adev = adev;
3470 c_irq_params->irq_src = int_params.irq_source;
3471
3472 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3473 dm_pflip_high_irq, c_irq_params);
3474
3475 }
3476
3477 /* HPD */
2c8ad2d5
AD
3478 r = amdgpu_irq_add_id(adev, client_id,
3479 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
3480 if (r) {
3481 DRM_ERROR("Failed to add hpd irq id!\n");
3482 return r;
3483 }
3484
3485 register_hpd_handlers(adev);
3486
3487 return 0;
3488}
3489
b86a1aa3 3490#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3491/* Register IRQ sources and initialize IRQ callbacks */
3492static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3493{
3494 struct dc *dc = adev->dm.dc;
3495 struct common_irq_params *c_irq_params;
3496 struct dc_interrupt_params int_params = {0};
3497 int r;
3498 int i;
660d5406
WL
3499#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3500 static const unsigned int vrtl_int_srcid[] = {
3501 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3502 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3503 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3504 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3505 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3506 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3507 };
3508#endif
ff5ef992
AD
3509
3510 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3511 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3512
1f6010a9
DF
3513 /*
3514 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
3515 * 1. Register a set() function with base driver.
3516 * Base driver will call set() function to enable/disable an
3517 * interrupt in DC hardware.
3518 * 2. Register amdgpu_dm_irq_handler().
3519 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3520 * coming from DC hardware.
3521 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3522 * for acknowledging and handling.
1f6010a9 3523 */
ff5ef992
AD
3524
3525 /* Use VSTARTUP interrupt */
3526 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3527 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3528 i++) {
3760f76c 3529 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
3530
3531 if (r) {
3532 DRM_ERROR("Failed to add crtc irq id!\n");
3533 return r;
3534 }
3535
3536 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3537 int_params.irq_source =
3538 dc_interrupt_to_irq_source(dc, i, 0);
3539
3540 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3541
3542 c_irq_params->adev = adev;
3543 c_irq_params->irq_src = int_params.irq_source;
3544
2346ef47
NK
3545 amdgpu_dm_irq_register_interrupt(
3546 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3547 }
3548
86bc2219
WL
3549 /* Use otg vertical line interrupt */
3550#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
660d5406
WL
3551 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3552 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3553 vrtl_int_srcid[i], &adev->vline0_irq);
86bc2219
WL
3554
3555 if (r) {
3556 DRM_ERROR("Failed to add vline0 irq id!\n");
3557 return r;
3558 }
3559
3560 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3561 int_params.irq_source =
660d5406
WL
3562 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3563
3564 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3565 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3566 break;
3567 }
86bc2219
WL
3568
3569 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3570 - DC_IRQ_SOURCE_DC1_VLINE0];
3571
3572 c_irq_params->adev = adev;
3573 c_irq_params->irq_src = int_params.irq_source;
3574
3575 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3576 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3577 }
3578#endif
3579
2346ef47
NK
3580 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3581 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3582 * to trigger at end of each vblank, regardless of state of the lock,
3583 * matching DCE behaviour.
3584 */
3585 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3586 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3587 i++) {
3588 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3589
3590 if (r) {
3591 DRM_ERROR("Failed to add vupdate irq id!\n");
3592 return r;
3593 }
3594
3595 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3596 int_params.irq_source =
3597 dc_interrupt_to_irq_source(dc, i, 0);
3598
3599 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3600
3601 c_irq_params->adev = adev;
3602 c_irq_params->irq_src = int_params.irq_source;
3603
ff5ef992 3604 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 3605 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
3606 }
3607
ff5ef992
AD
3608 /* Use GRPH_PFLIP interrupt */
3609 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3610 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3611 i++) {
3760f76c 3612 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
3613 if (r) {
3614 DRM_ERROR("Failed to add page flip irq id!\n");
3615 return r;
3616 }
3617
3618 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3619 int_params.irq_source =
3620 dc_interrupt_to_irq_source(dc, i, 0);
3621
3622 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3623
3624 c_irq_params->adev = adev;
3625 c_irq_params->irq_src = int_params.irq_source;
3626
3627 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3628 dm_pflip_high_irq, c_irq_params);
3629
3630 }
3631
81927e28
JS
3632 /* HPD */
3633 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3634 &adev->hpd_irq);
3635 if (r) {
3636 DRM_ERROR("Failed to add hpd irq id!\n");
3637 return r;
3638 }
a08f16cf 3639
81927e28 3640 register_hpd_handlers(adev);
a08f16cf 3641
81927e28
JS
3642 return 0;
3643}
3644/* Register Outbox IRQ sources and initialize IRQ callbacks */
3645static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3646{
3647 struct dc *dc = adev->dm.dc;
3648 struct common_irq_params *c_irq_params;
3649 struct dc_interrupt_params int_params = {0};
3650 int r, i;
3651
3652 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3653 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3654
3655 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3656 &adev->dmub_outbox_irq);
3657 if (r) {
3658 DRM_ERROR("Failed to add outbox irq id!\n");
3659 return r;
3660 }
3661
3662 if (dc->ctx->dmub_srv) {
3663 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3664 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
a08f16cf 3665 int_params.irq_source =
81927e28 3666 dc_interrupt_to_irq_source(dc, i, 0);
a08f16cf 3667
81927e28 3668 c_irq_params = &adev->dm.dmub_outbox_params[0];
a08f16cf
LHM
3669
3670 c_irq_params->adev = adev;
3671 c_irq_params->irq_src = int_params.irq_source;
3672
3673 amdgpu_dm_irq_register_interrupt(adev, &int_params,
81927e28 3674 dm_dmub_outbox1_low_irq, c_irq_params);
ff5ef992
AD
3675 }
3676
ff5ef992
AD
3677 return 0;
3678}
3679#endif
3680
eb3dc897
NK
3681/*
3682 * Acquires the lock for the atomic state object and returns
3683 * the new atomic state.
3684 *
3685 * This should only be called during atomic check.
3686 */
3687static int dm_atomic_get_state(struct drm_atomic_state *state,
3688 struct dm_atomic_state **dm_state)
3689{
3690 struct drm_device *dev = state->dev;
1348969a 3691 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3692 struct amdgpu_display_manager *dm = &adev->dm;
3693 struct drm_private_state *priv_state;
eb3dc897
NK
3694
3695 if (*dm_state)
3696 return 0;
3697
eb3dc897
NK
3698 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3699 if (IS_ERR(priv_state))
3700 return PTR_ERR(priv_state);
3701
3702 *dm_state = to_dm_atomic_state(priv_state);
3703
3704 return 0;
3705}
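dm_atomic_get_state() is idempotent on its out-parameter: callers in atomic check start with a NULL dm_state, so repeated calls are cheap and the private-object lock is only acquired on the first one. A hedged sketch of that call pattern (example_validate is hypothetical, not a function in this file):

/* Hypothetical caller: lazily acquire the DM private state during check. */
static int example_validate(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = NULL;
	int ret;

	ret = dm_atomic_get_state(state, &dm_state);
	if (ret)
		return ret;

	/* dm_state->context can now be inspected for this commit */
	return 0;
}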
3706
dfd84d90 3707static struct dm_atomic_state *
eb3dc897
NK
3708dm_atomic_get_new_state(struct drm_atomic_state *state)
3709{
3710 struct drm_device *dev = state->dev;
1348969a 3711 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3712 struct amdgpu_display_manager *dm = &adev->dm;
3713 struct drm_private_obj *obj;
3714 struct drm_private_state *new_obj_state;
3715 int i;
3716
3717 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3718 if (obj->funcs == dm->atomic_obj.funcs)
3719 return to_dm_atomic_state(new_obj_state);
3720 }
3721
3722 return NULL;
3723}
3724
eb3dc897
NK
3725static struct drm_private_state *
3726dm_atomic_duplicate_state(struct drm_private_obj *obj)
3727{
3728 struct dm_atomic_state *old_state, *new_state;
3729
3730 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3731 if (!new_state)
3732 return NULL;
3733
3734 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3735
813d20dc
AW
3736 old_state = to_dm_atomic_state(obj->state);
3737
3738 if (old_state && old_state->context)
3739 new_state->context = dc_copy_state(old_state->context);
3740
eb3dc897
NK
3741 if (!new_state->context) {
3742 kfree(new_state);
3743 return NULL;
3744 }
3745
eb3dc897
NK
3746 return &new_state->base;
3747}
3748
3749static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3750 struct drm_private_state *state)
3751{
3752 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3753
3754 if (dm_state && dm_state->context)
3755 dc_release_state(dm_state->context);
3756
3757 kfree(dm_state);
3758}
3759
3760static struct drm_private_state_funcs dm_atomic_state_funcs = {
3761 .atomic_duplicate_state = dm_atomic_duplicate_state,
3762 .atomic_destroy_state = dm_atomic_destroy_state,
3763};
3764
4562236b
HW
3765static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3766{
eb3dc897 3767 struct dm_atomic_state *state;
4562236b
HW
3768 int r;
3769
3770 adev->mode_info.mode_config_initialized = true;
3771
4a580877
LT
3772 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3773 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3774
4a580877
LT
3775 adev_to_drm(adev)->mode_config.max_width = 16384;
3776 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3777
4a580877
LT
3778 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3779 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3780 /* indicates support for immediate flip */
4a580877 3781 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3782
4a580877 3783 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3784
eb3dc897
NK
3785 state = kzalloc(sizeof(*state), GFP_KERNEL);
3786 if (!state)
3787 return -ENOMEM;
3788
813d20dc 3789 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3790 if (!state->context) {
3791 kfree(state);
3792 return -ENOMEM;
3793 }
3794
3795 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3796
4a580877 3797 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3798 &adev->dm.atomic_obj,
eb3dc897
NK
3799 &state->base,
3800 &dm_atomic_state_funcs);
3801
3dc9b1ce 3802 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3803 if (r) {
3804 dc_release_state(state->context);
3805 kfree(state);
4562236b 3806 return r;
b67a468a 3807 }
4562236b 3808
6ce8f316 3809 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3810 if (r) {
3811 dc_release_state(state->context);
3812 kfree(state);
6ce8f316 3813 return r;
b67a468a 3814 }
6ce8f316 3815
4562236b
HW
3816 return 0;
3817}
3818
206bbafe
DF
3819#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3820#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3821#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3822
4562236b
HW
3823#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3824 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3825
7fd13bae
AD
3826static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3827 int bl_idx)
206bbafe
DF
3828{
3829#if defined(CONFIG_ACPI)
3830 struct amdgpu_dm_backlight_caps caps;
3831
58965855
FS
3832 memset(&caps, 0, sizeof(caps));
3833
7fd13bae 3834 if (dm->backlight_caps[bl_idx].caps_valid)
206bbafe
DF
3835 return;
3836
f9b7f370 3837 amdgpu_acpi_get_backlight_caps(&caps);
206bbafe 3838 if (caps.caps_valid) {
7fd13bae 3839 dm->backlight_caps[bl_idx].caps_valid = true;
94562810
RS
3840 if (caps.aux_support)
3841 return;
7fd13bae
AD
3842 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3843 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
206bbafe 3844 } else {
7fd13bae 3845 dm->backlight_caps[bl_idx].min_input_signal =
206bbafe 3846 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
7fd13bae 3847 dm->backlight_caps[bl_idx].max_input_signal =
206bbafe
DF
3848 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3849 }
3850#else
7fd13bae 3851 if (dm->backlight_caps[bl_idx].aux_support)
94562810
RS
3852 return;
3853
7fd13bae
AD
3854 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3855 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3856#endif
3857}
3858
69d9f427
AM
3859static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3860 unsigned *min, unsigned *max)
94562810 3861{
94562810 3862 if (!caps)
69d9f427 3863 return 0;
94562810 3864
69d9f427
AM
3865 if (caps->aux_support) {
3866 // Firmware limits are in nits, DC API wants millinits.
3867 *max = 1000 * caps->aux_max_input_signal;
3868 *min = 1000 * caps->aux_min_input_signal;
94562810 3869 } else {
69d9f427
AM
3870 // Firmware limits are 8-bit, PWM control is 16-bit.
3871 *max = 0x101 * caps->max_input_signal;
3872 *min = 0x101 * caps->min_input_signal;
94562810 3873 }
69d9f427
AM
3874 return 1;
3875}
94562810 3876
69d9f427
AM
3877static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3878 uint32_t brightness)
3879{
3880 unsigned min, max;
94562810 3881
69d9f427
AM
3882 if (!get_brightness_range(caps, &min, &max))
3883 return brightness;
3884
3885 // Rescale 0..255 to min..max
3886 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3887 AMDGPU_MAX_BL_LEVEL);
3888}
3889
3890static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3891 uint32_t brightness)
3892{
3893 unsigned min, max;
3894
3895 if (!get_brightness_range(caps, &min, &max))
3896 return brightness;
3897
3898 if (brightness < min)
3899 return 0;
3900 // Rescale min..max to 0..255
3901 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3902 max - min);
94562810
RS
3903}
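convert_brightness_from_user() and convert_brightness_to_user() are inverses up to rounding. A standalone demo of the same arithmetic, assuming AMDGPU_MAX_BL_LEVEL is 255 and using the PWM path's 0x101 scaling of the 8-bit firmware limits:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))	/* unsigned-only variant */
#define MAX_BL 255u

static unsigned int from_user(unsigned int min, unsigned int max, unsigned int b)
{
	return min + DIV_ROUND_CLOSEST((max - min) * b, MAX_BL);
}

static unsigned int to_user(unsigned int min, unsigned int max, unsigned int b)
{
	if (b < min)
		return 0;
	return DIV_ROUND_CLOSEST(MAX_BL * (b - min), max - min);
}

int main(void)
{
	/* 8-bit firmware limits (12..255) scaled by 0x101 into the 16-bit PWM range */
	unsigned int min = 0x101 * 12, max = 0x101 * 255;
	unsigned int hw = from_user(min, max, 128);

	printf("user 128 -> hw %u -> user %u\n", hw, to_user(min, max, hw));
	return 0;
}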
3904
3d6c9164 3905static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
7fd13bae 3906 int bl_idx,
3d6c9164 3907 u32 user_brightness)
4562236b 3908{
206bbafe 3909 struct amdgpu_dm_backlight_caps caps;
7fd13bae
AD
3910 struct dc_link *link;
3911 u32 brightness;
94562810 3912 bool rc;
4562236b 3913
7fd13bae
AD
3914 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3915 caps = dm->backlight_caps[bl_idx];
94562810 3916
7fd13bae
AD
3917 dm->brightness[bl_idx] = user_brightness;
3918 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3919 link = (struct dc_link *)dm->backlight_link[bl_idx];
94562810 3920
3d6c9164 3921 /* Change brightness based on AUX property */
118b4627 3922 if (caps.aux_support) {
7fd13bae
AD
3923 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3924 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3925 if (!rc)
3926 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
118b4627 3927 } else {
7fd13bae
AD
3928 rc = dc_link_set_backlight_level(link, brightness, 0);
3929 if (!rc)
3930 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
118b4627 3931 }
94562810
RS
3932
3933 return rc ? 0 : 1;
4562236b
HW
3934}
3935
3d6c9164 3936static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4562236b 3937{
620a0d27 3938 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 3939 int i;
3d6c9164 3940
7fd13bae
AD
3941 for (i = 0; i < dm->num_of_edps; i++) {
3942 if (bd == dm->backlight_dev[i])
3943 break;
3944 }
3945 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3946 i = 0;
3947 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3d6c9164
AD
3948
3949 return 0;
3950}
3951
7fd13bae
AD
3952static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3953 int bl_idx)
3d6c9164 3954{
0ad3e64e 3955 struct amdgpu_dm_backlight_caps caps;
7fd13bae 3956 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
0ad3e64e 3957
7fd13bae
AD
3958 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3959 caps = dm->backlight_caps[bl_idx];
620a0d27 3960
0ad3e64e 3961 if (caps.aux_support) {
0ad3e64e
AD
3962 u32 avg, peak;
3963 bool rc;
3964
3965 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3966 if (!rc)
7fd13bae 3967 return dm->brightness[bl_idx];
0ad3e64e
AD
3968 return convert_brightness_to_user(&caps, avg);
3969 } else {
7fd13bae 3970 int ret = dc_link_get_backlight_level(link);
0ad3e64e
AD
3971
3972 if (ret == DC_ERROR_UNEXPECTED)
7fd13bae 3973 return dm->brightness[bl_idx];
0ad3e64e
AD
3974 return convert_brightness_to_user(&caps, ret);
3975 }
4562236b
HW
3976}
3977
3d6c9164
AD
3978static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3979{
3980 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 3981 int i;
3d6c9164 3982
7fd13bae
AD
3983 for (i = 0; i < dm->num_of_edps; i++) {
3984 if (bd == dm->backlight_dev[i])
3985 break;
3986 }
3987 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3988 i = 0;
3989 return amdgpu_dm_backlight_get_level(dm, i);
3d6c9164
AD
3990}
3991
4562236b 3992static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 3993 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
3994 .get_brightness = amdgpu_dm_backlight_get_brightness,
3995 .update_status = amdgpu_dm_backlight_update_status,
3996};
3997
7578ecda
AD
3998static void
3999amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
4000{
4001 char bl_name[16];
4002 struct backlight_properties props = { 0 };
4003
7fd13bae
AD
4004 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4005 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
206bbafe 4006
4562236b 4007 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 4008 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
4009 props.type = BACKLIGHT_RAW;
4010
4011 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
7fd13bae 4012 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4562236b 4013
7fd13bae
AD
4014 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4015 adev_to_drm(dm->adev)->dev,
4016 dm,
4017 &amdgpu_dm_backlight_ops,
4018 &props);
4562236b 4019
7fd13bae 4020 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4562236b
HW
4021 DRM_ERROR("DM: Backlight registration failed!\n");
4022 else
f1ad2f5e 4023 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b 4024}
4562236b
HW
4025#endif
4026
df534fff 4027static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 4028 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
4029 enum drm_plane_type plane_type,
4030 const struct dc_plane_cap *plane_cap)
df534fff 4031{
f180b4bc 4032 struct drm_plane *plane;
df534fff
S
4033 unsigned long possible_crtcs;
4034 int ret = 0;
4035
f180b4bc 4036 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
4037 if (!plane) {
4038 DRM_ERROR("KMS: Failed to allocate plane\n");
4039 return -ENOMEM;
4040 }
b2fddb13 4041 plane->type = plane_type;
df534fff
S
4042
4043 /*
b2fddb13
NK
4044 * HACK: IGT tests expect that the primary plane for a CRTC
4045	 * can only have one possible CRTC. Only expose support for
4046	 * any CRTC on planes that will not be used as a primary plane
4047	 * for a CRTC, such as overlay or underlay planes.
df534fff
S
4048 */
4049 possible_crtcs = 1 << plane_id;
4050 if (plane_id >= dm->dc->caps.max_streams)
4051 possible_crtcs = 0xff;
4052
cc1fec57 4053 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
4054
4055 if (ret) {
4056 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 4057 kfree(plane);
df534fff
S
4058 return ret;
4059 }
4060
54087768
NK
4061 if (mode_info)
4062 mode_info->planes[plane_id] = plane;
4063
df534fff
S
4064 return ret;
4065}
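possible_crtcs is a per-CRTC bitmask: bit N set means the plane may be bound to CRTC N. A tiny illustration of the policy above, where planes beyond max_streams (the overlay/underlay case) advertise every CRTC (the max_streams value here is made up):

#include <stdio.h>

int main(void)
{
	int max_streams = 4;	/* illustrative; the real value comes from dc->caps */

	for (int plane_id = 0; plane_id < 6; plane_id++) {
		unsigned long possible_crtcs = 1ul << plane_id;

		if (plane_id >= max_streams)
			possible_crtcs = 0xff;	/* not a primary plane: allow any CRTC */
		printf("plane %d -> crtc mask 0x%02lx\n", plane_id, possible_crtcs);
	}
	return 0;
}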
4066
89fc8d4e
HW
4067
4068static void register_backlight_device(struct amdgpu_display_manager *dm,
4069 struct dc_link *link)
4070{
4071#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4072 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4073
4074 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4075 link->type != dc_connection_none) {
1f6010a9
DF
4076 /*
4077	 * Even if registration failed, we should continue with
89fc8d4e
HW
4078 * DM initialization because not having a backlight control
4079	 * is better than a black screen.
4080 */
7fd13bae 4081 if (!dm->backlight_dev[dm->num_of_edps])
118b4627 4082 amdgpu_dm_register_backlight_device(dm);
89fc8d4e 4083
7fd13bae 4084 if (dm->backlight_dev[dm->num_of_edps]) {
118b4627
ML
4085 dm->backlight_link[dm->num_of_edps] = link;
4086 dm->num_of_edps++;
4087 }
89fc8d4e
HW
4088 }
4089#endif
4090}
4091
4092
1f6010a9
DF
4093/*
4094 * In this architecture, the association
4562236b
HW
4095 * connector -> encoder -> crtc
4096 * is not really required. The crtc and connector will hold the
4097 * display_index as an abstraction to use with DAL component
4098 *
4099 * Returns 0 on success
4100 */
7578ecda 4101static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
4102{
4103 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 4104 int32_t i;
c84dec2f 4105 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 4106 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 4107 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 4108 uint32_t link_cnt;
cc1fec57 4109 int32_t primary_planes;
fbbdadf2 4110 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 4111 const struct dc_plane_cap *plane;
9470620e 4112 bool psr_feature_enabled = false;
4562236b 4113
d58159de
AD
4114 dm->display_indexes_num = dm->dc->caps.max_streams;
4115 /* Update the actual used number of crtc */
4116 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4117
4562236b 4118 link_cnt = dm->dc->caps.max_links;
4562236b
HW
4119 if (amdgpu_dm_mode_config_init(dm->adev)) {
4120 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 4121 return -EINVAL;
4562236b
HW
4122 }
4123
b2fddb13
NK
4124 /* There is one primary plane per CRTC */
4125 primary_planes = dm->dc->caps.max_streams;
54087768 4126 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 4127
b2fddb13
NK
4128 /*
4129 * Initialize primary planes, implicit planes for legacy IOCTLS.
4130 * Order is reversed to match iteration order in atomic check.
4131 */
4132 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
4133 plane = &dm->dc->caps.planes[i];
4134
b2fddb13 4135 if (initialize_plane(dm, mode_info, i,
cc1fec57 4136 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 4137 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 4138 goto fail;
d4e13b0d 4139 }
df534fff 4140 }
92f3ac40 4141
0d579c7e
NK
4142 /*
4143 * Initialize overlay planes, index starting after primary planes.
4144 * These planes have a higher DRM index than the primary planes since
4145 * they should be considered as having a higher z-order.
4146 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
4147 *
4148 * Only support DCN for now, and only expose one so we don't encourage
4149 * userspace to use up all the pipes.
0d579c7e 4150 */
cc1fec57
NK
4151 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4152 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4153
4154 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4155 continue;
4156
4157 if (!plane->blends_with_above || !plane->blends_with_below)
4158 continue;
4159
ea36ad34 4160 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
4161 continue;
4162
54087768 4163 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 4164 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 4165 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 4166 goto fail;
d4e13b0d 4167 }
cc1fec57
NK
4168
4169 /* Only create one overlay plane. */
4170 break;
d4e13b0d 4171 }
4562236b 4172
d4e13b0d 4173 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 4174 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 4175 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 4176 goto fail;
4562236b 4177 }
4562236b 4178
50610b74 4179#if defined(CONFIG_DRM_AMD_DC_DCN)
81927e28 4180 /* Use Outbox interrupt */
1d789535 4181 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4182 case IP_VERSION(3, 0, 0):
4183 case IP_VERSION(3, 1, 2):
4184 case IP_VERSION(3, 1, 3):
4185 case IP_VERSION(2, 1, 0):
81927e28
JS
4186 if (register_outbox_irq_handlers(dm->adev)) {
4187 DRM_ERROR("DM: Failed to initialize IRQ\n");
4188 goto fail;
4189 }
4190 break;
4191 default:
c08182f2 4192 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
1d789535 4193 adev->ip_versions[DCE_HWIP][0]);
81927e28 4194 }
9470620e
NK
4195
4196 /* Determine whether to enable PSR support by default. */
4197 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4198 switch (adev->ip_versions[DCE_HWIP][0]) {
4199 case IP_VERSION(3, 1, 2):
4200 case IP_VERSION(3, 1, 3):
4201 psr_feature_enabled = true;
4202 break;
4203 default:
4204 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4205 break;
4206 }
4207 }
50610b74 4208#endif
81927e28 4209
4562236b
HW
4210 /* loops over all connectors on the board */
4211 for (i = 0; i < link_cnt; i++) {
89fc8d4e 4212 struct dc_link *link = NULL;
4562236b
HW
4213
4214 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4215 DRM_ERROR(
4216 "KMS: Cannot support more than %d display indexes\n",
4217 AMDGPU_DM_MAX_DISPLAY_INDEX);
4218 continue;
4219 }
4220
4221 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4222 if (!aconnector)
cd8a2ae8 4223 goto fail;
4562236b
HW
4224
4225 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 4226 if (!aencoder)
cd8a2ae8 4227 goto fail;
4562236b
HW
4228
4229 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4230 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 4231 goto fail;
4562236b
HW
4232 }
4233
4234 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4235 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 4236 goto fail;
4562236b
HW
4237 }
4238
89fc8d4e
HW
4239 link = dc_get_link_at_index(dm->dc, i);
4240
fbbdadf2
BL
4241 if (!dc_link_detect_sink(link, &new_connection_type))
4242 DRM_ERROR("KMS: Failed to detect connector\n");
4243
4244 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4245 emulated_link_detect(link);
4246 amdgpu_dm_update_connector_after_detect(aconnector);
4247
4248 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 4249 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 4250 register_backlight_device(dm, link);
b295ce39
RL
4251 if (dm->num_of_edps)
4252 update_connector_ext_caps(aconnector);
9470620e 4253 if (psr_feature_enabled)
397a9bc5 4254 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
4255 }
4256
4257
4562236b
HW
4258 }
4259
4260 /* Software is initialized. Now we can register interrupt handlers. */
4261 switch (adev->asic_type) {
55e56389
MR
4262#if defined(CONFIG_DRM_AMD_DC_SI)
4263 case CHIP_TAHITI:
4264 case CHIP_PITCAIRN:
4265 case CHIP_VERDE:
4266 case CHIP_OLAND:
4267 if (dce60_register_irq_handlers(dm->adev)) {
4268 DRM_ERROR("DM: Failed to initialize IRQ\n");
4269 goto fail;
4270 }
4271 break;
4272#endif
4562236b
HW
4273 case CHIP_BONAIRE:
4274 case CHIP_HAWAII:
cd4b356f
AD
4275 case CHIP_KAVERI:
4276 case CHIP_KABINI:
4277 case CHIP_MULLINS:
4562236b
HW
4278 case CHIP_TONGA:
4279 case CHIP_FIJI:
4280 case CHIP_CARRIZO:
4281 case CHIP_STONEY:
4282 case CHIP_POLARIS11:
4283 case CHIP_POLARIS10:
b264d345 4284 case CHIP_POLARIS12:
7737de91 4285 case CHIP_VEGAM:
2c8ad2d5 4286 case CHIP_VEGA10:
2325ff30 4287 case CHIP_VEGA12:
1fe6bf2f 4288 case CHIP_VEGA20:
4562236b
HW
4289 if (dce110_register_irq_handlers(dm->adev)) {
4290 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 4291 goto fail;
4562236b
HW
4292 }
4293 break;
4294 default:
c08182f2 4295#if defined(CONFIG_DRM_AMD_DC_DCN)
1d789535 4296 switch (adev->ip_versions[DCE_HWIP][0]) {
559f591d
AD
4297 case IP_VERSION(1, 0, 0):
4298 case IP_VERSION(1, 0, 1):
c08182f2
AD
4299 case IP_VERSION(2, 0, 2):
4300 case IP_VERSION(2, 0, 3):
4301 case IP_VERSION(2, 0, 0):
4302 case IP_VERSION(2, 1, 0):
4303 case IP_VERSION(3, 0, 0):
4304 case IP_VERSION(3, 0, 2):
4305 case IP_VERSION(3, 0, 3):
4306 case IP_VERSION(3, 0, 1):
4307 case IP_VERSION(3, 1, 2):
4308 case IP_VERSION(3, 1, 3):
4309 if (dcn10_register_irq_handlers(dm->adev)) {
4310 DRM_ERROR("DM: Failed to initialize IRQ\n");
4311 goto fail;
4312 }
4313 break;
4314 default:
2cbc6f42 4315 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
1d789535 4316 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4317 goto fail;
c08182f2
AD
4318 }
4319#endif
2cbc6f42 4320 break;
4562236b
HW
4321 }
4322
4562236b 4323 return 0;
cd8a2ae8 4324fail:
4562236b 4325 kfree(aencoder);
4562236b 4326 kfree(aconnector);
54087768 4327
59d0f396 4328 return -EINVAL;
4562236b
HW
4329}
4330
7578ecda 4331static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b 4332{
eb3dc897 4333 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
4334 return;
4335}
4336
4337/******************************************************************************
4338 * amdgpu_display_funcs functions
4339 *****************************************************************************/
4340
1f6010a9 4341/*
4562236b
HW
4342 * dm_bandwidth_update - program display watermarks
4343 *
4344 * @adev: amdgpu_device pointer
4345 *
4346 * Calculate and program the display watermarks and line buffer allocation.
4347 */
4348static void dm_bandwidth_update(struct amdgpu_device *adev)
4349{
49c07a99 4350 /* TODO: implement later */
4562236b
HW
4351}
4352
39cc5be2 4353static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
4354 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4355 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
4356 .backlight_set_level = NULL, /* never called for DC */
4357 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
4358 .hpd_sense = NULL,/* called unconditionally */
4359 .hpd_set_polarity = NULL, /* called unconditionally */
4360 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4361 .page_flip_get_scanoutpos =
4362 dm_crtc_get_scanoutpos,/* called unconditionally */
4363 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4364 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4365};
4366
4367#if defined(CONFIG_DEBUG_KERNEL_DC)
4368
3ee6b26b
AD
4369static ssize_t s3_debug_store(struct device *device,
4370 struct device_attribute *attr,
4371 const char *buf,
4372 size_t count)
4562236b
HW
4373{
4374 int ret;
4375 int s3_state;
ef1de361 4376 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 4377 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
4378
4379 ret = kstrtoint(buf, 0, &s3_state);
4380
4381 if (ret == 0) {
4382 if (s3_state) {
4383 dm_resume(adev);
4a580877 4384 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
4385 } else
4386 dm_suspend(adev);
4387 }
4388
4389 return ret == 0 ? count : 0;
4390}
4391
4392DEVICE_ATTR_WO(s3_debug);
4393
4394#endif
4395
4396static int dm_early_init(void *handle)
4397{
4398 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4399
4562236b 4400 switch (adev->asic_type) {
55e56389
MR
4401#if defined(CONFIG_DRM_AMD_DC_SI)
4402 case CHIP_TAHITI:
4403 case CHIP_PITCAIRN:
4404 case CHIP_VERDE:
4405 adev->mode_info.num_crtc = 6;
4406 adev->mode_info.num_hpd = 6;
4407 adev->mode_info.num_dig = 6;
4408 break;
4409 case CHIP_OLAND:
4410 adev->mode_info.num_crtc = 2;
4411 adev->mode_info.num_hpd = 2;
4412 adev->mode_info.num_dig = 2;
4413 break;
4414#endif
4562236b
HW
4415 case CHIP_BONAIRE:
4416 case CHIP_HAWAII:
4417 adev->mode_info.num_crtc = 6;
4418 adev->mode_info.num_hpd = 6;
4419 adev->mode_info.num_dig = 6;
4562236b 4420 break;
cd4b356f
AD
4421 case CHIP_KAVERI:
4422 adev->mode_info.num_crtc = 4;
4423 adev->mode_info.num_hpd = 6;
4424 adev->mode_info.num_dig = 7;
cd4b356f
AD
4425 break;
4426 case CHIP_KABINI:
4427 case CHIP_MULLINS:
4428 adev->mode_info.num_crtc = 2;
4429 adev->mode_info.num_hpd = 6;
4430 adev->mode_info.num_dig = 6;
cd4b356f 4431 break;
4562236b
HW
4432 case CHIP_FIJI:
4433 case CHIP_TONGA:
4434 adev->mode_info.num_crtc = 6;
4435 adev->mode_info.num_hpd = 6;
4436 adev->mode_info.num_dig = 7;
4562236b
HW
4437 break;
4438 case CHIP_CARRIZO:
4439 adev->mode_info.num_crtc = 3;
4440 adev->mode_info.num_hpd = 6;
4441 adev->mode_info.num_dig = 9;
4562236b
HW
4442 break;
4443 case CHIP_STONEY:
4444 adev->mode_info.num_crtc = 2;
4445 adev->mode_info.num_hpd = 6;
4446 adev->mode_info.num_dig = 9;
4562236b
HW
4447 break;
4448 case CHIP_POLARIS11:
b264d345 4449 case CHIP_POLARIS12:
4562236b
HW
4450 adev->mode_info.num_crtc = 5;
4451 adev->mode_info.num_hpd = 5;
4452 adev->mode_info.num_dig = 5;
4562236b
HW
4453 break;
4454 case CHIP_POLARIS10:
7737de91 4455 case CHIP_VEGAM:
4562236b
HW
4456 adev->mode_info.num_crtc = 6;
4457 adev->mode_info.num_hpd = 6;
4458 adev->mode_info.num_dig = 6;
4562236b 4459 break;
2c8ad2d5 4460 case CHIP_VEGA10:
2325ff30 4461 case CHIP_VEGA12:
1fe6bf2f 4462 case CHIP_VEGA20:
2c8ad2d5
AD
4463 adev->mode_info.num_crtc = 6;
4464 adev->mode_info.num_hpd = 6;
4465 adev->mode_info.num_dig = 6;
4466 break;
4562236b 4467 default:
c08182f2 4468#if defined(CONFIG_DRM_AMD_DC_DCN)
1d789535 4469 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4470 case IP_VERSION(2, 0, 2):
4471 case IP_VERSION(3, 0, 0):
4472 adev->mode_info.num_crtc = 6;
4473 adev->mode_info.num_hpd = 6;
4474 adev->mode_info.num_dig = 6;
4475 break;
4476 case IP_VERSION(2, 0, 0):
4477 case IP_VERSION(3, 0, 2):
4478 adev->mode_info.num_crtc = 5;
4479 adev->mode_info.num_hpd = 5;
4480 adev->mode_info.num_dig = 5;
4481 break;
4482 case IP_VERSION(2, 0, 3):
4483 case IP_VERSION(3, 0, 3):
4484 adev->mode_info.num_crtc = 2;
4485 adev->mode_info.num_hpd = 2;
4486 adev->mode_info.num_dig = 2;
4487 break;
559f591d
AD
4488 case IP_VERSION(1, 0, 0):
4489 case IP_VERSION(1, 0, 1):
c08182f2
AD
4490 case IP_VERSION(3, 0, 1):
4491 case IP_VERSION(2, 1, 0):
4492 case IP_VERSION(3, 1, 2):
4493 case IP_VERSION(3, 1, 3):
4494 adev->mode_info.num_crtc = 4;
4495 adev->mode_info.num_hpd = 4;
4496 adev->mode_info.num_dig = 4;
4497 break;
4498 default:
2cbc6f42 4499 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
1d789535 4500 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4501 return -EINVAL;
c08182f2
AD
4502 }
4503#endif
2cbc6f42 4504 break;
4562236b
HW
4505 }
4506
c8dd5715
MD
4507 amdgpu_dm_set_irq_funcs(adev);
4508
39cc5be2
AD
4509 if (adev->mode_info.funcs == NULL)
4510 adev->mode_info.funcs = &dm_display_funcs;
4511
1f6010a9
DF
4512 /*
4513 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 4514 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
4515 * amdgpu_device_init()
4516 */
4562236b
HW
4517#if defined(CONFIG_DEBUG_KERNEL_DC)
4518 device_create_file(
4a580877 4519 adev_to_drm(adev)->dev,
4562236b
HW
4520 &dev_attr_s3_debug);
4521#endif
4522
4523 return 0;
4524}
4525
9b690ef3 4526static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
4527 struct dc_stream_state *new_stream,
4528 struct dc_stream_state *old_stream)
9b690ef3 4529{
2afda735 4530 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4531}
4532
4533static bool modereset_required(struct drm_crtc_state *crtc_state)
4534{
2afda735 4535 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4536}
4537
7578ecda 4538static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
4539{
4540 drm_encoder_cleanup(encoder);
4541 kfree(encoder);
4542}
4543
4544static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4545 .destroy = amdgpu_dm_encoder_destroy,
4546};
4547
e7b07cee 4548
6300b3bd
MK
4549static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4550 struct drm_framebuffer *fb,
4551 int *min_downscale, int *max_upscale)
4552{
4553 struct amdgpu_device *adev = drm_to_adev(dev);
4554 struct dc *dc = adev->dm.dc;
4555 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4556 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4557
4558 switch (fb->format->format) {
4559 case DRM_FORMAT_P010:
4560 case DRM_FORMAT_NV12:
4561 case DRM_FORMAT_NV21:
4562 *max_upscale = plane_cap->max_upscale_factor.nv12;
4563 *min_downscale = plane_cap->max_downscale_factor.nv12;
4564 break;
4565
4566 case DRM_FORMAT_XRGB16161616F:
4567 case DRM_FORMAT_ARGB16161616F:
4568 case DRM_FORMAT_XBGR16161616F:
4569 case DRM_FORMAT_ABGR16161616F:
4570 *max_upscale = plane_cap->max_upscale_factor.fp16;
4571 *min_downscale = plane_cap->max_downscale_factor.fp16;
4572 break;
4573
4574 default:
4575 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4576 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4577 break;
4578 }
4579
4580 /*
4581	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4582 * scaling factor of 1.0 == 1000 units.
4583 */
4584 if (*max_upscale == 1)
4585 *max_upscale = 1000;
4586
4587 if (*min_downscale == 1)
4588 *min_downscale = 1000;
4589}
4590
4591
4375d625
S
4592static int fill_dc_scaling_info(struct amdgpu_device *adev,
4593 const struct drm_plane_state *state,
695af5f9 4594 struct dc_scaling_info *scaling_info)
e7b07cee 4595{
6300b3bd 4596 int scale_w, scale_h, min_downscale, max_upscale;
e7b07cee 4597
695af5f9 4598 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 4599
695af5f9
NK
4600	/* Source is fixed-point 16.16, but we ignore the fractional part for now... */
4601 scaling_info->src_rect.x = state->src_x >> 16;
4602 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 4603
d89f6048
HW
4604 /*
4605	 * For reasons we don't (yet) fully understand, a non-zero
4606 * src_y coordinate into an NV12 buffer can cause a
4375d625
S
4607 * system hang on DCN1x.
4608 * To avoid hangs (and maybe be overly cautious)
d89f6048
HW
4609 * let's reject both non-zero src_x and src_y.
4610 *
4611 * We currently know of only one use-case to reproduce a
4612 * scenario with non-zero src_x and src_y for NV12, which
4613 * is to gesture the YouTube Android app into full screen
4614 * on ChromeOS.
4615 */
4375d625
S
4616 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4617 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4618 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4619 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
d89f6048
HW
4620 return -EINVAL;
4621
695af5f9
NK
4622 scaling_info->src_rect.width = state->src_w >> 16;
4623 if (scaling_info->src_rect.width == 0)
4624 return -EINVAL;
4625
4626 scaling_info->src_rect.height = state->src_h >> 16;
4627 if (scaling_info->src_rect.height == 0)
4628 return -EINVAL;
4629
4630 scaling_info->dst_rect.x = state->crtc_x;
4631 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
4632
4633 if (state->crtc_w == 0)
695af5f9 4634 return -EINVAL;
e7b07cee 4635
695af5f9 4636 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
4637
4638 if (state->crtc_h == 0)
695af5f9 4639 return -EINVAL;
e7b07cee 4640
695af5f9 4641 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 4642
695af5f9
NK
4643 /* DRM doesn't specify clipping on destination output. */
4644 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 4645
6300b3bd
MK
4646 /* Validate scaling per-format with DC plane caps */
4647 if (state->plane && state->plane->dev && state->fb) {
4648 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4649 &min_downscale, &max_upscale);
4650 } else {
4651 min_downscale = 250;
4652 max_upscale = 16000;
4653 }
4654
6491f0c0
NK
4655 scale_w = scaling_info->dst_rect.width * 1000 /
4656 scaling_info->src_rect.width;
e7b07cee 4657
6300b3bd 4658 if (scale_w < min_downscale || scale_w > max_upscale)
6491f0c0
NK
4659 return -EINVAL;
4660
4661 scale_h = scaling_info->dst_rect.height * 1000 /
4662 scaling_info->src_rect.height;
4663
6300b3bd 4664 if (scale_h < min_downscale || scale_h > max_upscale)
6491f0c0
NK
4665 return -EINVAL;
4666
695af5f9
NK
4667 /*
4668 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4669 * assume reasonable defaults based on the format.
4670 */
e7b07cee 4671
695af5f9 4672 return 0;
4562236b 4673}
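DRM hands the source rectangle over in 16.16 fixed point, while DC expresses scale limits in thousandths (1000 == a factor of 1.0, per get_min_max_dc_plane_scaling() above). A standalone sketch of the same conversion and bounds check, with made-up input values:

#include <stdio.h>

int main(void)
{
	unsigned int src_w = 1920u << 16;	/* 16.16 fixed point, as in drm_plane_state */
	unsigned int dst_w = 960;		/* integer CRTC coordinates */
	int min_downscale = 250, max_upscale = 16000;	/* 0.25x .. 16.0x */

	/* same permille computation as fill_dc_scaling_info() */
	int scale_w = dst_w * 1000 / (src_w >> 16);

	printf("scale_w = %d permille (%s)\n", scale_w,
	       (scale_w < min_downscale || scale_w > max_upscale) ? "rejected" : "ok");
	return 0;
}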
695af5f9 4674
a3241991
BN
4675static void
4676fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4677 uint64_t tiling_flags)
e7b07cee 4678{
a3241991
BN
4679 /* Fill GFX8 params */
4680 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4681 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 4682
a3241991
BN
4683 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4684 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4685 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4686 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4687 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 4688
a3241991
BN
4689 /* XXX fix me for VI */
4690 tiling_info->gfx8.num_banks = num_banks;
4691 tiling_info->gfx8.array_mode =
4692 DC_ARRAY_2D_TILED_THIN1;
4693 tiling_info->gfx8.tile_split = tile_split;
4694 tiling_info->gfx8.bank_width = bankw;
4695 tiling_info->gfx8.bank_height = bankh;
4696 tiling_info->gfx8.tile_aspect = mtaspect;
4697 tiling_info->gfx8.tile_mode =
4698 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4699 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4700 == DC_ARRAY_1D_TILED_THIN1) {
4701 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
4702 }
4703
a3241991
BN
4704 tiling_info->gfx8.pipe_config =
4705 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
4706}
4707
a3241991
BN
4708static void
4709fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4710 union dc_tiling_info *tiling_info)
4711{
4712 tiling_info->gfx9.num_pipes =
4713 adev->gfx.config.gb_addr_config_fields.num_pipes;
4714 tiling_info->gfx9.num_banks =
4715 adev->gfx.config.gb_addr_config_fields.num_banks;
4716 tiling_info->gfx9.pipe_interleave =
4717 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4718 tiling_info->gfx9.num_shader_engines =
4719 adev->gfx.config.gb_addr_config_fields.num_se;
4720 tiling_info->gfx9.max_compressed_frags =
4721 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4722 tiling_info->gfx9.num_rb_per_se =
4723 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4724 tiling_info->gfx9.shaderEnable = 1;
1d789535 4725 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
a3241991 4726 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
7df7e505
NK
4727}
4728
695af5f9 4729static int
a3241991
BN
4730validate_dcc(struct amdgpu_device *adev,
4731 const enum surface_pixel_format format,
4732 const enum dc_rotation_angle rotation,
4733 const union dc_tiling_info *tiling_info,
4734 const struct dc_plane_dcc_param *dcc,
4735 const struct dc_plane_address *address,
4736 const struct plane_size *plane_size)
7df7e505
NK
4737{
4738 struct dc *dc = adev->dm.dc;
8daa1218
NC
4739 struct dc_dcc_surface_param input;
4740 struct dc_surface_dcc_cap output;
7df7e505 4741
8daa1218
NC
4742 memset(&input, 0, sizeof(input));
4743 memset(&output, 0, sizeof(output));
4744
a3241991 4745 if (!dcc->enable)
87b7ebc2
RS
4746 return 0;
4747
a3241991
BN
4748 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4749 !dc->cap_funcs.get_dcc_compression_cap)
09e5665a 4750 return -EINVAL;
7df7e505 4751
695af5f9 4752 input.format = format;
12e2b2d4
DL
4753 input.surface_size.width = plane_size->surface_size.width;
4754 input.surface_size.height = plane_size->surface_size.height;
695af5f9 4755 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 4756
695af5f9 4757 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 4758 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 4759 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
4760 input.scan = SCAN_DIRECTION_VERTICAL;
4761
4762 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 4763 return -EINVAL;
7df7e505
NK
4764
4765 if (!output.capable)
09e5665a 4766 return -EINVAL;
7df7e505 4767
a3241991
BN
4768 if (dcc->independent_64b_blks == 0 &&
4769 output.grph.rgb.independent_64b_blks != 0)
09e5665a 4770 return -EINVAL;
7df7e505 4771
a3241991
BN
4772 return 0;
4773}
4774
37384b3f
BN
4775static bool
4776modifier_has_dcc(uint64_t modifier)
4777{
4778 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4779}
4780
4781static unsigned
4782modifier_gfx9_swizzle_mode(uint64_t modifier)
4783{
4784 if (modifier == DRM_FORMAT_MOD_LINEAR)
4785 return 0;
4786
4787 return AMD_FMT_MOD_GET(TILE, modifier);
4788}
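AMD_FMT_MOD_GET() expands to a shift-and-mask over the packed 64-bit modifier (the real shifts and masks live in the DRM fourcc UAPI header). A generic stand-in showing the extraction, with the TILE field assumed to sit in the low five bits:

#include <stdint.h>
#include <stdio.h>

#define FIELD_GET64(mod, shift, mask) (((mod) >> (shift)) & (mask))

int main(void)
{
	/* assumed layout: TILE occupies bits [4:0] of the modifier payload */
	uint64_t modifier = 0x9;	/* e.g. a 64K_S-style swizzle value */

	printf("tile field = %llu\n",
	       (unsigned long long)FIELD_GET64(modifier, 0, 0x1f));
	return 0;
}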
4789
dfbbfe3c
BN
4790static const struct drm_format_info *
4791amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4792{
816853f9 4793 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
4794}
4795
37384b3f
BN
4796static void
4797fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4798 union dc_tiling_info *tiling_info,
4799 uint64_t modifier)
4800{
4801 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4802 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4803 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4804 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4805
4806 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4807
4808 if (!IS_AMD_FMT_MOD(modifier))
4809 return;
4810
4811 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4812 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4813
4814 if (adev->family >= AMDGPU_FAMILY_NV) {
4815 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4816 } else {
4817 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4818
4819 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4820 }
4821}
4822
faa37f54
BN
4823enum dm_micro_swizzle {
4824 MICRO_SWIZZLE_Z = 0,
4825 MICRO_SWIZZLE_S = 1,
4826 MICRO_SWIZZLE_D = 2,
4827 MICRO_SWIZZLE_R = 3
4828};
4829
4830static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4831 uint32_t format,
4832 uint64_t modifier)
4833{
4834 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4835 const struct drm_format_info *info = drm_format_info(format);
fe180178 4836 int i;
faa37f54
BN
4837
4838 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4839
4840 if (!info)
4841 return false;
4842
4843 /*
fe180178
QZ
4844 * We always have to allow these modifiers:
4845 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4846 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
faa37f54 4847 */
fe180178
QZ
4848 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4849 modifier == DRM_FORMAT_MOD_INVALID) {
faa37f54 4850 return true;
fe180178 4851 }
faa37f54 4852
fe180178
QZ
4853 /* Check that the modifier is on the list of the plane's supported modifiers. */
4854 for (i = 0; i < plane->modifier_count; i++) {
4855 if (modifier == plane->modifiers[i])
4856 break;
4857 }
4858 if (i == plane->modifier_count)
faa37f54
BN
4859 return false;
4860
4861 /*
4862 * For D swizzle the canonical modifier depends on the bpp, so check
4863 * it here.
4864 */
4865 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4866 adev->family >= AMDGPU_FAMILY_NV) {
4867 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4868 return false;
4869 }
4870
4871 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4872 info->cpp[0] < 8)
4873 return false;
4874
4875 if (modifier_has_dcc(modifier)) {
4876 /* Per radeonsi comments 16/64 bpp are more complicated. */
4877 if (info->cpp[0] != 4)
4878 return false;
951796f2
SS
4879 /* We support multi-planar formats, but not when combined with
4880 * additional DCC metadata planes. */
4881 if (info->num_planes > 1)
4882 return false;
faa37f54
BN
4883 }
4884
4885 return true;
4886}
4887
4888static void
4889add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4890{
4891 if (!*mods)
4892 return;
4893
4894 if (*cap - *size < 1) {
4895 uint64_t new_cap = *cap * 2;
4896 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4897
4898 if (!new_mods) {
4899 kfree(*mods);
4900 *mods = NULL;
4901 return;
4902 }
4903
4904 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4905 kfree(*mods);
4906 *mods = new_mods;
4907 *cap = new_cap;
4908 }
4909
4910 (*mods)[*size] = mod;
4911 *size += 1;
4912}
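add_modifier() is an amortized grow-by-doubling append whose out-of-memory behaviour is to drop the whole list, so all later appends become no-ops. The same shape in user space, with malloc/free standing in for kmalloc/kfree:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static void append_mod(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;		/* list was poisoned by an earlier allocation failure */

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = malloc(new_cap * sizeof(uint64_t));

		if (!new_mods) {
			free(*mods);
			*mods = NULL;	/* poison the list */
			return;
		}
		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		free(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}

int main(void)
{
	uint64_t cap = 1, size = 0;
	uint64_t *mods = malloc(cap * sizeof(uint64_t));

	for (uint64_t m = 0; m < 5; m++)
		append_mod(&mods, &size, &cap, m);
	free(mods);
	return 0;
}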
4913
4914static void
4915add_gfx9_modifiers(const struct amdgpu_device *adev,
4916 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4917{
4918 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4919 int pipe_xor_bits = min(8, pipes +
4920 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4921 int bank_xor_bits = min(8 - pipe_xor_bits,
4922 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4923 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4924 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4925
4926
4927 if (adev->family == AMDGPU_FAMILY_RV) {
4928 /* Raven2 and later */
4929 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4930
4931 /*
4932 * No _D DCC swizzles yet because we only allow 32bpp, which
4933 * doesn't support _D on DCN
4934 */
4935
4936 if (has_constant_encode) {
4937 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4938 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4939 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4940 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4941 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4942 AMD_FMT_MOD_SET(DCC, 1) |
4943 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4944 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4945 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4946 }
4947
4948 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4949 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4950 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4951 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4952 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4953 AMD_FMT_MOD_SET(DCC, 1) |
4954 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4955 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4956 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4957
4958 if (has_constant_encode) {
4959 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4960 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4961 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4962 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4963 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4964 AMD_FMT_MOD_SET(DCC, 1) |
4965 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4966 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4967 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4969 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4970 AMD_FMT_MOD_SET(RB, rb) |
4971 AMD_FMT_MOD_SET(PIPE, pipes));
4972 }
4973
4974 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4975 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4976 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4977 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4978 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4979 AMD_FMT_MOD_SET(DCC, 1) |
4980 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4981 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4982 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4983 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4984 AMD_FMT_MOD_SET(RB, rb) |
4985 AMD_FMT_MOD_SET(PIPE, pipes));
4986 }
4987
4988 /*
4989 * Only supported for 64bpp on Raven, will be filtered on format in
4990 * dm_plane_format_mod_supported.
4991 */
4992 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4993 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4994 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4995 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4996 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4997
4998 if (adev->family == AMDGPU_FAMILY_RV) {
4999 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5000 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5001 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5002 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5003 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5004 }
5005
5006 /*
5007 * Only supported for 64bpp on Raven, will be filtered on format in
5008 * dm_plane_format_mod_supported.
5009 */
5010 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5011 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5012 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5013
5014 if (adev->family == AMDGPU_FAMILY_RV) {
5015 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5016 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5017 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5018 }
5019}
5020
5021static void
5022add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5023 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5024{
5025 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5026
5027 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5028 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5029 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5030 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5031 AMD_FMT_MOD_SET(DCC, 1) |
5032 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5033 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5034 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5035
5036 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5037 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5038 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5039 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5040 AMD_FMT_MOD_SET(DCC, 1) |
5041 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5042 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5043 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5044 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5045
5046 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5047 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5048 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5049 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5050
5051 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5052 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5053 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5054 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5055
5056
5057 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5058 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5059 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5060 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5061
5062 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5063 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5064 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5065}
5066
5067static void
5068add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5069 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5070{
5071 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5072 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5073
5074 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5075 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5076 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5077 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5078 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5079 AMD_FMT_MOD_SET(DCC, 1) |
5080 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5081 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5082 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5083 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5084
7f6ab50a
JA
5085 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5086 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5087 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5088 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5089 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5090 AMD_FMT_MOD_SET(DCC, 1) |
5091 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5092 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5093 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5094
faa37f54
BN
5095 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5096 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5097 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5098 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5099 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5100 AMD_FMT_MOD_SET(DCC, 1) |
5101 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5102 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5103 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5104 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5105 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5106
7f6ab50a
JA
5107 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5108 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5109 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5110 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5111 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5112 AMD_FMT_MOD_SET(DCC, 1) |
5113 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5114 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5115 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5116 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5117
faa37f54
BN
5118 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5119 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5120 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5121 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5122 AMD_FMT_MOD_SET(PACKERS, pkrs));
5123
5124 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5125 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5126 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5127 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5128 AMD_FMT_MOD_SET(PACKERS, pkrs));
5129
5130 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5131 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5132 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5133 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5134
5135 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5136 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5137 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5138}
5139
5140static int
5141get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5142{
5143 uint64_t size = 0, capacity = 128;
5144 *mods = NULL;
5145
5146 /* We have not hooked up any pre-GFX9 modifiers. */
5147 if (adev->family < AMDGPU_FAMILY_AI)
5148 return 0;
5149
5150 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5151
5152 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5153 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5154 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5155 return *mods ? 0 : -ENOMEM;
5156 }
5157
5158 switch (adev->family) {
5159 case AMDGPU_FAMILY_AI:
5160 case AMDGPU_FAMILY_RV:
5161 add_gfx9_modifiers(adev, mods, &size, &capacity);
5162 break;
5163 case AMDGPU_FAMILY_NV:
5164 case AMDGPU_FAMILY_VGH:
1ebcaebd 5165 case AMDGPU_FAMILY_YC:
1d789535 5166 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
faa37f54
BN
5167 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5168 else
5169 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5170 break;
5171 }
5172
5173 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5174
5175 /* INVALID marks the end of the list. */
5176 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5177
5178 if (!*mods)
5179 return -ENOMEM;
5180
5181 return 0;
5182}
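/*
 * Illustrative sketch (assumption: this mirrors the driver's plane init
 * path; example_init_plane() itself is hypothetical): the list built by
 * get_plane_modifiers() is handed to DRM core at plane creation and can
 * be freed right away, since drm_universal_plane_init() keeps a copy.
 */
static int __maybe_unused example_init_plane(struct amdgpu_device *adev,
					     struct drm_plane *plane,
					     uint32_t possible_crtcs,
					     const struct drm_plane_funcs *funcs,
					     const uint32_t *formats,
					     unsigned int num_formats,
					     enum drm_plane_type plane_type)
{
	uint64_t *mods = NULL;
	int res = get_plane_modifiers(adev, plane_type, &mods);

	if (res)
		return res;

	res = drm_universal_plane_init(adev_to_drm(adev), plane,
				       possible_crtcs, funcs, formats,
				       num_formats, mods, plane_type, NULL);
	kfree(mods);
	return res;
}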
5183
37384b3f
BN
5184static int
5185fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5186 const struct amdgpu_framebuffer *afb,
5187 const enum surface_pixel_format format,
5188 const enum dc_rotation_angle rotation,
5189 const struct plane_size *plane_size,
5190 union dc_tiling_info *tiling_info,
5191 struct dc_plane_dcc_param *dcc,
5192 struct dc_plane_address *address,
5193 const bool force_disable_dcc)
5194{
5195 const uint64_t modifier = afb->base.modifier;
2be7f77f 5196 int ret = 0;
37384b3f
BN
5197
5198 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5199 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5200
5201 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5202 uint64_t dcc_address = afb->address + afb->base.offsets[1];
3d360154 5203 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
a86396c3 5204 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
37384b3f
BN
5205
5206 dcc->enable = 1;
5207 dcc->meta_pitch = afb->base.pitches[1];
3d360154 5208 dcc->independent_64b_blks = independent_64b_blks;
a86396c3
JA
5209 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5210 if (independent_64b_blks && independent_128b_blks)
f2e7d856 5211 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
a86396c3
JA
5212 else if (independent_128b_blks)
5213 dcc->dcc_ind_blk = hubp_ind_block_128b;
5214 else if (independent_64b_blks && !independent_128b_blks)
f2e7d856 5215 dcc->dcc_ind_blk = hubp_ind_block_64b;
a86396c3
JA
5216 else
5217 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5218 } else {
5219 if (independent_64b_blks)
5220 dcc->dcc_ind_blk = hubp_ind_block_64b;
5221 else
5222 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5223 }
37384b3f
BN
5224
5225 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5226 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5227 }
5228
5229 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5230 if (ret)
2be7f77f 5231 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
7df7e505 5232
2be7f77f 5233 return ret;
09e5665a
NK
5234}
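/*
 * Layout note (descriptive): with AMD DCC modifiers the compression
 * metadata travels as an extra framebuffer plane, so plane 0 holds the
 * pixel data and plane 1 the (displayable) DCC surface; that is why the
 * code above takes the meta address from offsets[1] and the meta pitch
 * from pitches[1].
 */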
5235
5236static int
320932bf 5237fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 5238 const struct amdgpu_framebuffer *afb,
695af5f9
NK
5239 const enum surface_pixel_format format,
5240 const enum dc_rotation_angle rotation,
5241 const uint64_t tiling_flags,
09e5665a 5242 union dc_tiling_info *tiling_info,
12e2b2d4 5243 struct plane_size *plane_size,
09e5665a 5244 struct dc_plane_dcc_param *dcc,
87b7ebc2 5245 struct dc_plane_address *address,
5888f07a 5246 bool tmz_surface,
87b7ebc2 5247 bool force_disable_dcc)
09e5665a 5248{
320932bf 5249 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
5250 int ret;
5251
5252 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 5253 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 5254 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
5255 memset(address, 0, sizeof(*address));
5256
5888f07a
HW
5257 address->tmz_surface = tmz_surface;
5258
695af5f9 5259 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
5260 uint64_t addr = afb->address + fb->offsets[0];
5261
12e2b2d4
DL
5262 plane_size->surface_size.x = 0;
5263 plane_size->surface_size.y = 0;
5264 plane_size->surface_size.width = fb->width;
5265 plane_size->surface_size.height = fb->height;
5266 plane_size->surface_pitch =
320932bf
NK
5267 fb->pitches[0] / fb->format->cpp[0];
5268
e0634e8d 5269 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
5270 address->grph.addr.low_part = lower_32_bits(addr);
5271 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 5272 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 5273 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 5274 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 5275
12e2b2d4
DL
5276 plane_size->surface_size.x = 0;
5277 plane_size->surface_size.y = 0;
5278 plane_size->surface_size.width = fb->width;
5279 plane_size->surface_size.height = fb->height;
5280 plane_size->surface_pitch =
320932bf
NK
5281 fb->pitches[0] / fb->format->cpp[0];
5282
12e2b2d4
DL
5283 plane_size->chroma_size.x = 0;
5284 plane_size->chroma_size.y = 0;
320932bf 5285 /* TODO: set these based on surface format */
12e2b2d4
DL
5286 plane_size->chroma_size.width = fb->width / 2;
5287 plane_size->chroma_size.height = fb->height / 2;
320932bf 5288
12e2b2d4 5289 plane_size->chroma_pitch =
320932bf
NK
5290 fb->pitches[1] / fb->format->cpp[1];
5291
e0634e8d
NK
5292 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5293 address->video_progressive.luma_addr.low_part =
be7b9b32 5294 lower_32_bits(luma_addr);
e0634e8d 5295 address->video_progressive.luma_addr.high_part =
be7b9b32 5296 upper_32_bits(luma_addr);
e0634e8d
NK
5297 address->video_progressive.chroma_addr.low_part =
5298 lower_32_bits(chroma_addr);
5299 address->video_progressive.chroma_addr.high_part =
5300 upper_32_bits(chroma_addr);
5301 }
09e5665a 5302
a3241991 5303 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
5304 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5305 rotation, plane_size,
5306 tiling_info, dcc,
5307 address,
5308 force_disable_dcc);
09e5665a
NK
5309 if (ret)
5310 return ret;
a3241991
BN
5311 } else {
5312 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
5313 }
5314
5315 return 0;
7df7e505
NK
5316}
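/*
 * Note (descriptive): in the video branch above (e.g. NV12/P010),
 * plane 0 is the luma surface and plane 1 the interleaved chroma
 * surface at half resolution, which is why the chroma size is
 * hard-coded to fb->width / 2 by fb->height / 2 until it is derived
 * from the surface format.
 */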
5317
d74004b6 5318static void
695af5f9 5319fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
5320 bool *per_pixel_alpha, bool *global_alpha,
5321 int *global_alpha_value)
5322{
5323 *per_pixel_alpha = false;
5324 *global_alpha = false;
5325 *global_alpha_value = 0xff;
5326
5327 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5328 return;
5329
5330 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5331 static const uint32_t alpha_formats[] = {
5332 DRM_FORMAT_ARGB8888,
5333 DRM_FORMAT_RGBA8888,
5334 DRM_FORMAT_ABGR8888,
5335 };
5336 uint32_t format = plane_state->fb->format->format;
5337 unsigned int i;
5338
5339 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5340 if (format == alpha_formats[i]) {
5341 *per_pixel_alpha = true;
5342 break;
5343 }
5344 }
5345 }
5346
5347 if (plane_state->alpha < 0xffff) {
5348 *global_alpha = true;
5349 *global_alpha_value = plane_state->alpha >> 8;
5350 }
5351}
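/*
 * Worked example (illustrative): DRM stores plane alpha as 16 bits
 * (0x0000-0xffff) while DC takes an 8-bit global alpha, so a
 * half-transparent overlay with alpha 0x8000 yields *global_alpha =
 * true and *global_alpha_value = 0x8000 >> 8 = 0x80.
 */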
5352
004fefa3
NK
5353static int
5354fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 5355 const enum surface_pixel_format format,
004fefa3
NK
5356 enum dc_color_space *color_space)
5357{
5358 bool full_range;
5359
5360 *color_space = COLOR_SPACE_SRGB;
5361
5362 /* DRM color properties only affect non-RGB formats. */
695af5f9 5363 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
5364 return 0;
5365
5366 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5367
5368 switch (plane_state->color_encoding) {
5369 case DRM_COLOR_YCBCR_BT601:
5370 if (full_range)
5371 *color_space = COLOR_SPACE_YCBCR601;
5372 else
5373 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5374 break;
5375
5376 case DRM_COLOR_YCBCR_BT709:
5377 if (full_range)
5378 *color_space = COLOR_SPACE_YCBCR709;
5379 else
5380 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5381 break;
5382
5383 case DRM_COLOR_YCBCR_BT2020:
5384 if (full_range)
5385 *color_space = COLOR_SPACE_2020_YCBCR;
5386 else
5387 return -EINVAL;
5388 break;
5389
5390 default:
5391 return -EINVAL;
5392 }
5393
5394 return 0;
5395}
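/*
 * Note (descriptive): only full-range BT.2020 is representable here;
 * limited-range BT.2020 and unknown encodings fail with -EINVAL, which
 * propagates up and rejects the plane state.
 */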
5396
695af5f9
NK
5397static int
5398fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5399 const struct drm_plane_state *plane_state,
5400 const uint64_t tiling_flags,
5401 struct dc_plane_info *plane_info,
87b7ebc2 5402 struct dc_plane_address *address,
5888f07a 5403 bool tmz_surface,
87b7ebc2 5404 bool force_disable_dcc)
695af5f9
NK
5405{
5406 const struct drm_framebuffer *fb = plane_state->fb;
5407 const struct amdgpu_framebuffer *afb =
5408 to_amdgpu_framebuffer(plane_state->fb);
695af5f9
NK
5409 int ret;
5410
5411 memset(plane_info, 0, sizeof(*plane_info));
5412
5413 switch (fb->format->format) {
5414 case DRM_FORMAT_C8:
5415 plane_info->format =
5416 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5417 break;
5418 case DRM_FORMAT_RGB565:
5419 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5420 break;
5421 case DRM_FORMAT_XRGB8888:
5422 case DRM_FORMAT_ARGB8888:
5423 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5424 break;
5425 case DRM_FORMAT_XRGB2101010:
5426 case DRM_FORMAT_ARGB2101010:
5427 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5428 break;
5429 case DRM_FORMAT_XBGR2101010:
5430 case DRM_FORMAT_ABGR2101010:
5431 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5432 break;
5433 case DRM_FORMAT_XBGR8888:
5434 case DRM_FORMAT_ABGR8888:
5435 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5436 break;
5437 case DRM_FORMAT_NV21:
5438 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5439 break;
5440 case DRM_FORMAT_NV12:
5441 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5442 break;
cbec6477
SW
5443 case DRM_FORMAT_P010:
5444 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5445 break;
492548dc
SW
5446 case DRM_FORMAT_XRGB16161616F:
5447 case DRM_FORMAT_ARGB16161616F:
5448 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5449 break;
2a5195dc
MK
5450 case DRM_FORMAT_XBGR16161616F:
5451 case DRM_FORMAT_ABGR16161616F:
5452 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5453 break;
58020403
MK
5454 case DRM_FORMAT_XRGB16161616:
5455 case DRM_FORMAT_ARGB16161616:
5456 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5457 break;
5458 case DRM_FORMAT_XBGR16161616:
5459 case DRM_FORMAT_ABGR16161616:
5460 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5461 break;
695af5f9
NK
5462 default:
5463 DRM_ERROR(
92f1d09c
SA
5464 "Unsupported screen format %p4cc\n",
5465 &fb->format->format);
695af5f9
NK
5466 return -EINVAL;
5467 }
5468
5469 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5470 case DRM_MODE_ROTATE_0:
5471 plane_info->rotation = ROTATION_ANGLE_0;
5472 break;
5473 case DRM_MODE_ROTATE_90:
5474 plane_info->rotation = ROTATION_ANGLE_90;
5475 break;
5476 case DRM_MODE_ROTATE_180:
5477 plane_info->rotation = ROTATION_ANGLE_180;
5478 break;
5479 case DRM_MODE_ROTATE_270:
5480 plane_info->rotation = ROTATION_ANGLE_270;
5481 break;
5482 default:
5483 plane_info->rotation = ROTATION_ANGLE_0;
5484 break;
5485 }
5486
5487 plane_info->visible = true;
5488 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5489
6d83a32d
MS
5490 plane_info->layer_index = 0;
5491
695af5f9
NK
5492 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5493 &plane_info->color_space);
5494 if (ret)
5495 return ret;
5496
5497 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5498 plane_info->rotation, tiling_flags,
5499 &plane_info->tiling_info,
5500 &plane_info->plane_size,
5888f07a 5501 &plane_info->dcc, address, tmz_surface,
87b7ebc2 5502 force_disable_dcc);
695af5f9
NK
5503 if (ret)
5504 return ret;
5505
5506 fill_blending_from_plane_state(
5507 plane_state, &plane_info->per_pixel_alpha,
5508 &plane_info->global_alpha, &plane_info->global_alpha_value);
5509
5510 return 0;
5511}
5512
5513static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5514 struct dc_plane_state *dc_plane_state,
5515 struct drm_plane_state *plane_state,
5516 struct drm_crtc_state *crtc_state)
e7b07cee 5517{
cf020d49 5518 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 5519 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
5520 struct dc_scaling_info scaling_info;
5521 struct dc_plane_info plane_info;
695af5f9 5522 int ret;
87b7ebc2 5523 bool force_disable_dcc = false;
e7b07cee 5524
4375d625 5525 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
695af5f9
NK
5526 if (ret)
5527 return ret;
e7b07cee 5528
695af5f9
NK
5529 dc_plane_state->src_rect = scaling_info.src_rect;
5530 dc_plane_state->dst_rect = scaling_info.dst_rect;
5531 dc_plane_state->clip_rect = scaling_info.clip_rect;
5532 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 5533
87b7ebc2 5534 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 5535 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 5536 afb->tiling_flags,
695af5f9 5537 &plane_info,
87b7ebc2 5538 &dc_plane_state->address,
6eed95b0 5539 afb->tmz_surface,
87b7ebc2 5540 force_disable_dcc);
004fefa3
NK
5541 if (ret)
5542 return ret;
5543
695af5f9
NK
5544 dc_plane_state->format = plane_info.format;
5545 dc_plane_state->color_space = plane_info.color_space;
5547 dc_plane_state->plane_size = plane_info.plane_size;
5548 dc_plane_state->rotation = plane_info.rotation;
5549 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5550 dc_plane_state->stereo_format = plane_info.stereo_format;
5551 dc_plane_state->tiling_info = plane_info.tiling_info;
5552 dc_plane_state->visible = plane_info.visible;
5553 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5554 dc_plane_state->global_alpha = plane_info.global_alpha;
5555 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5556 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 5557 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
7afa0033 5558 dc_plane_state->flip_int_enabled = true;
695af5f9 5559
e277adc5
LSL
5560 /*
5561 * Always set input transfer function, since plane state is refreshed
5562 * every time.
5563 */
cf020d49
NK
5564 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5565 if (ret)
5566 return ret;
e7b07cee 5567
cf020d49 5568 return 0;
e7b07cee
HW
5569}
5570
3ee6b26b
AD
5571static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5572 const struct dm_connector_state *dm_state,
5573 struct dc_stream_state *stream)
e7b07cee
HW
5574{
5575 enum amdgpu_rmx_type rmx_type;
5576
 5577 struct rect src = { 0 }; /* viewport in composition space */
5578 struct rect dst = { 0 }; /* stream addressable area */
5579
5580 /* no mode. nothing to be done */
5581 if (!mode)
5582 return;
5583
5584 /* Full screen scaling by default */
5585 src.width = mode->hdisplay;
5586 src.height = mode->vdisplay;
5587 dst.width = stream->timing.h_addressable;
5588 dst.height = stream->timing.v_addressable;
5589
f4791779
HW
5590 if (dm_state) {
5591 rmx_type = dm_state->scaling;
5592 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5593 if (src.width * dst.height <
5594 src.height * dst.width) {
5595 /* height needs less upscaling/more downscaling */
5596 dst.width = src.width *
5597 dst.height / src.height;
5598 } else {
5599 /* width needs less upscaling/more downscaling */
5600 dst.height = src.height *
5601 dst.width / src.width;
5602 }
5603 } else if (rmx_type == RMX_CENTER) {
5604 dst = src;
e7b07cee 5605 }
e7b07cee 5606
f4791779
HW
5607 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5608 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 5609
f4791779
HW
5610 if (dm_state->underscan_enable) {
5611 dst.x += dm_state->underscan_hborder / 2;
5612 dst.y += dm_state->underscan_vborder / 2;
5613 dst.width -= dm_state->underscan_hborder;
5614 dst.height -= dm_state->underscan_vborder;
5615 }
e7b07cee
HW
5616 }
5617
5618 stream->src = src;
5619 stream->dst = dst;
5620
4711c033
LT
5621 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5622 dst.x, dst.y, dst.width, dst.height);
e7b07cee
HW
5623
5624}
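/*
 * Worked example (illustrative): scaling a 1280x720 source onto a
 * 1920x1200 stream with RMX_ASPECT: since 1280 * 1200 >= 720 * 1920,
 * the width is the limiting axis and dst.height = 720 * 1920 / 1280 =
 * 1080. The 1920x1080 rectangle is then centred with dst.y =
 * (1200 - 1080) / 2 = 60, i.e. classic letterboxing.
 */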
5625
3ee6b26b 5626static enum dc_color_depth
42ba01fc 5627convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 5628 bool is_y420, int requested_bpc)
e7b07cee 5629{
1bc22f20 5630 uint8_t bpc;
01c22997 5631
1bc22f20
SW
5632 if (is_y420) {
5633 bpc = 8;
5634
5635 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5636 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5637 bpc = 16;
5638 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5639 bpc = 12;
5640 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5641 bpc = 10;
5642 } else {
5643 bpc = (uint8_t)connector->display_info.bpc;
5644 /* Assume 8 bpc by default if no bpc is specified. */
5645 bpc = bpc ? bpc : 8;
5646 }
e7b07cee 5647
cbd14ae7 5648 if (requested_bpc > 0) {
01c22997
NK
5649 /*
5650 * Cap display bpc based on the user requested value.
5651 *
 5652 * The value for state->max_bpc may not be correctly updated
5653 * depending on when the connector gets added to the state
5654 * or if this was called outside of atomic check, so it
5655 * can't be used directly.
5656 */
cbd14ae7 5657 bpc = min_t(u8, bpc, requested_bpc);
01c22997 5658
1825fd34
NK
5659 /* Round down to the nearest even number. */
5660 bpc = bpc - (bpc & 1);
5661 }
07e3a1cf 5662
e7b07cee
HW
5663 switch (bpc) {
5664 case 0:
1f6010a9
DF
5665 /*
 5666 * Temporary workaround: DRM doesn't parse color depth for
e7b07cee
HW
 5667 * EDID revisions before 1.4.
5668 * TODO: Fix edid parsing
5669 */
5670 return COLOR_DEPTH_888;
5671 case 6:
5672 return COLOR_DEPTH_666;
5673 case 8:
5674 return COLOR_DEPTH_888;
5675 case 10:
5676 return COLOR_DEPTH_101010;
5677 case 12:
5678 return COLOR_DEPTH_121212;
5679 case 14:
5680 return COLOR_DEPTH_141414;
5681 case 16:
5682 return COLOR_DEPTH_161616;
5683 default:
5684 return COLOR_DEPTH_UNDEFINED;
5685 }
5686}
5687
3ee6b26b
AD
5688static enum dc_aspect_ratio
5689get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 5690{
e11d4147
LSL
5691 /* 1-1 mapping, since both enums follow the HDMI spec. */
5692 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
5693}
5694
3ee6b26b
AD
5695static enum dc_color_space
5696get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
5697{
5698 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5699
5700 switch (dc_crtc_timing->pixel_encoding) {
5701 case PIXEL_ENCODING_YCBCR422:
5702 case PIXEL_ENCODING_YCBCR444:
5703 case PIXEL_ENCODING_YCBCR420:
5704 {
5705 /*
 5706 * 27030 kHz is the separation point between HDTV and SDTV
 5707 * according to the HDMI spec, so we use YCbCr709 and YCbCr601
 5708 * respectively.
5709 */
380604e2 5710 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
5711 if (dc_crtc_timing->flags.Y_ONLY)
5712 color_space =
5713 COLOR_SPACE_YCBCR709_LIMITED;
5714 else
5715 color_space = COLOR_SPACE_YCBCR709;
5716 } else {
5717 if (dc_crtc_timing->flags.Y_ONLY)
5718 color_space =
5719 COLOR_SPACE_YCBCR601_LIMITED;
5720 else
5721 color_space = COLOR_SPACE_YCBCR601;
5722 }
5723
5724 }
5725 break;
5726 case PIXEL_ENCODING_RGB:
5727 color_space = COLOR_SPACE_SRGB;
5728 break;
5729
5730 default:
5731 WARN_ON(1);
5732 break;
5733 }
5734
5735 return color_space;
5736}
5737
ea117312
TA
5738static bool adjust_colour_depth_from_display_info(
5739 struct dc_crtc_timing *timing_out,
5740 const struct drm_display_info *info)
400443e8 5741{
ea117312 5742 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 5743 int normalized_clk;
400443e8 5744 do {
380604e2 5745 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
5746 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5747 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5748 normalized_clk /= 2;
 5749 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
ea117312
TA
5750 switch (depth) {
5751 case COLOR_DEPTH_888:
5752 break;
400443e8
ML
5753 case COLOR_DEPTH_101010:
5754 normalized_clk = (normalized_clk * 30) / 24;
5755 break;
5756 case COLOR_DEPTH_121212:
5757 normalized_clk = (normalized_clk * 36) / 24;
5758 break;
5759 case COLOR_DEPTH_161616:
5760 normalized_clk = (normalized_clk * 48) / 24;
5761 break;
5762 default:
ea117312
TA
5763 /* The above depths are the only ones valid for HDMI. */
5764 return false;
400443e8 5765 }
ea117312
TA
5766 if (normalized_clk <= info->max_tmds_clock) {
5767 timing_out->display_color_depth = depth;
5768 return true;
5769 }
5770 } while (--depth > COLOR_DEPTH_666);
5771 return false;
400443e8 5772}
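/*
 * Worked example (illustrative): a 3840x2160@60 YCbCr 4:2:0 mode has
 * pix_clk_100hz = 5940000, i.e. normalized_clk = 594000 kHz, halved to
 * 297000 kHz for 4:2:0. Against a sink reporting max_tmds_clock =
 * 340000 kHz, 10 bpc needs 297000 * 30 / 24 = 371250 kHz and is
 * rejected, so the loop retries at 8 bpc, which fits at 297000 kHz and
 * is kept.
 */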
e7b07cee 5773
42ba01fc
NK
5774static void fill_stream_properties_from_drm_display_mode(
5775 struct dc_stream_state *stream,
5776 const struct drm_display_mode *mode_in,
5777 const struct drm_connector *connector,
5778 const struct drm_connector_state *connector_state,
cbd14ae7
SW
5779 const struct dc_stream_state *old_stream,
5780 int requested_bpc)
e7b07cee
HW
5781{
5782 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 5783 const struct drm_display_info *info = &connector->display_info;
d4252eee 5784 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
5785 struct hdmi_vendor_infoframe hv_frame;
5786 struct hdmi_avi_infoframe avi_frame;
e7b07cee 5787
acf83f86
WL
5788 memset(&hv_frame, 0, sizeof(hv_frame));
5789 memset(&avi_frame, 0, sizeof(avi_frame));
5790
e7b07cee
HW
5791 timing_out->h_border_left = 0;
5792 timing_out->h_border_right = 0;
5793 timing_out->v_border_top = 0;
5794 timing_out->v_border_bottom = 0;
5795 /* TODO: un-hardcode */
fe61a2f1 5796 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 5797 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 5798 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
5799 else if (drm_mode_is_420_also(info, mode_in)
5800 && aconnector->force_yuv420_output)
5801 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 5802 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 5803 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
5804 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5805 else
5806 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5807
5808 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5809 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
5810 connector,
5811 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5812 requested_bpc);
e7b07cee
HW
5813 timing_out->scan_type = SCANNING_TYPE_NODATA;
5814 timing_out->hdmi_vic = 0;
b333730d
BL
5815
 5816 if (old_stream) {
5817 timing_out->vic = old_stream->timing.vic;
5818 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5819 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5820 } else {
5821 timing_out->vic = drm_match_cea_mode(mode_in);
5822 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5823 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5824 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5825 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5826 }
e7b07cee 5827
1cb1d477
WL
5828 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5829 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5830 timing_out->vic = avi_frame.video_code;
5831 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5832 timing_out->hdmi_vic = hv_frame.vic;
5833 }
5834
fe8858bb
NC
5835 if (is_freesync_video_mode(mode_in, aconnector)) {
5836 timing_out->h_addressable = mode_in->hdisplay;
5837 timing_out->h_total = mode_in->htotal;
5838 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5839 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5840 timing_out->v_total = mode_in->vtotal;
5841 timing_out->v_addressable = mode_in->vdisplay;
5842 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5843 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5844 timing_out->pix_clk_100hz = mode_in->clock * 10;
5845 } else {
5846 timing_out->h_addressable = mode_in->crtc_hdisplay;
5847 timing_out->h_total = mode_in->crtc_htotal;
5848 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5849 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5850 timing_out->v_total = mode_in->crtc_vtotal;
5851 timing_out->v_addressable = mode_in->crtc_vdisplay;
5852 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5853 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5854 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5855 }
a85ba005 5856
e7b07cee 5857 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
5858
5859 stream->output_color_space = get_output_color_space(timing_out);
5860
e43a432c
AK
5861 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5862 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
5863 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5864 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5865 drm_mode_is_420_also(info, mode_in) &&
5866 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5867 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5868 adjust_colour_depth_from_display_info(timing_out, info);
5869 }
5870 }
e7b07cee
HW
5871}
5872
3ee6b26b
AD
5873static void fill_audio_info(struct audio_info *audio_info,
5874 const struct drm_connector *drm_connector,
5875 const struct dc_sink *dc_sink)
e7b07cee
HW
5876{
5877 int i = 0;
5878 int cea_revision = 0;
5879 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5880
5881 audio_info->manufacture_id = edid_caps->manufacturer_id;
5882 audio_info->product_id = edid_caps->product_id;
5883
5884 cea_revision = drm_connector->display_info.cea_rev;
5885
090afc1e 5886 strscpy(audio_info->display_name,
d2b2562c 5887 edid_caps->display_name,
090afc1e 5888 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 5889
b830ebc9 5890 if (cea_revision >= 3) {
e7b07cee
HW
5891 audio_info->mode_count = edid_caps->audio_mode_count;
5892
5893 for (i = 0; i < audio_info->mode_count; ++i) {
5894 audio_info->modes[i].format_code =
5895 (enum audio_format_code)
5896 (edid_caps->audio_modes[i].format_code);
5897 audio_info->modes[i].channel_count =
5898 edid_caps->audio_modes[i].channel_count;
5899 audio_info->modes[i].sample_rates.all =
5900 edid_caps->audio_modes[i].sample_rate;
5901 audio_info->modes[i].sample_size =
5902 edid_caps->audio_modes[i].sample_size;
5903 }
5904 }
5905
5906 audio_info->flags.all = edid_caps->speaker_flags;
5907
5908 /* TODO: We only check for the progressive mode, check for interlace mode too */
b830ebc9 5909 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5910 audio_info->video_latency = drm_connector->video_latency[0];
5911 audio_info->audio_latency = drm_connector->audio_latency[0];
5912 }
5913
5914 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5915
5916}
5917
3ee6b26b
AD
5918static void
5919copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5920 struct drm_display_mode *dst_mode)
e7b07cee
HW
5921{
5922 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5923 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5924 dst_mode->crtc_clock = src_mode->crtc_clock;
5925 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5926 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5927 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
5928 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5929 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5930 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5931 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5932 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5933 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5934 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5935 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5936}
5937
3ee6b26b
AD
5938static void
5939decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5940 const struct drm_display_mode *native_mode,
5941 bool scale_enabled)
e7b07cee
HW
5942{
5943 if (scale_enabled) {
5944 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5945 } else if (native_mode->clock == drm_mode->clock &&
5946 native_mode->htotal == drm_mode->htotal &&
5947 native_mode->vtotal == drm_mode->vtotal) {
5948 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5949 } else {
 5950 /* no scaling and no amdgpu-inserted mode, no need to patch */
5951 }
5952}
5953
aed15309
ML
5954static struct dc_sink *
5955create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5956{
2e0ac3d6 5957 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5958 struct dc_sink *sink = NULL;
2e0ac3d6
HW
5959 sink_init_data.link = aconnector->dc_link;
5960 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5961
5962 sink = dc_sink_create(&sink_init_data);
423788c7 5963 if (!sink) {
2e0ac3d6 5964 DRM_ERROR("Failed to create sink!\n");
aed15309 5965 return NULL;
423788c7 5966 }
2e0ac3d6 5967 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5968
aed15309 5969 return sink;
2e0ac3d6
HW
5970}
5971
fa2123db
ML
5972static void set_multisync_trigger_params(
5973 struct dc_stream_state *stream)
5974{
ec372186
ML
5975 struct dc_stream_state *master = NULL;
5976
fa2123db 5977 if (stream->triggered_crtc_reset.enabled) {
ec372186
ML
5978 master = stream->triggered_crtc_reset.event_source;
5979 stream->triggered_crtc_reset.event =
5980 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5981 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5982 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
fa2123db
ML
5983 }
5984}
5985
5986static void set_master_stream(struct dc_stream_state *stream_set[],
5987 int stream_count)
5988{
5989 int j, highest_rfr = 0, master_stream = 0;
5990
5991 for (j = 0; j < stream_count; j++) {
5992 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5993 int refresh_rate = 0;
5994
380604e2 5995 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
5996 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5997 if (refresh_rate > highest_rfr) {
5998 highest_rfr = refresh_rate;
5999 master_stream = j;
6000 }
6001 }
6002 }
6003 for (j = 0; j < stream_count; j++) {
03736f4c 6004 if (stream_set[j])
fa2123db
ML
6005 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6006 }
6007}
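/*
 * Note (descriptive): pix_clk_100hz * 100 converts the stored clock to
 * Hz, so refresh_rate above is an integer refresh in Hz; the stream
 * with the highest rate is picked as the multisync master for all
 * triggered-reset streams.
 */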
6008
6009static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6010{
6011 int i = 0;
ec372186 6012 struct dc_stream_state *stream;
fa2123db
ML
6013
6014 if (context->stream_count < 2)
6015 return;
6016 for (i = 0; i < context->stream_count ; i++) {
6017 if (!context->streams[i])
6018 continue;
1f6010a9
DF
6019 /*
6020 * TODO: add a function to read AMD VSDB bits and set
fa2123db 6021 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 6022 * For now it's set to false
fa2123db 6023 */
fa2123db 6024 }
ec372186 6025
fa2123db 6026 set_master_stream(context->streams, context->stream_count);
ec372186
ML
6027
6028 for (i = 0; i < context->stream_count ; i++) {
6029 stream = context->streams[i];
6030
6031 if (!stream)
6032 continue;
6033
6034 set_multisync_trigger_params(stream);
6035 }
fa2123db
ML
6036}
6037
ea2be5c0 6038#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6039static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6040 struct dc_sink *sink, struct dc_stream_state *stream,
6041 struct dsc_dec_dpcd_caps *dsc_caps)
6042{
6043 stream->timing.flags.DSC = 0;
6044
2665f63a
ML
6045 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6046 sink->sink_signal == SIGNAL_TYPE_EDP)) {
998b7ad2
FZ
6047 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6048 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6049 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6050 dsc_caps);
998b7ad2
FZ
6051 }
6052}
6053
2665f63a
ML
6054static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6055 struct dc_sink *sink, struct dc_stream_state *stream,
6056 struct dsc_dec_dpcd_caps *dsc_caps,
6057 uint32_t max_dsc_target_bpp_limit_override)
6058{
6059 const struct dc_link_settings *verified_link_cap = NULL;
6060 uint32_t link_bw_in_kbps;
6061 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6062 struct dc *dc = sink->ctx->dc;
6063 struct dc_dsc_bw_range bw_range = {0};
6064 struct dc_dsc_config dsc_cfg = {0};
6065
6066 verified_link_cap = dc_link_get_link_cap(stream->link);
6067 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6068 edp_min_bpp_x16 = 8 * 16;
6069 edp_max_bpp_x16 = 8 * 16;
6070
6071 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6072 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6073
6074 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6075 edp_min_bpp_x16 = edp_max_bpp_x16;
6076
6077 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6078 dc->debug.dsc_min_slice_height_override,
6079 edp_min_bpp_x16, edp_max_bpp_x16,
6080 dsc_caps,
6081 &stream->timing,
6082 &bw_range)) {
6083
6084 if (bw_range.max_kbps < link_bw_in_kbps) {
6085 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6086 dsc_caps,
6087 dc->debug.dsc_min_slice_height_override,
6088 max_dsc_target_bpp_limit_override,
6089 0,
6090 &stream->timing,
6091 &dsc_cfg)) {
6092 stream->timing.dsc_cfg = dsc_cfg;
6093 stream->timing.flags.DSC = 1;
6094 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6095 }
6096 return;
6097 }
6098 }
6099
6100 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6101 dsc_caps,
6102 dc->debug.dsc_min_slice_height_override,
6103 max_dsc_target_bpp_limit_override,
6104 link_bw_in_kbps,
6105 &stream->timing,
6106 &dsc_cfg)) {
6107 stream->timing.dsc_cfg = dsc_cfg;
6108 stream->timing.flags.DSC = 1;
6109 }
6110}
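/*
 * Summary of the eDP policy above (descriptive): the target is pinned
 * to 8 bpp (further capped by the panel's edp_max_bits_per_pixel). If
 * even the top of the computed DSC bandwidth range fits under the link
 * bandwidth, DSC is enabled at that fixed bpp; otherwise
 * dc_dsc_compute_config() is asked to fit the timing into the full
 * link bandwidth instead.
 */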
6111
998b7ad2
FZ
6112static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6113 struct dc_sink *sink, struct dc_stream_state *stream,
6114 struct dsc_dec_dpcd_caps *dsc_caps)
6115{
6116 struct drm_connector *drm_connector = &aconnector->base;
6117 uint32_t link_bandwidth_kbps;
f1c1a982 6118 uint32_t max_dsc_target_bpp_limit_override = 0;
2665f63a 6119 struct dc *dc = sink->ctx->dc;
998b7ad2
FZ
6120
6121 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6122 dc_link_get_link_cap(aconnector->dc_link));
f1c1a982
RL
6123
6124 if (stream->link && stream->link->local_sink)
6125 max_dsc_target_bpp_limit_override =
6126 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6127
998b7ad2
FZ
6128 /* Set DSC policy according to dsc_clock_en */
6129 dc_dsc_policy_set_enable_dsc_when_not_needed(
6130 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6131
2665f63a
ML
6132 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6133 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6134
6135 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6136
6137 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
998b7ad2
FZ
6138
6139 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6140 dsc_caps,
6141 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
f1c1a982 6142 max_dsc_target_bpp_limit_override,
998b7ad2
FZ
6143 link_bandwidth_kbps,
6144 &stream->timing,
6145 &stream->timing.dsc_cfg)) {
6146 stream->timing.flags.DSC = 1;
6147 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6148 }
6149 }
6150
6151 /* Overwrite the stream flag if DSC is enabled through debugfs */
6152 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6153 stream->timing.flags.DSC = 1;
6154
6155 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6156 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6157
6158 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6159 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6160
6161 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6162 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
998b7ad2 6163}
433e5dec 6164#endif /* CONFIG_DRM_AMD_DC_DCN */
998b7ad2 6165
5fd953a3
RS
6166/**
6167 * DOC: FreeSync Video
6168 *
6169 * When a userspace application wants to play a video, the content follows a
6170 * standard format definition that usually specifies the FPS for that format.
 6171 * The list below illustrates some video formats and their expected FPS,
6172 * respectively:
6173 *
6174 * - TV/NTSC (23.976 FPS)
6175 * - Cinema (24 FPS)
6176 * - TV/PAL (25 FPS)
6177 * - TV/NTSC (29.97 FPS)
6178 * - TV/NTSC (30 FPS)
6179 * - Cinema HFR (48 FPS)
6180 * - TV/PAL (50 FPS)
6181 * - Commonly used (60 FPS)
12cdff6b 6182 * - Multiples of 24 (48,72,96,120 FPS)
5fd953a3
RS
6183 *
 6184 * The list of standard video formats is not huge and can be added to the
6185 * connector modeset list beforehand. With that, userspace can leverage
 6186 * FreeSync to extend the front porch in order to attain the target refresh
6187 * rate. Such a switch will happen seamlessly, without screen blanking or
6188 * reprogramming of the output in any other way. If the userspace requests a
6189 * modesetting change compatible with FreeSync modes that only differ in the
6190 * refresh rate, DC will skip the full update and avoid blink during the
6191 * transition. For example, the video player can change the modesetting from
6192 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6193 * causing any display blink. This same concept can be applied to a mode
6194 * setting change.
6195 */
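/*
 * Worked example (illustrative numbers): refresh = pix_clk / (htotal *
 * vtotal). A 1920x1080 mode with htotal = 2200, vtotal = 1125 and a
 * 148.5 MHz pixel clock refreshes at 148500000 / (2200 * 1125) = 60 Hz.
 * Keeping the clock and htotal fixed while extending the vertical front
 * porch so that vtotal = 1125 * 60 / 48 ~= 1406 retimes the same mode
 * to ~48 Hz with no link retraining or blanking.
 */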
a85ba005
NC
6196static struct drm_display_mode *
6197get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6198 bool use_probed_modes)
6199{
6200 struct drm_display_mode *m, *m_pref = NULL;
6201 u16 current_refresh, highest_refresh;
6202 struct list_head *list_head = use_probed_modes ?
6203 &aconnector->base.probed_modes :
6204 &aconnector->base.modes;
6205
6206 if (aconnector->freesync_vid_base.clock != 0)
6207 return &aconnector->freesync_vid_base;
6208
6209 /* Find the preferred mode */
 6210 list_for_each_entry(m, list_head, head) {
6211 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6212 m_pref = m;
6213 break;
6214 }
6215 }
6216
6217 if (!m_pref) {
 6218 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6219 m_pref = list_first_entry_or_null(
6220 &aconnector->base.modes, struct drm_display_mode, head);
6221 if (!m_pref) {
6222 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6223 return NULL;
6224 }
6225 }
6226
6227 highest_refresh = drm_mode_vrefresh(m_pref);
6228
6229 /*
6230 * Find the mode with highest refresh rate with same resolution.
6231 * For some monitors, preferred mode is not the mode with highest
6232 * supported refresh rate.
6233 */
 6234 list_for_each_entry(m, list_head, head) {
6235 current_refresh = drm_mode_vrefresh(m);
6236
6237 if (m->hdisplay == m_pref->hdisplay &&
6238 m->vdisplay == m_pref->vdisplay &&
6239 highest_refresh < current_refresh) {
6240 highest_refresh = current_refresh;
6241 m_pref = m;
6242 }
6243 }
6244
6245 aconnector->freesync_vid_base = *m_pref;
6246 return m_pref;
6247}
6248
fe8858bb 6249static bool is_freesync_video_mode(const struct drm_display_mode *mode,
a85ba005
NC
6250 struct amdgpu_dm_connector *aconnector)
6251{
6252 struct drm_display_mode *high_mode;
6253 int timing_diff;
6254
6255 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6256 if (!high_mode || !mode)
6257 return false;
6258
6259 timing_diff = high_mode->vtotal - mode->vtotal;
6260
6261 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6262 high_mode->hdisplay != mode->hdisplay ||
6263 high_mode->vdisplay != mode->vdisplay ||
6264 high_mode->hsync_start != mode->hsync_start ||
6265 high_mode->hsync_end != mode->hsync_end ||
6266 high_mode->htotal != mode->htotal ||
6267 high_mode->hskew != mode->hskew ||
6268 high_mode->vscan != mode->vscan ||
6269 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6270 high_mode->vsync_end - mode->vsync_end != timing_diff)
6271 return false;
 6272
 6273 return true;
6274}
6275
3ee6b26b
AD
6276static struct dc_stream_state *
6277create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6278 const struct drm_display_mode *drm_mode,
b333730d 6279 const struct dm_connector_state *dm_state,
cbd14ae7
SW
6280 const struct dc_stream_state *old_stream,
6281 int requested_bpc)
e7b07cee
HW
6282{
6283 struct drm_display_mode *preferred_mode = NULL;
391ef035 6284 struct drm_connector *drm_connector;
42ba01fc
NK
6285 const struct drm_connector_state *con_state =
6286 dm_state ? &dm_state->base : NULL;
0971c40e 6287 struct dc_stream_state *stream = NULL;
e7b07cee 6288 struct drm_display_mode mode = *drm_mode;
a85ba005
NC
6289 struct drm_display_mode saved_mode;
6290 struct drm_display_mode *freesync_mode = NULL;
e7b07cee 6291 bool native_mode_found = false;
b0781603
NK
6292 bool recalculate_timing = false;
6293 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
b333730d 6294 int mode_refresh;
58124bf8 6295 int preferred_refresh = 0;
defeb878 6296#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 6297 struct dsc_dec_dpcd_caps dsc_caps;
7c431455 6298#endif
aed15309 6299 struct dc_sink *sink = NULL;
a85ba005
NC
6300
6301 memset(&saved_mode, 0, sizeof(saved_mode));
6302
b830ebc9 6303 if (aconnector == NULL) {
e7b07cee 6304 DRM_ERROR("aconnector is NULL!\n");
64245fa7 6305 return stream;
e7b07cee
HW
6306 }
6307
e7b07cee 6308 drm_connector = &aconnector->base;
2e0ac3d6 6309
f4ac176e 6310 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
6311 sink = create_fake_sink(aconnector);
6312 if (!sink)
6313 return stream;
aed15309
ML
6314 } else {
6315 sink = aconnector->dc_sink;
dcd5fb82 6316 dc_sink_retain(sink);
f4ac176e 6317 }
2e0ac3d6 6318
aed15309 6319 stream = dc_create_stream_for_sink(sink);
4562236b 6320
b830ebc9 6321 if (stream == NULL) {
e7b07cee 6322 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 6323 goto finish;
e7b07cee
HW
6324 }
6325
ceb3dbb4
JL
6326 stream->dm_stream_context = aconnector;
6327
4a36fcba
WL
6328 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6329 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6330
e7b07cee
HW
6331 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6332 /* Search for preferred mode */
6333 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6334 native_mode_found = true;
6335 break;
6336 }
6337 }
6338 if (!native_mode_found)
6339 preferred_mode = list_first_entry_or_null(
6340 &aconnector->base.modes,
6341 struct drm_display_mode,
6342 head);
6343
b333730d
BL
6344 mode_refresh = drm_mode_vrefresh(&mode);
6345
b830ebc9 6346 if (preferred_mode == NULL) {
1f6010a9
DF
6347 /*
 6348 * This may not be an error; the use case is when we have no
e7b07cee
HW
6349 * usermode calls to reset and set mode upon hotplug. In this
6350 * case, we call set mode ourselves to restore the previous mode
 6351 * and the mode list may not yet be filled in.
6352 */
f1ad2f5e 6353 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee 6354 } else {
b0781603 6355 recalculate_timing = amdgpu_freesync_vid_mode &&
a85ba005
NC
6356 is_freesync_video_mode(&mode, aconnector);
6357 if (recalculate_timing) {
6358 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6359 saved_mode = mode;
6360 mode = *freesync_mode;
6361 } else {
6362 decide_crtc_timing_for_drm_display_mode(
b0781603 6363 &mode, preferred_mode, scale);
a85ba005 6364
b0781603
NK
6365 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6366 }
e7b07cee
HW
6367 }
6368
a85ba005
NC
6369 if (recalculate_timing)
6370 drm_mode_set_crtcinfo(&saved_mode, 0);
fe8858bb 6371 else if (!dm_state)
f783577c
JFZ
6372 drm_mode_set_crtcinfo(&mode, 0);
6373
a85ba005 6374 /*
b333730d
BL
6375 * If scaling is enabled and refresh rate didn't change
6376 * we copy the vic and polarities of the old timings
6377 */
b0781603 6378 if (!scale || mode_refresh != preferred_refresh)
a85ba005
NC
6379 fill_stream_properties_from_drm_display_mode(
6380 stream, &mode, &aconnector->base, con_state, NULL,
6381 requested_bpc);
b333730d 6382 else
a85ba005
NC
6383 fill_stream_properties_from_drm_display_mode(
6384 stream, &mode, &aconnector->base, con_state, old_stream,
6385 requested_bpc);
b333730d 6386
defeb878 6387#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6388 /* SST DSC determination policy */
6389 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6390 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6391 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
39a4eb85
WL
6392#endif
6393
e7b07cee
HW
6394 update_stream_scaling_settings(&mode, dm_state, stream);
6395
6396 fill_audio_info(
6397 &stream->audio_info,
6398 drm_connector,
aed15309 6399 sink);
e7b07cee 6400
ceb3dbb4 6401 update_stream_signal(stream, sink);
9182b4cb 6402
d832fc3b 6403 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
6404 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6405
8a488f5d
RL
6406 if (stream->link->psr_settings.psr_feature_enabled) {
 6407 //
 6408 // Decide whether the stream supports VSC SDP colorimetry
 6409 // before building the VSC info packet.
 6410 //
6411 stream->use_vsc_sdp_for_colorimetry = false;
6412 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6413 stream->use_vsc_sdp_for_colorimetry =
6414 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6415 } else {
6416 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6417 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 6418 }
8a488f5d 6419 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
1a365683
RL
6420 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6421
8c322309 6422 }
aed15309 6423finish:
dcd5fb82 6424 dc_sink_release(sink);
9e3efe3e 6425
e7b07cee
HW
6426 return stream;
6427}
6428
7578ecda 6429static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
6430{
6431 drm_crtc_cleanup(crtc);
6432 kfree(crtc);
6433}
6434
6435static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 6436 struct drm_crtc_state *state)
e7b07cee
HW
6437{
6438 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6439
6440 /* TODO: Destroy dc_stream objects when the stream object is flattened */
6441 if (cur->stream)
6442 dc_stream_release(cur->stream);
6443
6444
6445 __drm_atomic_helper_crtc_destroy_state(state);
6446
6447
6448 kfree(state);
6449}
6450
6451static void dm_crtc_reset_state(struct drm_crtc *crtc)
6452{
6453 struct dm_crtc_state *state;
6454
6455 if (crtc->state)
6456 dm_crtc_destroy_state(crtc, crtc->state);
6457
6458 state = kzalloc(sizeof(*state), GFP_KERNEL);
6459 if (WARN_ON(!state))
6460 return;
6461
1f8a52ec 6462 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
6463}
6464
6465static struct drm_crtc_state *
6466dm_crtc_duplicate_state(struct drm_crtc *crtc)
6467{
6468 struct dm_crtc_state *state, *cur;
6469
6470 cur = to_dm_crtc_state(crtc->state);
6471
6472 if (WARN_ON(!crtc->state))
6473 return NULL;
6474
2004f45e 6475 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
6476 if (!state)
6477 return NULL;
e7b07cee
HW
6478
6479 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6480
6481 if (cur->stream) {
6482 state->stream = cur->stream;
6483 dc_stream_retain(state->stream);
6484 }
6485
d6ef9b41 6486 state->active_planes = cur->active_planes;
98e6436d 6487 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 6488 state->abm_level = cur->abm_level;
bb47de73
NK
6489 state->vrr_supported = cur->vrr_supported;
6490 state->freesync_config = cur->freesync_config;
cf020d49
NK
6491 state->cm_has_degamma = cur->cm_has_degamma;
6492 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
035f5496 6493 state->force_dpms_off = cur->force_dpms_off;
e7b07cee
HW
6494 /* TODO: Duplicate dc_stream after the stream object is flattened */
6495
6496 return &state->base;
6497}
6498
86bc2219 6499#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
e69231c4 6500static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
86bc2219
WL
6501{
6502 crtc_debugfs_init(crtc);
6503
6504 return 0;
6505}
6506#endif
6507
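/*
 * Toggle the VUPDATE interrupt for the CRTC's OTG instance; returns 0 on
 * success or -EBUSY if DC rejects the change.
 */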
d2574c33
MK
6508static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6509{
6510 enum dc_irq_source irq_source;
6511 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6512 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
6513 int rc;
6514
6515 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6516
6517 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6518
4711c033
LT
6519 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6520 acrtc->crtc_id, enable ? "en" : "dis", rc);
d2574c33
MK
6521 return rc;
6522}
589d2739
HW
6523
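/*
 * Enable/disable the VBLANK interrupt for a CRTC. The VUPDATE interrupt is
 * only kept on in VRR mode, and on DCN hardware the change is also handed
 * off to a vblank_control_worker item (allocated with GFP_ATOMIC, as this
 * can run in atomic context).
 */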
6524static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6525{
6526 enum dc_irq_source irq_source;
6527 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6528 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 6529 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
ea3b4242 6530#if defined(CONFIG_DRM_AMD_DC_DCN)
71338cb4 6531 struct amdgpu_display_manager *dm = &adev->dm;
09a5df6c 6532 struct vblank_control_work *work;
ea3b4242 6533#endif
d2574c33
MK
6534 int rc = 0;
6535
6536 if (enable) {
6537 /* vblank irq on -> Only need vupdate irq in vrr mode */
6538 if (amdgpu_dm_vrr_active(acrtc_state))
6539 rc = dm_set_vupdate_irq(crtc, true);
6540 } else {
6541 /* vblank irq off -> vupdate irq off */
6542 rc = dm_set_vupdate_irq(crtc, false);
6543 }
6544
6545 if (rc)
6546 return rc;
589d2739
HW
6547
6548 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
6549
6550 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6551 return -EBUSY;
6552
98ab5f35
BL
6553 if (amdgpu_in_reset(adev))
6554 return 0;
6555
4928b480 6556#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
6557 if (dm->vblank_control_workqueue) {
6558 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6559 if (!work)
6560 return -ENOMEM;
09a5df6c 6561
06dd1888
NK
6562 INIT_WORK(&work->work, vblank_control_worker);
6563 work->dm = dm;
6564 work->acrtc = acrtc;
6565 work->enable = enable;
09a5df6c 6566
06dd1888
NK
6567 if (acrtc_state->stream) {
6568 dc_stream_retain(acrtc_state->stream);
6569 work->stream = acrtc_state->stream;
6570 }
58aa1c50 6571
06dd1888
NK
6572 queue_work(dm->vblank_control_workqueue, &work->work);
6573 }
4928b480 6574#endif
71338cb4 6575
71338cb4 6576 return 0;
589d2739
HW
6577}
6578
6579static int dm_enable_vblank(struct drm_crtc *crtc)
6580{
6581 return dm_set_vblank(crtc, true);
6582}
6583
6584static void dm_disable_vblank(struct drm_crtc *crtc)
6585{
6586 dm_set_vblank(crtc, false);
6587}
6588
e7b07cee
HW
6589 /* Implemented only the options currently available for the driver */
6590static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6591 .reset = dm_crtc_reset_state,
6592 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
6593 .set_config = drm_atomic_helper_set_config,
6594 .page_flip = drm_atomic_helper_page_flip,
6595 .atomic_duplicate_state = dm_crtc_duplicate_state,
6596 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 6597 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 6598 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 6599 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 6600 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
6601 .enable_vblank = dm_enable_vblank,
6602 .disable_vblank = dm_disable_vblank,
e3eff4b5 6603 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
86bc2219
WL
6604#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6605 .late_register = amdgpu_dm_crtc_late_register,
6606#endif
e7b07cee
HW
6607};
6608
6609static enum drm_connector_status
6610amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6611{
6612 bool connected;
c84dec2f 6613 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6614
1f6010a9
DF
6615 /*
6616 * Notes:
e7b07cee
HW
6617 * 1. This interface is NOT called in context of HPD irq.
6618 * 2. This interface *is called* in context of user-mode ioctl, which
1f6010a9
DF
6619 * makes it a bad place for *any* MST-related activity.
6620 */
e7b07cee 6621
8580d60b
HW
6622 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6623 !aconnector->fake_enable)
e7b07cee
HW
6624 connected = (aconnector->dc_sink != NULL);
6625 else
6626 connected = (aconnector->base.force == DRM_FORCE_ON);
6627
0f877894
OV
6628 update_subconnector_property(aconnector);
6629
e7b07cee
HW
6630 return (connected ? connector_status_connected :
6631 connector_status_disconnected);
6632}
6633
3ee6b26b
AD
6634int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6635 struct drm_connector_state *connector_state,
6636 struct drm_property *property,
6637 uint64_t val)
e7b07cee
HW
6638{
6639 struct drm_device *dev = connector->dev;
1348969a 6640 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6641 struct dm_connector_state *dm_old_state =
6642 to_dm_connector_state(connector->state);
6643 struct dm_connector_state *dm_new_state =
6644 to_dm_connector_state(connector_state);
6645
6646 int ret = -EINVAL;
6647
6648 if (property == dev->mode_config.scaling_mode_property) {
6649 enum amdgpu_rmx_type rmx_type;
6650
6651 switch (val) {
6652 case DRM_MODE_SCALE_CENTER:
6653 rmx_type = RMX_CENTER;
6654 break;
6655 case DRM_MODE_SCALE_ASPECT:
6656 rmx_type = RMX_ASPECT;
6657 break;
6658 case DRM_MODE_SCALE_FULLSCREEN:
6659 rmx_type = RMX_FULL;
6660 break;
6661 case DRM_MODE_SCALE_NONE:
6662 default:
6663 rmx_type = RMX_OFF;
6664 break;
6665 }
6666
6667 if (dm_old_state->scaling == rmx_type)
6668 return 0;
6669
6670 dm_new_state->scaling = rmx_type;
6671 ret = 0;
6672 } else if (property == adev->mode_info.underscan_hborder_property) {
6673 dm_new_state->underscan_hborder = val;
6674 ret = 0;
6675 } else if (property == adev->mode_info.underscan_vborder_property) {
6676 dm_new_state->underscan_vborder = val;
6677 ret = 0;
6678 } else if (property == adev->mode_info.underscan_property) {
6679 dm_new_state->underscan_enable = val;
6680 ret = 0;
c1ee92f9
DF
6681 } else if (property == adev->mode_info.abm_level_property) {
6682 dm_new_state->abm_level = val;
6683 ret = 0;
e7b07cee
HW
6684 }
6685
6686 return ret;
6687}
6688
3ee6b26b
AD
6689int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6690 const struct drm_connector_state *state,
6691 struct drm_property *property,
6692 uint64_t *val)
e7b07cee
HW
6693{
6694 struct drm_device *dev = connector->dev;
1348969a 6695 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6696 struct dm_connector_state *dm_state =
6697 to_dm_connector_state(state);
6698 int ret = -EINVAL;
6699
6700 if (property == dev->mode_config.scaling_mode_property) {
6701 switch (dm_state->scaling) {
6702 case RMX_CENTER:
6703 *val = DRM_MODE_SCALE_CENTER;
6704 break;
6705 case RMX_ASPECT:
6706 *val = DRM_MODE_SCALE_ASPECT;
6707 break;
6708 case RMX_FULL:
6709 *val = DRM_MODE_SCALE_FULLSCREEN;
6710 break;
6711 case RMX_OFF:
6712 default:
6713 *val = DRM_MODE_SCALE_NONE;
6714 break;
6715 }
6716 ret = 0;
6717 } else if (property == adev->mode_info.underscan_hborder_property) {
6718 *val = dm_state->underscan_hborder;
6719 ret = 0;
6720 } else if (property == adev->mode_info.underscan_vborder_property) {
6721 *val = dm_state->underscan_vborder;
6722 ret = 0;
6723 } else if (property == adev->mode_info.underscan_property) {
6724 *val = dm_state->underscan_enable;
6725 ret = 0;
c1ee92f9
DF
6726 } else if (property == adev->mode_info.abm_level_property) {
6727 *val = dm_state->abm_level;
6728 ret = 0;
e7b07cee 6729 }
c1ee92f9 6730
e7b07cee
HW
6731 return ret;
6732}
6733
526c654a
ED
6734static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6735{
6736 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6737
6738 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6739}
6740
7578ecda 6741static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 6742{
c84dec2f 6743 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6744 const struct dc_link *link = aconnector->dc_link;
1348969a 6745 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 6746 struct amdgpu_display_manager *dm = &adev->dm;
7fd13bae 6747 int i;
ada8ce15 6748
5dff80bd
AG
6749 /*
6750 * Call only if mst_mgr was initialized before, since it's not done
6751 * for all connector types.
6752 */
6753 if (aconnector->mst_mgr.dev)
6754 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6755
e7b07cee
HW
6756#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6757 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
7fd13bae
AD
6758 for (i = 0; i < dm->num_of_edps; i++) {
6759 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6760 backlight_device_unregister(dm->backlight_dev[i]);
6761 dm->backlight_dev[i] = NULL;
6762 }
e7b07cee
HW
6763 }
6764#endif
dcd5fb82
MF
6765
6766 if (aconnector->dc_em_sink)
6767 dc_sink_release(aconnector->dc_em_sink);
6768 aconnector->dc_em_sink = NULL;
6769 if (aconnector->dc_sink)
6770 dc_sink_release(aconnector->dc_sink);
6771 aconnector->dc_sink = NULL;
6772
e86e8947 6773 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
6774 drm_connector_unregister(connector);
6775 drm_connector_cleanup(connector);
526c654a
ED
6776 if (aconnector->i2c) {
6777 i2c_del_adapter(&aconnector->i2c->base);
6778 kfree(aconnector->i2c);
6779 }
7daec99f 6780 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 6781
e7b07cee
HW
6782 kfree(connector);
6783}
6784
6785void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6786{
6787 struct dm_connector_state *state =
6788 to_dm_connector_state(connector->state);
6789
df099b9b
LSL
6790 if (connector->state)
6791 __drm_atomic_helper_connector_destroy_state(connector->state);
6792
e7b07cee
HW
6793 kfree(state);
6794
6795 state = kzalloc(sizeof(*state), GFP_KERNEL);
6796
6797 if (state) {
6798 state->scaling = RMX_OFF;
6799 state->underscan_enable = false;
6800 state->underscan_hborder = 0;
6801 state->underscan_vborder = 0;
01933ba4 6802 state->base.max_requested_bpc = 8;
3261e013
ML
6803 state->vcpi_slots = 0;
6804 state->pbn = 0;
c3e50f89
NK
6805 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6806 state->abm_level = amdgpu_dm_abm_level;
6807
df099b9b 6808 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
6809 }
6810}
6811
3ee6b26b
AD
6812struct drm_connector_state *
6813amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
6814{
6815 struct dm_connector_state *state =
6816 to_dm_connector_state(connector->state);
6817
6818 struct dm_connector_state *new_state =
6819 kmemdup(state, sizeof(*state), GFP_KERNEL);
6820
98e6436d
AK
6821 if (!new_state)
6822 return NULL;
e7b07cee 6823
98e6436d
AK
6824 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6825
6826 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6827 new_state->abm_level = state->abm_level;
922454c2
NK
6828 new_state->scaling = state->scaling;
6829 new_state->underscan_enable = state->underscan_enable;
6830 new_state->underscan_hborder = state->underscan_hborder;
6831 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
6832 new_state->vcpi_slots = state->vcpi_slots;
6833 new_state->pbn = state->pbn;
98e6436d 6834 return &new_state->base;
e7b07cee
HW
6835}
6836
14f04fa4
AD
6837static int
6838amdgpu_dm_connector_late_register(struct drm_connector *connector)
6839{
6840 struct amdgpu_dm_connector *amdgpu_dm_connector =
6841 to_amdgpu_dm_connector(connector);
00a8037e 6842 int r;
14f04fa4 6843
00a8037e
AD
6844 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6845 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6846 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6847 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6848 if (r)
6849 return r;
6850 }
6851
6852#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
6853 connector_debugfs_init(amdgpu_dm_connector);
6854#endif
6855
6856 return 0;
6857}
6858
e7b07cee
HW
6859static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6860 .reset = amdgpu_dm_connector_funcs_reset,
6861 .detect = amdgpu_dm_connector_detect,
6862 .fill_modes = drm_helper_probe_single_connector_modes,
6863 .destroy = amdgpu_dm_connector_destroy,
6864 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6865 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6866 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6867 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6868 .late_register = amdgpu_dm_connector_late_register,
526c654a 6869 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
6870};
6871
e7b07cee
HW
6872static int get_modes(struct drm_connector *connector)
6873{
6874 return amdgpu_dm_connector_get_modes(connector);
6875}
6876
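/*
 * Create an emulated sink from the connector's override EDID blob so a
 * forced-on link behaves as if a display were attached; with no EDID
 * available the connector is forced off instead.
 */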
c84dec2f 6877static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6878{
6879 struct dc_sink_init_data init_params = {
6880 .link = aconnector->dc_link,
6881 .sink_signal = SIGNAL_TYPE_VIRTUAL
6882 };
70e8ffc5 6883 struct edid *edid;
e7b07cee 6884
a89ff457 6885 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
6886 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6887 aconnector->base.name);
6888
6889 aconnector->base.force = DRM_FORCE_OFF;
6890 aconnector->base.override_edid = false;
6891 return;
6892 }
6893
70e8ffc5
HW
6894 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6895
e7b07cee
HW
6896 aconnector->edid = edid;
6897
6898 aconnector->dc_em_sink = dc_link_add_remote_sink(
6899 aconnector->dc_link,
6900 (uint8_t *)edid,
6901 (edid->extensions + 1) * EDID_LENGTH,
6902 &init_params);
6903
dcd5fb82 6904 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
6905 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6906 aconnector->dc_link->local_sink :
6907 aconnector->dc_em_sink;
dcd5fb82
MF
6908 dc_sink_retain(aconnector->dc_sink);
6909 }
e7b07cee
HW
6910}
6911
c84dec2f 6912static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6913{
6914 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6915
1f6010a9
DF
6916 /*
6917 * In case of a headless boot with force-on for a DP managed connector,
e7b07cee
HW
6918 * those settings have to be != 0 to get an initial modeset.
6919 */
6920 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6921 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6922 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6923 }
6924
6925
6926 aconnector->base.override_edid = true;
6927 create_eml_sink(aconnector);
6928}
6929
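/*
 * Create a stream for the sink and validate it with DC, lowering the
 * requested bpc in steps of 2 (down to 6) until validation succeeds. If DC
 * still reports DC_FAIL_ENC_VALIDATE, retry once with YCbCr420 output
 * forced.
 */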
cbd14ae7
SW
6930static struct dc_stream_state *
6931create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6932 const struct drm_display_mode *drm_mode,
6933 const struct dm_connector_state *dm_state,
6934 const struct dc_stream_state *old_stream)
6935{
6936 struct drm_connector *connector = &aconnector->base;
1348969a 6937 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 6938 struct dc_stream_state *stream;
4b7da34b
SW
6939 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6940 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
6941 enum dc_status dc_result = DC_OK;
6942
6943 do {
6944 stream = create_stream_for_sink(aconnector, drm_mode,
6945 dm_state, old_stream,
6946 requested_bpc);
6947 if (stream == NULL) {
6948 DRM_ERROR("Failed to create stream for sink!\n");
6949 break;
6950 }
6951
6952 dc_result = dc_validate_stream(adev->dm.dc, stream);
6953
6954 if (dc_result != DC_OK) {
74a16675 6955 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
6956 drm_mode->hdisplay,
6957 drm_mode->vdisplay,
6958 drm_mode->clock,
74a16675
RS
6959 dc_result,
6960 dc_status_to_str(dc_result));
cbd14ae7
SW
6961
6962 dc_stream_release(stream);
6963 stream = NULL;
6964 requested_bpc -= 2; /* lower bpc to retry validation */
6965 }
6966
6967 } while (stream == NULL && requested_bpc >= 6);
6968
68eb3ae3
WS
6969 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6970 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6971
6972 aconnector->force_yuv420_output = true;
6973 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6974 dm_state, old_stream);
6975 aconnector->force_yuv420_output = false;
6976 }
6977
cbd14ae7
SW
6978 return stream;
6979}
6980
ba9ca088 6981enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 6982 struct drm_display_mode *mode)
e7b07cee
HW
6983{
6984 int result = MODE_ERROR;
6985 struct dc_sink *dc_sink;
e7b07cee 6986 /* TODO: Unhardcode stream count */
0971c40e 6987 struct dc_stream_state *stream;
c84dec2f 6988 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
6989
6990 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6991 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6992 return result;
6993
1f6010a9
DF
6994 /*
6995 * Only run this the first time mode_valid is called, to initialize
e7b07cee
HW
6996 * EDID management.
6997 */
6998 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6999 !aconnector->dc_em_sink)
7000 handle_edid_mgmt(aconnector);
7001
c84dec2f 7002 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 7003
ad975f44
VL
7004 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7005 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
7006 DRM_ERROR("dc_sink is NULL!\n");
7007 goto fail;
7008 }
7009
cbd14ae7
SW
7010 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7011 if (stream) {
7012 dc_stream_release(stream);
e7b07cee 7013 result = MODE_OK;
cbd14ae7 7014 }
e7b07cee
HW
7015
7016fail:
7017 /* TODO: error handling */
7018 return result;
7019}
7020
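/*
 * Pack the connector's HDR static metadata into a DC info packet. The DRM
 * infoframe payload is a fixed 26 bytes plus a 4 byte header; HDMI and DP
 * differ only in the packet header bytes (hb0..hb3).
 */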
88694af9
NK
7021static int fill_hdr_info_packet(const struct drm_connector_state *state,
7022 struct dc_info_packet *out)
7023{
7024 struct hdmi_drm_infoframe frame;
7025 unsigned char buf[30]; /* 26 + 4 */
7026 ssize_t len;
7027 int ret, i;
7028
7029 memset(out, 0, sizeof(*out));
7030
7031 if (!state->hdr_output_metadata)
7032 return 0;
7033
7034 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7035 if (ret)
7036 return ret;
7037
7038 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7039 if (len < 0)
7040 return (int)len;
7041
7042 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7043 if (len != 30)
7044 return -EINVAL;
7045
7046 /* Prepare the infopacket for DC. */
7047 switch (state->connector->connector_type) {
7048 case DRM_MODE_CONNECTOR_HDMIA:
7049 out->hb0 = 0x87; /* type */
7050 out->hb1 = 0x01; /* version */
7051 out->hb2 = 0x1A; /* length */
7052 out->sb[0] = buf[3]; /* checksum */
7053 i = 1;
7054 break;
7055
7056 case DRM_MODE_CONNECTOR_DisplayPort:
7057 case DRM_MODE_CONNECTOR_eDP:
7058 out->hb0 = 0x00; /* sdp id, zero */
7059 out->hb1 = 0x87; /* type */
7060 out->hb2 = 0x1D; /* payload len - 1 */
7061 out->hb3 = (0x13 << 2); /* sdp version */
7062 out->sb[0] = 0x01; /* version */
7063 out->sb[1] = 0x1A; /* length */
7064 i = 2;
7065 break;
7066
7067 default:
7068 return -EINVAL;
7069 }
7070
7071 memcpy(&out->sb[i], &buf[4], 26);
7072 out->valid = true;
7073
7074 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7075 sizeof(out->sb), false);
7076
7077 return 0;
7078}
7079
88694af9
NK
7080static int
7081amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 7082 struct drm_atomic_state *state)
88694af9 7083{
51e857af
SP
7084 struct drm_connector_state *new_con_state =
7085 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
7086 struct drm_connector_state *old_con_state =
7087 drm_atomic_get_old_connector_state(state, conn);
7088 struct drm_crtc *crtc = new_con_state->crtc;
7089 struct drm_crtc_state *new_crtc_state;
7090 int ret;
7091
e8a98235
RS
7092 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7093
88694af9
NK
7094 if (!crtc)
7095 return 0;
7096
72921cdf 7097 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
7098 struct dc_info_packet hdr_infopacket;
7099
7100 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7101 if (ret)
7102 return ret;
7103
7104 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7105 if (IS_ERR(new_crtc_state))
7106 return PTR_ERR(new_crtc_state);
7107
7108 /*
7109 * DC considers the stream backends changed if the
7110 * static metadata changes. Forcing the modeset also
7111 * gives a simple way for userspace to switch from
b232d4ed
NK
7112 * 8bpc to 10bpc when setting the metadata to enter
7113 * or exit HDR.
7114 *
7115 * Changing the static metadata after it's been
7116 * set is permissible, however. So only force a
7117 * modeset if we're entering or exiting HDR.
88694af9 7118 */
b232d4ed
NK
7119 new_crtc_state->mode_changed =
7120 !old_con_state->hdr_output_metadata ||
7121 !new_con_state->hdr_output_metadata;
88694af9
NK
7122 }
7123
7124 return 0;
7125}
7126
e7b07cee
HW
7127static const struct drm_connector_helper_funcs
7128amdgpu_dm_connector_helper_funcs = {
7129 /*
1f6010a9 7130 * If hotplugging a second, bigger display in FB console mode, higher-resolution
b830ebc9 7131 * modes will be filtered by drm_mode_validate_size(), and those modes
1f6010a9 7132 * are missing after the user starts lightdm. So we need to renew the modes
b830ebc9
HW
7133 * list in the get_modes callback, not just return the modes count.
7134 */
e7b07cee
HW
7135 .get_modes = get_modes,
7136 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 7137 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
7138};
7139
7140static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7141{
7142}
7143
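/*
 * Count the non-cursor planes that will be active on the CRTC. Planes
 * without a new state are assumed to remain enabled; planes with a new
 * state count only if a framebuffer is attached.
 */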
d6ef9b41 7144static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
7145{
7146 struct drm_atomic_state *state = new_crtc_state->state;
7147 struct drm_plane *plane;
7148 int num_active = 0;
7149
7150 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7151 struct drm_plane_state *new_plane_state;
7152
7153 /* Cursor planes are "fake". */
7154 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7155 continue;
7156
7157 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7158
7159 if (!new_plane_state) {
7160 /*
7161 * The plane is enabled on the CRTC and hasn't changed
7162 * state. This means that it previously passed
7163 * validation and is therefore enabled.
7164 */
7165 num_active += 1;
7166 continue;
7167 }
7168
7169 /* We need a framebuffer to be considered enabled. */
7170 num_active += (new_plane_state->fb != NULL);
7171 }
7172
d6ef9b41
NK
7173 return num_active;
7174}
7175
8fe684e9
NK
7176static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7177 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
7178{
7179 struct dm_crtc_state *dm_new_crtc_state =
7180 to_dm_crtc_state(new_crtc_state);
7181
7182 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
7183
7184 if (!dm_new_crtc_state->stream)
7185 return;
7186
7187 dm_new_crtc_state->active_planes =
7188 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
7189}
7190
3ee6b26b 7191static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 7192 struct drm_atomic_state *state)
e7b07cee 7193{
29b77ad7
MR
7194 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7195 crtc);
1348969a 7196 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 7197 struct dc *dc = adev->dm.dc;
29b77ad7 7198 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
7199 int ret = -EINVAL;
7200
5b8c5969 7201 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 7202
29b77ad7 7203 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 7204
bcd74374
ND
7205 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7206 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
e7b07cee
HW
7207 return ret;
7208 }
7209
bc92c065 7210 /*
b836a274
MD
7211 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7212 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7213 * planes are disabled, which is not supported by the hardware. And there is legacy
7214 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 7215 */
29b77ad7 7216 if (crtc_state->enable &&
ea9522f5
SS
7217 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7218 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 7219 return -EINVAL;
ea9522f5 7220 }
c14a005c 7221
b836a274
MD
7222 /* In some use cases, like reset, no stream is attached */
7223 if (!dm_crtc_state->stream)
7224 return 0;
7225
62c933f9 7226 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
7227 return 0;
7228
ea9522f5 7229 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
7230 return ret;
7231}
7232
3ee6b26b
AD
7233static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7234 const struct drm_display_mode *mode,
7235 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
7236{
7237 return true;
7238}
7239
7240static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7241 .disable = dm_crtc_helper_disable,
7242 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
7243 .mode_fixup = dm_crtc_helper_mode_fixup,
7244 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
7245};
7246
7247static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7248{
7249
7250}
7251
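/* Map a DC color depth to bits per component; returns 0 for unknown depths. */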
3261e013
ML
7252 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7253{
7254 switch (display_color_depth) {
7255 case COLOR_DEPTH_666:
7256 return 6;
7257 case COLOR_DEPTH_888:
7258 return 8;
7259 case COLOR_DEPTH_101010:
7260 return 10;
7261 case COLOR_DEPTH_121212:
7262 return 12;
7263 case COLOR_DEPTH_141414:
7264 return 14;
7265 case COLOR_DEPTH_161616:
7266 return 16;
7267 default:
7268 break;
7269 }
7270 return 0;
7271}
7272
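/*
 * For MST sinks, derive the stream bandwidth in PBN from the negotiated
 * color depth (bpp = bpc * 3) and pixel clock, then reserve the matching
 * VCPI time slots through the MST topology manager.
 */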
3ee6b26b
AD
7273static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7274 struct drm_crtc_state *crtc_state,
7275 struct drm_connector_state *conn_state)
e7b07cee 7276{
3261e013
ML
7277 struct drm_atomic_state *state = crtc_state->state;
7278 struct drm_connector *connector = conn_state->connector;
7279 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7280 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7281 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7282 struct drm_dp_mst_topology_mgr *mst_mgr;
7283 struct drm_dp_mst_port *mst_port;
7284 enum dc_color_depth color_depth;
7285 int clock, bpp = 0;
1bc22f20 7286 bool is_y420 = false;
3261e013
ML
7287
7288 if (!aconnector->port || !aconnector->dc_sink)
7289 return 0;
7290
7291 mst_port = aconnector->port;
7292 mst_mgr = &aconnector->mst_port->mst_mgr;
7293
7294 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7295 return 0;
7296
7297 if (!state->duplicated) {
cbd14ae7 7298 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
7299 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7300 aconnector->force_yuv420_output;
cbd14ae7
SW
7301 color_depth = convert_color_depth_from_display_info(connector,
7302 is_y420,
7303 max_bpc);
3261e013
ML
7304 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7305 clock = adjusted_mode->clock;
dc48529f 7306 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
7307 }
7308 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7309 mst_mgr,
7310 mst_port,
1c6c1cb5 7311 dm_new_connector_state->pbn,
03ca9600 7312 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
7313 if (dm_new_connector_state->vcpi_slots < 0) {
7314 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7315 return dm_new_connector_state->vcpi_slots;
7316 }
e7b07cee
HW
7317 return 0;
7318}
7319
7320const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7321 .disable = dm_encoder_helper_disable,
7322 .atomic_check = dm_encoder_helper_atomic_check
7323};
7324
d9fe1a4c 7325#if defined(CONFIG_DRM_AMD_DC_DCN)
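/*
 * Propagate the PBN values computed by compute_mst_dsc_configs_for_state()
 * to each MST connector in the atomic state: reserve VCPI slots and enable
 * DSC on the port for DSC streams, or record the plain slot count for
 * uncompressed streams.
 */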
29b9ba74 7326static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6513104b
HW
7327 struct dc_state *dc_state,
7328 struct dsc_mst_fairness_vars *vars)
29b9ba74
ML
7329{
7330 struct dc_stream_state *stream = NULL;
7331 struct drm_connector *connector;
5760dcb9 7332 struct drm_connector_state *new_con_state;
29b9ba74
ML
7333 struct amdgpu_dm_connector *aconnector;
7334 struct dm_connector_state *dm_conn_state;
a550bb16
HW
7335 int i, j;
7336 int vcpi, pbn_div, pbn, slot_num = 0;
29b9ba74 7337
5760dcb9 7338 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
7339
7340 aconnector = to_amdgpu_dm_connector(connector);
7341
7342 if (!aconnector->port)
7343 continue;
7344
7345 if (!new_con_state || !new_con_state->crtc)
7346 continue;
7347
7348 dm_conn_state = to_dm_connector_state(new_con_state);
7349
7350 for (j = 0; j < dc_state->stream_count; j++) {
7351 stream = dc_state->streams[j];
7352 if (!stream)
7353 continue;
7354
7355 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7356 break;
7357
7358 stream = NULL;
7359 }
7360
7361 if (!stream)
7362 continue;
7363
29b9ba74 7364 pbn_div = dm_mst_get_pbn_divider(stream->link);
6513104b
HW
7365 /* pbn is calculated by compute_mst_dsc_configs_for_state() */
7366 for (j = 0; j < dc_state->stream_count; j++) {
7367 if (vars[j].aconnector == aconnector) {
7368 pbn = vars[j].pbn;
7369 break;
7370 }
7371 }
7372
a550bb16
HW
7373 if (j == dc_state->stream_count)
7374 continue;
7375
7376 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7377
7378 if (stream->timing.flags.DSC != 1) {
7379 dm_conn_state->pbn = pbn;
7380 dm_conn_state->vcpi_slots = slot_num;
7381
7382 drm_dp_mst_atomic_enable_dsc(state,
7383 aconnector->port,
7384 dm_conn_state->pbn,
7385 0,
7386 false);
7387 continue;
7388 }
7389
29b9ba74
ML
7390 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7391 aconnector->port,
7392 pbn, pbn_div,
7393 true);
7394 if (vcpi < 0)
7395 return vcpi;
7396
7397 dm_conn_state->pbn = pbn;
7398 dm_conn_state->vcpi_slots = vcpi;
7399 }
7400 return 0;
7401}
d9fe1a4c 7402#endif
29b9ba74 7403
e7b07cee
HW
7404static void dm_drm_plane_reset(struct drm_plane *plane)
7405{
7406 struct dm_plane_state *amdgpu_state = NULL;
7407
7408 if (plane->state)
7409 plane->funcs->atomic_destroy_state(plane, plane->state);
7410
7411 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 7412 WARN_ON(amdgpu_state == NULL);
1f6010a9 7413
7ddaef96
NK
7414 if (amdgpu_state)
7415 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
7416}
7417
7418static struct drm_plane_state *
7419dm_drm_plane_duplicate_state(struct drm_plane *plane)
7420{
7421 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7422
7423 old_dm_plane_state = to_dm_plane_state(plane->state);
7424 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7425 if (!dm_plane_state)
7426 return NULL;
7427
7428 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7429
3be5262e
HW
7430 if (old_dm_plane_state->dc_state) {
7431 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7432 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
7433 }
7434
7435 return &dm_plane_state->base;
7436}
7437
dfd84d90 7438static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 7439 struct drm_plane_state *state)
e7b07cee
HW
7440{
7441 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7442
3be5262e
HW
7443 if (dm_plane_state->dc_state)
7444 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 7445
0627bbd3 7446 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
7447}
7448
7449static const struct drm_plane_funcs dm_plane_funcs = {
7450 .update_plane = drm_atomic_helper_update_plane,
7451 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 7452 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
7453 .reset = dm_drm_plane_reset,
7454 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7455 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 7456 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
7457};
7458
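/*
 * Pin the framebuffer's buffer object (cursor planes go to VRAM, others
 * to any domain the display supports), bind it into GART to obtain its
 * GPU address, and fill the DC buffer attributes for newly created planes.
 */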
3ee6b26b
AD
7459static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7460 struct drm_plane_state *new_state)
e7b07cee
HW
7461{
7462 struct amdgpu_framebuffer *afb;
7463 struct drm_gem_object *obj;
5d43be0c 7464 struct amdgpu_device *adev;
e7b07cee 7465 struct amdgpu_bo *rbo;
e7b07cee 7466 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
7467 struct list_head list;
7468 struct ttm_validate_buffer tv;
7469 struct ww_acquire_ctx ticket;
5d43be0c
CK
7470 uint32_t domain;
7471 int r;
e7b07cee
HW
7472
7473 if (!new_state->fb) {
4711c033 7474 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
7475 return 0;
7476 }
7477
7478 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 7479 obj = new_state->fb->obj[0];
e7b07cee 7480 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 7481 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
7482 INIT_LIST_HEAD(&list);
7483
7484 tv.bo = &rbo->tbo;
7485 tv.num_shared = 1;
7486 list_add(&tv.head, &list);
7487
9165fb87 7488 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
7489 if (r) {
7490 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 7491 return r;
0f257b09 7492 }
e7b07cee 7493
5d43be0c 7494 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 7495 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
7496 else
7497 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 7498
7b7c6c81 7499 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 7500 if (unlikely(r != 0)) {
30b7c614
HW
7501 if (r != -ERESTARTSYS)
7502 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 7503 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
7504 return r;
7505 }
7506
bb812f1e
JZ
7507 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7508 if (unlikely(r != 0)) {
7509 amdgpu_bo_unpin(rbo);
0f257b09 7510 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7511 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
7512 return r;
7513 }
7df7e505 7514
0f257b09 7515 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7516
7b7c6c81 7517 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
7518
7519 amdgpu_bo_ref(rbo);
7520
cf322b49
NK
7521 /**
7522 * We don't do surface updates on planes that have been newly created,
7523 * but we also don't have the afb->address during atomic check.
7524 *
7525 * Fill in buffer attributes depending on the address here, but only on
7526 * newly created planes since they're not being used by DC yet and this
7527 * won't modify global state.
7528 */
7529 dm_plane_state_old = to_dm_plane_state(plane->state);
7530 dm_plane_state_new = to_dm_plane_state(new_state);
7531
3be5262e 7532 if (dm_plane_state_new->dc_state &&
cf322b49
NK
7533 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7534 struct dc_plane_state *plane_state =
7535 dm_plane_state_new->dc_state;
7536 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 7537
320932bf 7538 fill_plane_buffer_attributes(
695af5f9 7539 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 7540 afb->tiling_flags,
cf322b49
NK
7541 &plane_state->tiling_info, &plane_state->plane_size,
7542 &plane_state->dcc, &plane_state->address,
6eed95b0 7543 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
7544 }
7545
e7b07cee
HW
7546 return 0;
7547}
7548
3ee6b26b
AD
7549static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7550 struct drm_plane_state *old_state)
e7b07cee
HW
7551{
7552 struct amdgpu_bo *rbo;
e7b07cee
HW
7553 int r;
7554
7555 if (!old_state->fb)
7556 return;
7557
e68d14dd 7558 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
7559 r = amdgpu_bo_reserve(rbo, false);
7560 if (unlikely(r)) {
7561 DRM_ERROR("failed to reserve rbo before unpin\n");
7562 return;
b830ebc9
HW
7563 }
7564
7565 amdgpu_bo_unpin(rbo);
7566 amdgpu_bo_unreserve(rbo);
7567 amdgpu_bo_unref(&rbo);
e7b07cee
HW
7568}
7569
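/*
 * Validate plane position and scaling: clamp the visible viewport against
 * the CRTC mode, then convert DC's scaling limits (1.0 == 1000) into DRM's
 * 16.16 fixed-point ratios for drm_atomic_helper_check_plane_state().
 */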
8c44515b
AP
7570static int dm_plane_helper_check_state(struct drm_plane_state *state,
7571 struct drm_crtc_state *new_crtc_state)
7572{
6300b3bd
MK
7573 struct drm_framebuffer *fb = state->fb;
7574 int min_downscale, max_upscale;
7575 int min_scale = 0;
7576 int max_scale = INT_MAX;
7577
40d916a2 7578 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 7579 if (fb && state->crtc) {
40d916a2
NC
7580 /* Validate viewport to cover the case when only the position changes */
7581 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7582 int viewport_width = state->crtc_w;
7583 int viewport_height = state->crtc_h;
7584
7585 if (state->crtc_x < 0)
7586 viewport_width += state->crtc_x;
7587 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7588 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7589
7590 if (state->crtc_y < 0)
7591 viewport_height += state->crtc_y;
7592 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7593 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7594
4abdb72b
NC
7595 if (viewport_width < 0 || viewport_height < 0) {
7596 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7597 return -EINVAL;
7598 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7599 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 7600 return -EINVAL;
4abdb72b
NC
7601 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7602 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 7603 return -EINVAL;
4abdb72b
NC
7604 }
7605
40d916a2
NC
7606 }
7607
7608 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
7609 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7610 &min_downscale, &max_upscale);
7611 /*
7612 * Convert to drm convention: 16.16 fixed point, instead of dc's
7613 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7614 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7615 */
7616 min_scale = (1000 << 16) / max_upscale;
7617 max_scale = (1000 << 16) / min_downscale;
7618 }
8c44515b 7619
8c44515b 7620 return drm_atomic_helper_check_plane_state(
6300b3bd 7621 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
7622}
7623
7578ecda 7624static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 7625 struct drm_atomic_state *state)
cbd19488 7626{
7c11b99a
MR
7627 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7628 plane);
1348969a 7629 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 7630 struct dc *dc = adev->dm.dc;
78171832 7631 struct dm_plane_state *dm_plane_state;
695af5f9 7632 struct dc_scaling_info scaling_info;
8c44515b 7633 struct drm_crtc_state *new_crtc_state;
695af5f9 7634 int ret;
78171832 7635
ba5c1649 7636 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 7637
ba5c1649 7638 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 7639
3be5262e 7640 if (!dm_plane_state->dc_state)
9a3329b1 7641 return 0;
cbd19488 7642
8c44515b 7643 new_crtc_state =
dec92020 7644 drm_atomic_get_new_crtc_state(state,
ba5c1649 7645 new_plane_state->crtc);
8c44515b
AP
7646 if (!new_crtc_state)
7647 return -EINVAL;
7648
ba5c1649 7649 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
7650 if (ret)
7651 return ret;
7652
4375d625 7653 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
695af5f9
NK
7654 if (ret)
7655 return ret;
a05bcff1 7656
62c933f9 7657 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
7658 return 0;
7659
7660 return -EINVAL;
7661}
7662
674e78ac 7663static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7664 struct drm_atomic_state *state)
674e78ac
NK
7665{
7666 /* Only support async updates on cursor planes. */
7667 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7668 return -EINVAL;
7669
7670 return 0;
7671}
7672
7673static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7674 struct drm_atomic_state *state)
674e78ac 7675{
5ddb0bd4
MR
7676 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7677 plane);
674e78ac 7678 struct drm_plane_state *old_state =
5ddb0bd4 7679 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7680
e8a98235
RS
7681 trace_amdgpu_dm_atomic_update_cursor(new_state);
7682
332af874 7683 swap(plane->state->fb, new_state->fb);
674e78ac
NK
7684
7685 plane->state->src_x = new_state->src_x;
7686 plane->state->src_y = new_state->src_y;
7687 plane->state->src_w = new_state->src_w;
7688 plane->state->src_h = new_state->src_h;
7689 plane->state->crtc_x = new_state->crtc_x;
7690 plane->state->crtc_y = new_state->crtc_y;
7691 plane->state->crtc_w = new_state->crtc_w;
7692 plane->state->crtc_h = new_state->crtc_h;
7693
7694 handle_cursor_update(plane, old_state);
7695}
7696
e7b07cee
HW
7697static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7698 .prepare_fb = dm_plane_helper_prepare_fb,
7699 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7700 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
7701 .atomic_async_check = dm_plane_atomic_async_check,
7702 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
7703};
7704
7705/*
7706 * TODO: these are currently initialized to RGB formats only.
7707 * For future use cases we should either initialize them dynamically based on
7708 * plane capabilities, or initialize this array to all formats, so the internal
1f6010a9 7709 * drm check will succeed, and let DC implement the proper check.
e7b07cee 7710 */
d90371b0 7711static const uint32_t rgb_formats[] = {
e7b07cee
HW
7712 DRM_FORMAT_XRGB8888,
7713 DRM_FORMAT_ARGB8888,
7714 DRM_FORMAT_RGBA8888,
7715 DRM_FORMAT_XRGB2101010,
7716 DRM_FORMAT_XBGR2101010,
7717 DRM_FORMAT_ARGB2101010,
7718 DRM_FORMAT_ABGR2101010,
58020403
MK
7719 DRM_FORMAT_XRGB16161616,
7720 DRM_FORMAT_XBGR16161616,
7721 DRM_FORMAT_ARGB16161616,
7722 DRM_FORMAT_ABGR16161616,
bcd47f60
MR
7723 DRM_FORMAT_XBGR8888,
7724 DRM_FORMAT_ABGR8888,
46dd9ff7 7725 DRM_FORMAT_RGB565,
e7b07cee
HW
7726};
7727
0d579c7e
NK
7728static const uint32_t overlay_formats[] = {
7729 DRM_FORMAT_XRGB8888,
7730 DRM_FORMAT_ARGB8888,
7731 DRM_FORMAT_RGBA8888,
7732 DRM_FORMAT_XBGR8888,
7733 DRM_FORMAT_ABGR8888,
7267a1a9 7734 DRM_FORMAT_RGB565
e7b07cee
HW
7735};
7736
7737static const u32 cursor_formats[] = {
7738 DRM_FORMAT_ARGB8888
7739};
7740
37c6a93b
NK
7741static int get_plane_formats(const struct drm_plane *plane,
7742 const struct dc_plane_cap *plane_cap,
7743 uint32_t *formats, int max_formats)
e7b07cee 7744{
37c6a93b
NK
7745 int i, num_formats = 0;
7746
7747 /*
7748 * TODO: Query support for each group of formats directly from
7749 * DC plane caps. This will require adding more formats to the
7750 * caps list.
7751 */
e7b07cee 7752
f180b4bc 7753 switch (plane->type) {
e7b07cee 7754 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
7755 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7756 if (num_formats >= max_formats)
7757 break;
7758
7759 formats[num_formats++] = rgb_formats[i];
7760 }
7761
ea36ad34 7762 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 7763 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
7764 if (plane_cap && plane_cap->pixel_format_support.p010)
7765 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
7766 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7767 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7768 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
7769 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7770 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 7771 }
e7b07cee 7772 break;
37c6a93b 7773
e7b07cee 7774 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
7775 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7776 if (num_formats >= max_formats)
7777 break;
7778
7779 formats[num_formats++] = overlay_formats[i];
7780 }
e7b07cee 7781 break;
37c6a93b 7782
e7b07cee 7783 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
7784 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7785 if (num_formats >= max_formats)
7786 break;
7787
7788 formats[num_formats++] = cursor_formats[i];
7789 }
e7b07cee
HW
7790 break;
7791 }
7792
37c6a93b
NK
7793 return num_formats;
7794}
7795
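/*
 * Register a DRM plane with the formats/modifiers derived from the DC
 * plane caps, then attach alpha/blend properties (overlays), YUV color
 * encoding/range properties (primaries with NV12/P010 support) and a
 * rotation property where supported.
 */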
7796static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7797 struct drm_plane *plane,
7798 unsigned long possible_crtcs,
7799 const struct dc_plane_cap *plane_cap)
7800{
7801 uint32_t formats[32];
7802 int num_formats;
7803 int res = -EPERM;
ecc874a6 7804 unsigned int supported_rotations;
faa37f54 7805 uint64_t *modifiers = NULL;
37c6a93b
NK
7806
7807 num_formats = get_plane_formats(plane, plane_cap, formats,
7808 ARRAY_SIZE(formats));
7809
faa37f54
BN
7810 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7811 if (res)
7812 return res;
7813
4a580877 7814 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7815 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
7816 modifiers, plane->type, NULL);
7817 kfree(modifiers);
37c6a93b
NK
7818 if (res)
7819 return res;
7820
cc1fec57
NK
7821 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7822 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
7823 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7824 BIT(DRM_MODE_BLEND_PREMULTI);
7825
7826 drm_plane_create_alpha_property(plane);
7827 drm_plane_create_blend_mode_property(plane, blend_caps);
7828 }
7829
fc8e5230 7830 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
7831 plane_cap &&
7832 (plane_cap->pixel_format_support.nv12 ||
7833 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
7834 /* This only affects YUV formats. */
7835 drm_plane_create_color_properties(
7836 plane,
7837 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
7838 BIT(DRM_COLOR_YCBCR_BT709) |
7839 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
7840 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7841 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7842 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7843 }
7844
ecc874a6
PLG
7845 supported_rotations =
7846 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7847 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7848
1347385f
SS
7849 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7850 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
7851 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7852 supported_rotations);
ecc874a6 7853
f180b4bc 7854 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7855
96719c54 7856 /* Create (reset) the plane state */
f180b4bc
HW
7857 if (plane->funcs->reset)
7858 plane->funcs->reset(plane);
96719c54 7859
37c6a93b 7860 return 0;
e7b07cee
HW
7861}
7862
7578ecda
AD
7863static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7864 struct drm_plane *plane,
7865 uint32_t crtc_index)
e7b07cee
HW
7866{
7867 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7868 struct drm_plane *cursor_plane;
e7b07cee
HW
7869
7870 int res = -ENOMEM;
7871
7872 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7873 if (!cursor_plane)
7874 goto fail;
7875
f180b4bc 7876 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7877 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
7878
7879 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7880 if (!acrtc)
7881 goto fail;
7882
7883 res = drm_crtc_init_with_planes(
7884 dm->ddev,
7885 &acrtc->base,
7886 plane,
f180b4bc 7887 cursor_plane,
e7b07cee
HW
7888 &amdgpu_dm_crtc_funcs, NULL);
7889
7890 if (res)
7891 goto fail;
7892
7893 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7894
96719c54
HW
7895 /* Create (reset) the plane state */
7896 if (acrtc->base.funcs->reset)
7897 acrtc->base.funcs->reset(&acrtc->base);
7898
e7b07cee
HW
7899 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7900 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7901
7902 acrtc->crtc_id = crtc_index;
7903 acrtc->base.enabled = false;
c37e2d29 7904 acrtc->otg_inst = -1;
e7b07cee
HW
7905
7906 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
7907 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7908 true, MAX_COLOR_LUT_ENTRIES);
086247a4 7909 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 7910
e7b07cee
HW
7911 return 0;
7912
7913fail:
b830ebc9
HW
7914 kfree(acrtc);
7915 kfree(cursor_plane);
e7b07cee
HW
7916 return res;
7917}
7918
7919
7920static int to_drm_connector_type(enum signal_type st)
7921{
7922 switch (st) {
7923 case SIGNAL_TYPE_HDMI_TYPE_A:
7924 return DRM_MODE_CONNECTOR_HDMIA;
7925 case SIGNAL_TYPE_EDP:
7926 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
7927 case SIGNAL_TYPE_LVDS:
7928 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
7929 case SIGNAL_TYPE_RGB:
7930 return DRM_MODE_CONNECTOR_VGA;
7931 case SIGNAL_TYPE_DISPLAY_PORT:
7932 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7933 return DRM_MODE_CONNECTOR_DisplayPort;
7934 case SIGNAL_TYPE_DVI_DUAL_LINK:
7935 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7936 return DRM_MODE_CONNECTOR_DVID;
7937 case SIGNAL_TYPE_VIRTUAL:
7938 return DRM_MODE_CONNECTOR_VIRTUAL;
7939
7940 default:
7941 return DRM_MODE_CONNECTOR_Unknown;
7942 }
7943}
7944
2b4c1c05
DV
7945static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7946{
62afb4ad
JRS
7947 struct drm_encoder *encoder;
7948
7949 /* There is only one encoder per connector */
7950 drm_connector_for_each_possible_encoder(connector, encoder)
7951 return encoder;
7952
7953 return NULL;
2b4c1c05
DV
7954}
7955
e7b07cee
HW
7956static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7957{
e7b07cee
HW
7958 struct drm_encoder *encoder;
7959 struct amdgpu_encoder *amdgpu_encoder;
7960
2b4c1c05 7961 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
7962
7963 if (encoder == NULL)
7964 return;
7965
7966 amdgpu_encoder = to_amdgpu_encoder(encoder);
7967
7968 amdgpu_encoder->native_mode.clock = 0;
7969
7970 if (!list_empty(&connector->probed_modes)) {
7971 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 7972
e7b07cee 7973 list_for_each_entry(preferred_mode,
b830ebc9
HW
7974 &connector->probed_modes,
7975 head) {
7976 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7977 amdgpu_encoder->native_mode = *preferred_mode;
7978
e7b07cee
HW
7979 break;
7980 }
7981
7982 }
7983}
7984
3ee6b26b
AD
7985static struct drm_display_mode *
7986amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7987 char *name,
7988 int hdisplay, int vdisplay)
e7b07cee
HW
7989{
7990 struct drm_device *dev = encoder->dev;
7991 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7992 struct drm_display_mode *mode = NULL;
7993 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7994
7995 mode = drm_mode_duplicate(dev, native_mode);
7996
b830ebc9 7997 if (mode == NULL)
e7b07cee
HW
7998 return NULL;
7999
8000 mode->hdisplay = hdisplay;
8001 mode->vdisplay = vdisplay;
8002 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 8003 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
8004
8005 return mode;
8006
8007}
8008
8009static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 8010 struct drm_connector *connector)
e7b07cee
HW
8011{
8012 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8013 struct drm_display_mode *mode = NULL;
8014 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
8015 struct amdgpu_dm_connector *amdgpu_dm_connector =
8016 to_amdgpu_dm_connector(connector);
e7b07cee
HW
8017 int i;
8018 int n;
8019 struct mode_size {
8020 char name[DRM_DISPLAY_MODE_LEN];
8021 int w;
8022 int h;
b830ebc9 8023 } common_modes[] = {
e7b07cee
HW
8024 { "640x480", 640, 480},
8025 { "800x600", 800, 600},
8026 { "1024x768", 1024, 768},
8027 { "1280x720", 1280, 720},
8028 { "1280x800", 1280, 800},
8029 {"1280x1024", 1280, 1024},
8030 { "1440x900", 1440, 900},
8031 {"1680x1050", 1680, 1050},
8032 {"1600x1200", 1600, 1200},
8033 {"1920x1080", 1920, 1080},
8034 {"1920x1200", 1920, 1200}
8035 };
8036
b830ebc9 8037 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
8038
8039 for (i = 0; i < n; i++) {
8040 struct drm_display_mode *curmode = NULL;
8041 bool mode_existed = false;
8042
8043 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
8044 common_modes[i].h > native_mode->vdisplay ||
8045 (common_modes[i].w == native_mode->hdisplay &&
8046 common_modes[i].h == native_mode->vdisplay))
8047 continue;
e7b07cee
HW
8048
8049 list_for_each_entry(curmode, &connector->probed_modes, head) {
8050 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 8051 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
8052 mode_existed = true;
8053 break;
8054 }
8055 }
8056
8057 if (mode_existed)
8058 continue;
8059
8060 mode = amdgpu_dm_create_common_mode(encoder,
8061 common_modes[i].name, common_modes[i].w,
8062 common_modes[i].h);
8063 drm_mode_probed_add(connector, mode);
c84dec2f 8064 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
8065 }
8066}
8067
d77de788
SS
8068static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8069{
8070 struct drm_encoder *encoder;
8071 struct amdgpu_encoder *amdgpu_encoder;
8072 const struct drm_display_mode *native_mode;
8073
8074 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8075 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8076 return;
8077
8078 encoder = amdgpu_dm_connector_to_encoder(connector);
8079 if (!encoder)
8080 return;
8081
8082 amdgpu_encoder = to_amdgpu_encoder(encoder);
8083
8084 native_mode = &amdgpu_encoder->native_mode;
8085 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8086 return;
8087
8088 drm_connector_set_panel_orientation_with_quirk(connector,
8089 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8090 native_mode->hdisplay,
8091 native_mode->vdisplay);
8092}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. Modes later in the
		 * probed mode list could be of higher, preferred
		 * resolution: for example, 3840x2160 in the base EDID
		 * preferred timing and 4096x2160 in a DID extension
		 * block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);

		/* Freesync capabilities are reset by calling
		 * drm_add_edid_modes() and need to be
		 * restored here.
		 */
		amdgpu_dm_update_freesync_caps(connector, edid);

		amdgpu_set_panel_orientation(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
			      struct drm_display_mode *mode)
{
	struct drm_display_mode *m;

	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
		if (drm_mode_equal(m, mode))
			return true;
	}

	return false;
}

static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
	const struct drm_display_mode *m;
	struct drm_display_mode *new_mode;
	uint i;
	uint32_t new_modes_count = 0;

	/* Standard FPS values
	 *
	 * 23.976       - TV/NTSC
	 * 24           - Cinema
	 * 25           - TV/PAL
	 * 29.97        - TV/NTSC
	 * 30           - TV/NTSC
	 * 48           - Cinema HFR
	 * 50           - TV/PAL
	 * 60           - Commonly used
	 * 48,72,96,120 - Multiples of 24
	 */
	static const uint32_t common_rates[] = {
		23976, 24000, 25000, 29970, 30000,
		48000, 50000, 60000, 72000, 96000, 120000
	};

	/*
	 * Find mode with highest refresh rate with the same resolution
	 * as the preferred mode. Some monitors report a preferred mode
	 * with lower resolution than the highest refresh rate supported.
	 */

	m = get_highest_refresh_rate_mode(aconnector, true);
	if (!m)
		return 0;

	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
		uint64_t target_vtotal, target_vtotal_diff;
		uint64_t num, den;

		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
			continue;

		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
		    common_rates[i] > aconnector->max_vfreq * 1000)
			continue;

		num = (unsigned long long)m->clock * 1000 * 1000;
		den = common_rates[i] * (unsigned long long)m->htotal;
		target_vtotal = div_u64(num, den);
		target_vtotal_diff = target_vtotal - m->vtotal;

		/* Check for illegal modes */
		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
		    m->vtotal + target_vtotal_diff < m->vsync_end)
			continue;

		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
		if (!new_mode)
			goto out;

		new_mode->vtotal += (u16)target_vtotal_diff;
		new_mode->vsync_start += (u16)target_vtotal_diff;
		new_mode->vsync_end += (u16)target_vtotal_diff;
		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
		new_mode->type |= DRM_MODE_TYPE_DRIVER;

		if (!is_duplicate_mode(aconnector, new_mode)) {
			drm_mode_probed_add(&aconnector->base, new_mode);
			new_modes_count += 1;
		} else
			drm_mode_destroy(aconnector->base.dev, new_mode);
	}
out:
	return new_modes_count;
}
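
/*
 * Worked example with hypothetical numbers: retargeting 1920x1080@60
 * (pixel clock 148500 kHz, htotal 2200, vtotal 1125) to 48 Hz gives
 *
 *   num                = 148500 * 1000 * 1000 = 148500000000
 *   den                = 48000 * 2200         = 105600000
 *   target_vtotal      = num / den            = 1406
 *   target_vtotal_diff = 1406 - 1125          = 281
 *
 * so vtotal/vsync_start/vsync_end are stretched by 281 lines and the
 * resulting mode refreshes at 148500000 / (2200 * 1406) ~= 48.01 Hz
 * with the pixel clock unchanged.
 */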

static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
						   struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);

	if (!(amdgpu_freesync_vid_mode && edid))
		return;

	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
		amdgpu_dm_connector->num_modes +=
			add_fs_modes(amdgpu_dm_connector);
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
		amdgpu_dm_connector_add_freesync_modes(connector, edid);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}
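
/*
 * Modes therefore come from three sources, in order: EDID-probed modes,
 * the common-mode list filtered against the native mode, and the
 * synthesized FreeSync video modes. Without a valid EDID we only
 * advertise the safe modes up to 640x480 from drm_add_modes_noedid().
 */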

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		if (link->is_dig_mapping_flexible &&
		    link->dc->res_pool->funcs->link_encs_assign) {
			link->link_enc =
				link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
			if (!link->link_enc)
				link->link_enc =
					link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
		}

		if (link->link_enc)
			aconnector->base.ycbcr_420_allowed =
				link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
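
/*
 * Note on the bpc defaults above: the max_bpc property spans 8..16, but
 * the initial state is pinned to 8 bpc except for eDP, so deep color on
 * external links is strictly opt-in from userspace while internal
 * panels may use the full range they negotiate.
 */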

static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}
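
/*
 * Sketch of the translation above: a typical EDID block read reaches
 * amdgpu_dm_i2c_xfer() as two i2c_msg entries (hypothetical values),
 *
 *   msgs[0] = { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset };
 *   msgs[1] = { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = block };
 *
 * and becomes one write payload plus one read payload in a single
 * i2c_command that dc_submit_i2c() executes on the link's DDC channel.
 */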

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	if (i2c->ddc_service->ddc_pin)
		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}

/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}
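
/*
 * The switch above is just a bounded form of (1u << num_crtc) - 1:
 * one possible-CRTC bit per enabled CRTC, capped at the six CRTCs the
 * display hardware exposes at most.
 */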

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_get(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
	} else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_put(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}
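
/*
 * Note the ordering: on enable, vblank is switched on before the
 * pageflip (and vline0) interrupt references are taken; on disable the
 * references are dropped first and vblank is switched off last, so a
 * flip completion should never race against a disabled vblank counter.
 */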
8581
8fe684e9
NK
8582static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8583 struct amdgpu_crtc *acrtc)
8584{
8585 int irq_type =
8586 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8587
8588 /**
8589 * This reads the current state for the IRQ and force reapplies
8590 * the setting to hardware.
8591 */
8592 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8593}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}
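
/*
 * The border tests above intentionally use &&: toggling underscan only
 * counts as a scaling change when both the horizontal and vertical
 * borders are non-zero, since an enable/disable with zero borders
 * programs the same active region either way.
 */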

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this.
	 *
	 * Handles: ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
	 *
	 * Handles: UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Stream removed and re-enabled
	 *
	 * Can sometimes overlap with the HPD case,
	 * thus set update_hdcp to false to avoid
	 * setting HDCP multiple times.
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (!(old_state->crtc && old_state->crtc->enabled) &&
	    state->crtc && state->crtc->enabled &&
	    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/* Hot-plug, headless s3, dpms
	 *
	 * Only start HDCP if the display is connected/enabled.
	 * update_hdcp flag will be set to false until the next
	 * HPD comes in.
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles: UNDESIRED -> UNDESIRED
	 *          DESIRED -> DESIRED
	 *          ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles: UNDESIRED -> DESIRED
	 *          DESIRED -> UNDESIRED
	 *          ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles: DESIRED -> ENABLED
	 */
	return false;
}

#endif
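
/*
 * Roughly summarized, is_content_protection_different() returns true
 * exactly when the commit must (re)run hdcp_update_display(): a content
 * type switch, a stream re-enable, a hot-plug with HDCP still desired,
 * or an edge into or out of UNDESIRED. Transitions the driver completes
 * on its own (ENABLED -> DESIRED re-enable, DESIRED -> ENABLED
 * confirmation) are filtered out as no-ops.
 */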
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}

static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}
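
/*
 * Example with hypothetical values: a 64x64 cursor at crtc_x = -10,
 * crtc_y = 20 comes out as position (0, 20) with hotspot (10, 0), i.e.
 * the plane is clamped to the screen edge and the hotspot shift keeps
 * the visible part of the cursor image aligned.
 */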

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position = {0};
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
		      __func__,
		      amdgpu_crtc->crtc_id,
		      plane->state->crtc_w,
		      plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
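
/*
 * Note that the pitch handed to DC above is in pixels, not bytes: a
 * hypothetical 64x64 ARGB8888 cursor FB has pitches[0] = 256 and
 * cpp = 4, giving attributes.pitch = 64.
 */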

static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
		     acrtc->crtc_id);
}

static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;
	bool pack_sdp_v1_3 = false;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket,
		pack_sdp_v1_3);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		/*
		 * if freesync compatible mode was set, config.state will be set
		 * in atomic check
		 */
		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
		} else {
			config.state = new_crtc_state->base.vrr_enabled ?
						     VRR_STATE_ACTIVE_VARIABLE :
						     VRR_STATE_INACTIVE;
		}
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
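
/*
 * All refresh limits in the config handled above are in micro-Hertz;
 * a 48-144 Hz VRR range, for instance, travels through here as
 * min_refresh_in_uhz = 48000000 and max_refresh_in_uhz = 144000000.
 */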

static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}
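
/*
 * The vupdate/vblank get and put above are deliberately split across
 * two calls: the references taken on the off->on transition are only
 * dropped on the matching on->off transition, pinning vblank interrupts
 * on for as long as VRR stays active.
 */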

static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_old_plane_in_state(state, plane, old_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}

static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(dm->adev, new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
					  msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/*
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0 &&
		    !acrtc_state->force_dpms_off) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		/*
		 * If PSR or idle optimizations are enabled then flush out
		 * any pending work before hardware programming.
		 */
		if (dm->vblank_control_workqueue)
			flush_workqueue(dm->vblank_control_workqueue);
#endif

		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		/*
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),
						  acrtc_attach);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);

		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
			struct amdgpu_dm_connector *aconn =
				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;

			if (aconn->psr_skip_count > 0)
				aconn->psr_skip_count--;

			/* Allow PSR when skip count is 0. */
			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
		} else {
			acrtc_attach->dm_irq_params.allow_psr_entry = false;
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}
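
/*
 * The flip throttling in amdgpu_dm_commit_planes() reduces to
 * target_vblank = last_flip_vblank + (wait_for_vblank ? 1 : 0): vsynced
 * flips busy-wait in ~1 ms steps until scanout leaves the vblank before
 * the target, while async flips program the new address immediately.
 */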

static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}
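
/*
 * The removal pass above deliberately snapshots the old audio_inst and
 * clears it under audio_lock before calling amdgpu_dm_audio_eld_notify(),
 * so the audio component sees the endpoint disappear before it is
 * possibly re-added with the instance from the new stream status.
 */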

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failures here imply a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;
	bool mode_set_reset_required = false;

	trace_amdgpu_dm_atomic_commit_tail_begin(state);

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, false);
			dc_stream_release(dm_old_crtc_state->stream);
		}
	}

	drm_atomic_helper_calc_timestamping_constants(state);

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_ATOMIC(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Disable cursor if disabling crtc */
		if (old_crtc_state->active && !new_crtc_state->active) {
			struct dc_cursor_position position;

			memset(&position, 0, sizeof(position));
			mutex_lock(&dm->dc_lock);
			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
			mutex_unlock(&dm->dc_lock);
		}

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * this could happen because of issues with
				 * userspace notifications delivery.
				 * In this case userspace tries to set mode on
				 * display which is disconnected in fact.
				 * dc_sink is NULL in this case on aconnector.
				 * We expect reset mode will come soon.
				 *
				 * This can also happen when unplug is done
				 * during resume sequence ended
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
			mode_set_reset_required = true;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			mode_set_reset_required = true;
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
		if (mode_set_reset_required) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (dm->vblank_control_workqueue)
				flush_workqueue(dm->vblank_control_workqueue);
#endif
			amdgpu_dm_psr_disable_all(dm);
		}

		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
#if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
#endif
		mutex_unlock(&dm->dc_lock);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);

		if (WARN_ON(!status))
			continue;

		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 9741 new_crtc_state, i) {
fe2a1965
LP
9742 if (old_crtc_state->active && !new_crtc_state->active)
9743 crtc_disable_count++;
9744
54d76575 9745 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 9746 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 9747
585d450c
AP
9748 /* For freesync config update on crtc state and params for irq */
9749 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 9750
66b0c973
MK
9751 /* Handle vrr on->off / off->on transitions */
9752 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9753 dm_new_crtc_state);
e7b07cee
HW
9754 }
9755
8fe684e9
NK
9756 /**
9757 * Enable interrupts for CRTCs that are newly enabled or went through
9758 * a modeset. It was intentionally deferred until after the front end
9759 * state was modified to wait until the OTG was on and so the IRQ
9760 * handlers didn't access stale or invalid state.
9761 */
9762 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9763 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 9764#ifdef CONFIG_DEBUG_FS
86bc2219 9765 bool configure_crc = false;
8e7b6fee 9766 enum amdgpu_dm_pipe_crc_source cur_crc_src;
d98af272
WL
9767#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9768 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9769#endif
9770 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9771 cur_crc_src = acrtc->dm_irq_params.crc_src;
9772 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 9773#endif
585d450c
AP
9774 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9775
8fe684e9
NK
9776 if (new_crtc_state->active &&
9777 (!old_crtc_state->active ||
9778 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
9779 dc_stream_retain(dm_new_crtc_state->stream);
9780 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 9781 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 9782
24eb9374 9783#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
9784 /**
9785 * Frontend may have changed so reapply the CRC capture
9786 * settings for the stream.
9787 */
9788 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 9789
8e7b6fee 9790 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
9791 configure_crc = true;
9792#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
9793 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9794 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9795 acrtc->dm_irq_params.crc_window.update_win = true;
9796 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9797 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9798 crc_rd_wrk->crtc = crtc;
9799 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9800 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9801 }
86bc2219 9802#endif
e2881d6d 9803 }
c920888c 9804
86bc2219 9805 if (configure_crc)
bbc49fc0
WL
9806 if (amdgpu_dm_crtc_configure_crc_source(
9807 crtc, dm_new_crtc_state, cur_crc_src))
9808 DRM_DEBUG_DRIVER("Failed to configure crc source");
24eb9374 9809#endif
8fe684e9
NK
9810 }
9811 }
e7b07cee 9812
420cd472 9813 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 9814 if (new_crtc_state->async_flip)
420cd472
DF
9815 wait_for_vblank = false;
9816
e7b07cee 9817 /* update planes when needed per crtc*/
5cc6dcbd 9818 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 9819 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9820
54d76575 9821 if (dm_new_crtc_state->stream)
eb3dc897 9822 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 9823 dm, crtc, wait_for_vblank);
e7b07cee
HW
9824 }
9825
6ce8f316
NK
9826 /* Update audio instances for each connector. */
9827 amdgpu_dm_commit_audio(dev, state);
9828
7230362c
AD
9829#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9830 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9831 /* restore the backlight level */
7fd13bae
AD
9832 for (i = 0; i < dm->num_of_edps; i++) {
9833 if (dm->backlight_dev[i] &&
9834 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9835 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9836 }
7230362c 9837#endif
e7b07cee
HW
9838 /*
9839 * send a vblank event for each event not handled in the flip path, and
9840 * mark the event as consumed for drm_atomic_helper_commit_hw_done
9841 */
4a580877 9842 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 9843 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9844
0bc9706d
LSL
9845 if (new_crtc_state->event)
9846 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 9847
0bc9706d 9848 new_crtc_state->event = NULL;
e7b07cee 9849 }
4a580877 9850 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 9851
29c8f234
LL
9852 /* Signal HW programming completion */
9853 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
9854
9855 if (wait_for_vblank)
320a1274 9856 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
9857
9858 drm_atomic_helper_cleanup_planes(dev, state);
97028037 9859
5f6fab24
AD
9860 /* return the stolen vga memory back to VRAM */
9861 if (!adev->mman.keep_stolen_vga_memory)
9862 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9863 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9864
1f6010a9
DF
9865 /*
9866 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
9867 * so we can put the GPU into runtime suspend if we're not driving any
9868 * displays anymore
9869 */
fe2a1965
LP
9870 for (i = 0; i < crtc_disable_count; i++)
9871 pm_runtime_put_autosuspend(dev->dev);
97028037 9872 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
9873
9874 if (dc_state_temp)
9875 dc_release_state(dc_state_temp);
e7b07cee
HW
9876}
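Aside: the tail of the commit above balances runtime PM by dropping one reference per CRTC that went from active to inactive. A minimal standalone sketch of that accounting (the crtc_state_pair type and the sample values are invented for illustration, not taken from the driver):

#include <stdio.h>

/* Hypothetical old/new active flags for one CRTC (sketch only). */
struct crtc_state_pair {
        int old_active;
        int new_active;
};

int main(void)
{
        /* Three CRTCs: one stays on, one turns off, one turns on. */
        struct crtc_state_pair crtcs[] = { {1, 1}, {1, 0}, {0, 1} };
        int i, crtc_disable_count = 0;

        for (i = 0; i < 3; i++)
                if (crtcs[i].old_active && !crtcs[i].new_active)
                        crtc_disable_count++;

        /* The driver then calls pm_runtime_put_autosuspend() this many times. */
        printf("dropping %d runtime PM reference(s)\n", crtc_disable_count);
        return 0;
}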
9877
9878
9879static int dm_force_atomic_commit(struct drm_connector *connector)
9880{
9881 int ret = 0;
9882 struct drm_device *ddev = connector->dev;
9883 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9884 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9885 struct drm_plane *plane = disconnected_acrtc->base.primary;
9886 struct drm_connector_state *conn_state;
9887 struct drm_crtc_state *crtc_state;
9888 struct drm_plane_state *plane_state;
9889
9890 if (!state)
9891 return -ENOMEM;
9892
9893 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9894
9895 /* Construct an atomic state to restore previous display setting */
9896
9897 /*
9898 * Attach connectors to drm_atomic_state
9899 */
9900 conn_state = drm_atomic_get_connector_state(state, connector);
9901
9902 ret = PTR_ERR_OR_ZERO(conn_state);
9903 if (ret)
2dc39051 9904 goto out;
e7b07cee
HW
9905
9906 /* Attach crtc to drm_atomic_state*/
9907 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9908
9909 ret = PTR_ERR_OR_ZERO(crtc_state);
9910 if (ret)
2dc39051 9911 goto out;
e7b07cee
HW
9912
9913 /* force a restore */
9914 crtc_state->mode_changed = true;
9915
9916 /* Attach plane to drm_atomic_state */
9917 plane_state = drm_atomic_get_plane_state(state, plane);
9918
9919 ret = PTR_ERR_OR_ZERO(plane_state);
9920 if (ret)
2dc39051 9921 goto out;
e7b07cee
HW
9922
9923 /* Call commit internally with the state we just constructed */
9924 ret = drm_atomic_commit(state);
e7b07cee 9925
2dc39051 9926out:
e7b07cee 9927 drm_atomic_state_put(state);
2dc39051
VL
9928 if (ret)
9929 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
9930
9931 return ret;
9932}
9933
9934/*
1f6010a9
DF
9935 * This function handles all cases when a mode set does not come upon hotplug.
9936 * This includes when a display is unplugged then plugged back into the
9937 * same port and when running without userspace desktop manager support
e7b07cee 9938 */
3ee6b26b
AD
9939void dm_restore_drm_connector_state(struct drm_device *dev,
9940 struct drm_connector *connector)
e7b07cee 9941{
c84dec2f 9942 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
9943 struct amdgpu_crtc *disconnected_acrtc;
9944 struct dm_crtc_state *acrtc_state;
9945
9946 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9947 return;
9948
9949 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
9950 if (!disconnected_acrtc)
9951 return;
e7b07cee 9952
70e8ffc5
HW
9953 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9954 if (!acrtc_state->stream)
e7b07cee
HW
9955 return;
9956
9957 /*
9958 * If the previous sink is not released and different from the current,
9959 * we deduce we are in a state where we cannot rely on a userspace call
9960 * to turn on the display, so we do it here
9961 */
9962 if (acrtc_state->stream->sink != aconnector->dc_sink)
9963 dm_force_atomic_commit(&aconnector->base);
9964}
9965
1f6010a9 9966/*
e7b07cee
HW
9967 * Grabs all modesetting locks to serialize against any blocking commits,
9968 * and waits for completion of all non-blocking commits.
9969 */
3ee6b26b
AD
9970static int do_aquire_global_lock(struct drm_device *dev,
9971 struct drm_atomic_state *state)
e7b07cee
HW
9972{
9973 struct drm_crtc *crtc;
9974 struct drm_crtc_commit *commit;
9975 long ret;
9976
1f6010a9
DF
9977 /*
9978 * Adding all modeset locks to acquire_ctx will
e7b07cee
HW
9979 * ensure that when the framework releases it, the
9980 * extra locks we are taking here will get released too
9981 */
9982 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9983 if (ret)
9984 return ret;
9985
9986 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9987 spin_lock(&crtc->commit_lock);
9988 commit = list_first_entry_or_null(&crtc->commit_list,
9989 struct drm_crtc_commit, commit_entry);
9990 if (commit)
9991 drm_crtc_commit_get(commit);
9992 spin_unlock(&crtc->commit_lock);
9993
9994 if (!commit)
9995 continue;
9996
1f6010a9
DF
9997 /*
9998 * Make sure all pending HW programming completed and
e7b07cee
HW
9999 * page flips done
10000 */
10001 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10002
10003 if (ret > 0)
10004 ret = wait_for_completion_interruptible_timeout(
10005 &commit->flip_done, 10*HZ);
10006
10007 if (ret == 0)
10008 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 10009 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
10010
10011 drm_crtc_commit_put(commit);
10012 }
10013
10014 return ret < 0 ? ret : 0;
10015}
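Aside: the double wait above relies on the kernel convention that wait_for_completion_interruptible_timeout() returns the remaining time (positive) on success, 0 on timeout, and a negative value when interrupted. A standalone sketch of that return-value chaining, with stub_wait() standing in for the real call and returning hard-coded results:

#include <stdio.h>

/* Stand-in for wait_for_completion_interruptible_timeout(); the caller
 * passes the result it wants back, purely for demonstration. */
static long stub_wait(long result)
{
        return result;
}

static int wait_hw_then_flip(long hw_done, long flip_done)
{
        long ret = stub_wait(hw_done);

        if (ret > 0)                            /* hw_done completed in time, */
                ret = stub_wait(flip_done);     /* so also wait for flip_done */

        if (ret == 0)
                fprintf(stderr, "hw_done or flip_done timed out\n");

        return ret < 0 ? ret : 0;       /* a timeout is logged, not treated as fatal */
}

int main(void)
{
        printf("%d\n", wait_hw_then_flip(5, 5));        /* both completed -> 0 */
        printf("%d\n", wait_hw_then_flip(5, 0));        /* flip timed out -> 0 */
        printf("%d\n", wait_hw_then_flip(-4, 0));       /* interrupted    -> -4 */
        return 0;
}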
10016
bb47de73
NK
10017static void get_freesync_config_for_crtc(
10018 struct dm_crtc_state *new_crtc_state,
10019 struct dm_connector_state *new_con_state)
98e6436d
AK
10020{
10021 struct mod_freesync_config config = {0};
98e6436d
AK
10022 struct amdgpu_dm_connector *aconnector =
10023 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 10024 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 10025 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 10026 bool fs_vid_mode = false;
98e6436d 10027
a057ec46 10028 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
10029 vrefresh >= aconnector->min_vfreq &&
10030 vrefresh <= aconnector->max_vfreq;
bb47de73 10031
a057ec46
IB
10032 if (new_crtc_state->vrr_supported) {
10033 new_crtc_state->stream->ignore_msa_timing_param = true;
a85ba005
NC
10034 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10035
10036 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10037 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 10038 config.vsif_supported = true;
180db303 10039 config.btr = true;
98e6436d 10040
a85ba005
NC
10041 if (fs_vid_mode) {
10042 config.state = VRR_STATE_ACTIVE_FIXED;
10043 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10044 goto out;
10045 } else if (new_crtc_state->base.vrr_enabled) {
10046 config.state = VRR_STATE_ACTIVE_VARIABLE;
10047 } else {
10048 config.state = VRR_STATE_INACTIVE;
10049 }
10050 }
10051out:
bb47de73
NK
10052 new_crtc_state->freesync_config = config;
10053}
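Aside: VRR is only reported as supported when the mode's nominal refresh rate falls inside the connector's FreeSync range, and DC consumes that range in micro-Hertz. A minimal sketch of the check and the Hz-to-uHz conversion; the vrr_range type and the sample numbers are invented for illustration:

#include <stdio.h>

/* Hypothetical FreeSync range parsed from the display's EDID. */
struct vrr_range {
        int min_vfreq;  /* Hz */
        int max_vfreq;  /* Hz */
};

int main(void)
{
        struct vrr_range conn = { 48, 144 };
        int vrefresh = 120;     /* e.g. what drm_mode_vrefresh() would report */

        int vrr_supported = vrefresh >= conn.min_vfreq &&
                            vrefresh <= conn.max_vfreq;

        if (vrr_supported)      /* DC expects the range in micro-Hertz */
                printf("range: %d..%d uHz\n",
                       conn.min_vfreq * 1000000, conn.max_vfreq * 1000000);
        return 0;
}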
98e6436d 10054
bb47de73
NK
10055static void reset_freesync_config_for_crtc(
10056 struct dm_crtc_state *new_crtc_state)
10057{
10058 new_crtc_state->vrr_supported = false;
98e6436d 10059
bb47de73
NK
10060 memset(&new_crtc_state->vrr_infopacket, 0,
10061 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
10062}
10063
a85ba005
NC
10064static bool
10065is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10066 struct drm_crtc_state *new_crtc_state)
10067{
10068 struct drm_display_mode old_mode, new_mode;
10069
10070 if (!old_crtc_state || !new_crtc_state)
10071 return false;
10072
10073 old_mode = old_crtc_state->mode;
10074 new_mode = new_crtc_state->mode;
10075
10076 if (old_mode.clock == new_mode.clock &&
10077 old_mode.hdisplay == new_mode.hdisplay &&
10078 old_mode.vdisplay == new_mode.vdisplay &&
10079 old_mode.htotal == new_mode.htotal &&
10080 old_mode.vtotal != new_mode.vtotal &&
10081 old_mode.hsync_start == new_mode.hsync_start &&
10082 old_mode.vsync_start != new_mode.vsync_start &&
10083 old_mode.hsync_end == new_mode.hsync_end &&
10084 old_mode.vsync_end != new_mode.vsync_end &&
10085 old_mode.hskew == new_mode.hskew &&
10086 old_mode.vscan == new_mode.vscan &&
10087 (old_mode.vsync_end - old_mode.vsync_start) ==
10088 (new_mode.vsync_end - new_mode.vsync_start))
10089 return true;
10090
10091 return false;
10092}
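Aside: the helper above deliberately requires identical horizontal timing while letting the vertical totals differ, because FreeSync video modes are derived from a base mode by stretching the vertical front porch, lowering the refresh rate without retraining the link. A standalone worked example (CEA 1080p numbers chosen for illustration, not read from the driver) showing how a vtotal change alone moves ~60 Hz to ~48 Hz:

#include <stdio.h>

/* Only the mode fields this sketch needs (not struct drm_display_mode). */
struct mode {
        int clock;      /* pixel clock in kHz */
        int htotal;
        int vtotal;
};

static double refresh_hz(const struct mode *m)
{
        return (double)m->clock * 1000.0 / ((double)m->htotal * m->vtotal);
}

int main(void)
{
        /* 1920x1080@60 and the same mode with a stretched vertical front
         * porch: clock and horizontal timing identical, only vtotal grows. */
        struct mode base    = { 148500, 2200, 1125 };
        struct mode stretch = { 148500, 2200, 1406 };

        printf("base %.2f Hz -> stretched %.2f Hz\n",
               refresh_hz(&base), refresh_hz(&stretch));        /* ~60 -> ~48 */
        return 0;
}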
10093
10094static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10095 uint64_t num, den, res;
10096 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10097
10098 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10099
10100 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10101 den = (unsigned long long)new_crtc_state->mode.htotal *
10102 (unsigned long long)new_crtc_state->mode.vtotal;
10103
10104 res = div_u64(num, den);
10105 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10106}
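Aside: set_freesync_fixed_config() derives the fixed refresh rate in micro-Hertz from the pixel clock (kHz in struct drm_display_mode) and the total raster size; the intermediate product is far past 32 bits, hence the 64-bit math and div_u64(). A standalone sketch of the same arithmetic with illustrative 1080p@60 numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* CEA 1920x1080@60 timing, clock in kHz as in struct drm_display_mode */
        uint64_t clock = 148500, htotal = 2200, vtotal = 1125;

        /* kHz * 1000 -> Hz, then * 1000000 -> uHz; the numerator is ~1.5e14 */
        uint64_t num = clock * 1000 * 1000000ULL;
        uint64_t den = htotal * vtotal;

        printf("fixed_refresh_in_uhz = %llu\n",
               (unsigned long long)(num / den));        /* prints 60000000 */
        return 0;
}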
10107
4b9674e5
LL
10108static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10109 struct drm_atomic_state *state,
10110 struct drm_crtc *crtc,
10111 struct drm_crtc_state *old_crtc_state,
10112 struct drm_crtc_state *new_crtc_state,
10113 bool enable,
10114 bool *lock_and_validation_needed)
e7b07cee 10115{
eb3dc897 10116 struct dm_atomic_state *dm_state = NULL;
54d76575 10117 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 10118 struct dc_stream_state *new_stream;
62f55537 10119 int ret = 0;
d4d4a645 10120
1f6010a9
DF
10121 /*
10122 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10123 * update changed items
10124 */
4b9674e5
LL
10125 struct amdgpu_crtc *acrtc = NULL;
10126 struct amdgpu_dm_connector *aconnector = NULL;
10127 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10128 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 10129
4b9674e5 10130 new_stream = NULL;
9635b754 10131
4b9674e5
LL
10132 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10133 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10134 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 10135 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 10136
4b9674e5
LL
10137 /* TODO This hack should go away */
10138 if (aconnector && enable) {
10139 /* Make sure fake sink is created in plug-in scenario */
10140 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10141 &aconnector->base);
10142 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10143 &aconnector->base);
19f89e23 10144
4b9674e5
LL
10145 if (IS_ERR(drm_new_conn_state)) {
10146 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10147 goto fail;
10148 }
19f89e23 10149
4b9674e5
LL
10150 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10151 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 10152
02d35a67
JFZ
10153 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10154 goto skip_modeset;
10155
cbd14ae7
SW
10156 new_stream = create_validate_stream_for_sink(aconnector,
10157 &new_crtc_state->mode,
10158 dm_new_conn_state,
10159 dm_old_crtc_state->stream);
19f89e23 10160
4b9674e5
LL
10161 /*
10162 * we can have no stream on ACTION_SET if a display
10163 * was disconnected during S3; in this case it is not an
10164 * error, as the OS will be updated after detection and
10165 * will do the right thing on the next atomic commit
10166 */
19f89e23 10167
4b9674e5
LL
10168 if (!new_stream) {
10169 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10170 __func__, acrtc->base.base.id);
10171 ret = -ENOMEM;
10172 goto fail;
10173 }
e7b07cee 10174
3d4e52d0
VL
10175 /*
10176 * TODO: Check VSDB bits to decide whether this should
10177 * be enabled or not.
10178 */
10179 new_stream->triggered_crtc_reset.enabled =
10180 dm->force_timing_sync;
10181
4b9674e5 10182 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 10183
88694af9
NK
10184 ret = fill_hdr_info_packet(drm_new_conn_state,
10185 &new_stream->hdr_static_metadata);
10186 if (ret)
10187 goto fail;
10188
7e930949
NK
10189 /*
10190 * If we already removed the old stream from the context
10191 * (and set the new stream to NULL) then we can't reuse
10192 * the old stream even if the stream and scaling are unchanged.
10193 * We'll hit the BUG_ON and black screen.
10194 *
10195 * TODO: Refactor this function to allow this check to work
10196 * in all conditions.
10197 */
a85ba005
NC
10198 if (amdgpu_freesync_vid_mode &&
10199 dm_new_crtc_state->stream &&
10200 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10201 goto skip_modeset;
10202
7e930949
NK
10203 if (dm_new_crtc_state->stream &&
10204 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
10205 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10206 new_crtc_state->mode_changed = false;
10207 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10208 new_crtc_state->mode_changed);
62f55537 10209 }
4b9674e5 10210 }
b830ebc9 10211
02d35a67 10212 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
10213 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10214 goto skip_modeset;
e7b07cee 10215
4711c033 10216 DRM_DEBUG_ATOMIC(
4b9674e5
LL
10217 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10218 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10219 "connectors_changed:%d\n",
10220 acrtc->crtc_id,
10221 new_crtc_state->enable,
10222 new_crtc_state->active,
10223 new_crtc_state->planes_changed,
10224 new_crtc_state->mode_changed,
10225 new_crtc_state->active_changed,
10226 new_crtc_state->connectors_changed);
62f55537 10227
4b9674e5
LL
10228 /* Remove stream for any changed/disabled CRTC */
10229 if (!enable) {
62f55537 10230
4b9674e5
LL
10231 if (!dm_old_crtc_state->stream)
10232 goto skip_modeset;
eb3dc897 10233
a85ba005
NC
10234 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10235 is_timing_unchanged_for_freesync(new_crtc_state,
10236 old_crtc_state)) {
10237 new_crtc_state->mode_changed = false;
10238 DRM_DEBUG_DRIVER(
10239 "Mode change not required for front porch change, "
10240 "setting mode_changed to %d",
10241 new_crtc_state->mode_changed);
10242
10243 set_freesync_fixed_config(dm_new_crtc_state);
10244
10245 goto skip_modeset;
10246 } else if (amdgpu_freesync_vid_mode && aconnector &&
10247 is_freesync_video_mode(&new_crtc_state->mode,
10248 aconnector)) {
e88ebd83
SC
10249 struct drm_display_mode *high_mode;
10250
10251 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10252 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10253 set_freesync_fixed_config(dm_new_crtc_state);
10254 }
a85ba005
NC
10255 }
10256
4b9674e5
LL
10257 ret = dm_atomic_get_state(state, &dm_state);
10258 if (ret)
10259 goto fail;
e7b07cee 10260
4b9674e5
LL
10261 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10262 crtc->base.id);
62f55537 10263
4b9674e5
LL
10264 /* i.e. reset mode */
10265 if (dc_remove_stream_from_ctx(
10266 dm->dc,
10267 dm_state->context,
10268 dm_old_crtc_state->stream) != DC_OK) {
10269 ret = -EINVAL;
10270 goto fail;
10271 }
62f55537 10272
4b9674e5
LL
10273 dc_stream_release(dm_old_crtc_state->stream);
10274 dm_new_crtc_state->stream = NULL;
bb47de73 10275
4b9674e5 10276 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 10277
4b9674e5 10278 *lock_and_validation_needed = true;
62f55537 10279
4b9674e5
LL
10280 } else { /* Add stream for any updated/enabled CRTC */
10281 /*
10282 * Quick fix to prevent a NULL pointer dereference on new_stream when newly
10283 * added MST connectors are not found in the existing crtc_state in chained mode.
10284 * TODO: dig out the root cause of this
10285 */
10286 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10287 goto skip_modeset;
62f55537 10288
4b9674e5
LL
10289 if (modereset_required(new_crtc_state))
10290 goto skip_modeset;
62f55537 10291
4b9674e5
LL
10292 if (modeset_required(new_crtc_state, new_stream,
10293 dm_old_crtc_state->stream)) {
62f55537 10294
4b9674e5 10295 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 10296
4b9674e5
LL
10297 ret = dm_atomic_get_state(state, &dm_state);
10298 if (ret)
10299 goto fail;
27b3f4fc 10300
4b9674e5 10301 dm_new_crtc_state->stream = new_stream;
62f55537 10302
4b9674e5 10303 dc_stream_retain(new_stream);
1dc90497 10304
4711c033
LT
10305 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10306 crtc->base.id);
1dc90497 10307
4b9674e5
LL
10308 if (dc_add_stream_to_ctx(
10309 dm->dc,
10310 dm_state->context,
10311 dm_new_crtc_state->stream) != DC_OK) {
10312 ret = -EINVAL;
10313 goto fail;
9b690ef3
BL
10314 }
10315
4b9674e5
LL
10316 *lock_and_validation_needed = true;
10317 }
10318 }
e277adc5 10319
4b9674e5
LL
10320skip_modeset:
10321 /* Release extra reference */
10322 if (new_stream)
10323 dc_stream_release(new_stream);
e277adc5 10324
4b9674e5
LL
10325 /*
10326 * We want to do dc stream updates that do not require a
10327 * full modeset below.
10328 */
2afda735 10329 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
10330 return 0;
10331 /*
10332 * Given above conditions, the dc state cannot be NULL because:
10333 * 1. We're in the process of enabling CRTCs (just been added
10334 * to the dc context, or already on the context)
10335 * 2. Has a valid connector attached, and
10336 * 3. Is currently active and enabled.
10337 * => The dc stream state currently exists.
10338 */
10339 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 10340
4b9674e5 10341 /* Scaling or underscan settings */
c521fc31
RL
10342 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10343 drm_atomic_crtc_needs_modeset(new_crtc_state))
4b9674e5
LL
10344 update_stream_scaling_settings(
10345 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 10346
b05e2c5e
DF
10347 /* ABM settings */
10348 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10349
4b9674e5
LL
10350 /*
10351 * Color management settings. We also update color properties
10352 * when a modeset is needed, to ensure it gets reprogrammed.
10353 */
10354 if (dm_new_crtc_state->base.color_mgmt_changed ||
10355 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 10356 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
10357 if (ret)
10358 goto fail;
62f55537 10359 }
e7b07cee 10360
4b9674e5
LL
10361 /* Update Freesync settings. */
10362 get_freesync_config_for_crtc(dm_new_crtc_state,
10363 dm_new_conn_state);
10364
62f55537 10365 return ret;
9635b754
DS
10366
10367fail:
10368 if (new_stream)
10369 dc_stream_release(new_stream);
10370 return ret;
62f55537 10371}
9b690ef3 10372
f6ff2a08
NK
10373static bool should_reset_plane(struct drm_atomic_state *state,
10374 struct drm_plane *plane,
10375 struct drm_plane_state *old_plane_state,
10376 struct drm_plane_state *new_plane_state)
10377{
10378 struct drm_plane *other;
10379 struct drm_plane_state *old_other_state, *new_other_state;
10380 struct drm_crtc_state *new_crtc_state;
10381 int i;
10382
70a1efac
NK
10383 /*
10384 * TODO: Remove this hack once the checks below are sufficient
10385 * to determine when we need to reset all the planes on
10386 * the stream.
10387 */
10388 if (state->allow_modeset)
10389 return true;
10390
f6ff2a08
NK
10391 /* Exit early if we know that we're adding or removing the plane. */
10392 if (old_plane_state->crtc != new_plane_state->crtc)
10393 return true;
10394
10395 /* old crtc == new_crtc == NULL, plane not in context. */
10396 if (!new_plane_state->crtc)
10397 return false;
10398
10399 new_crtc_state =
10400 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10401
10402 if (!new_crtc_state)
10403 return true;
10404
7316c4ad
NK
10405 /* CRTC Degamma changes currently require us to recreate planes. */
10406 if (new_crtc_state->color_mgmt_changed)
10407 return true;
10408
f6ff2a08
NK
10409 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10410 return true;
10411
10412 /*
10413 * If there are any new primary or overlay planes being added or
10414 * removed then the z-order can potentially change. To ensure
10415 * correct z-order and pipe acquisition the current DC architecture
10416 * requires us to remove and recreate all existing planes.
10417 *
10418 * TODO: Come up with a more elegant solution for this.
10419 */
10420 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 10421 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
10422 if (other->type == DRM_PLANE_TYPE_CURSOR)
10423 continue;
10424
10425 if (old_other_state->crtc != new_plane_state->crtc &&
10426 new_other_state->crtc != new_plane_state->crtc)
10427 continue;
10428
10429 if (old_other_state->crtc != new_other_state->crtc)
10430 return true;
10431
dc4cb30d
NK
10432 /* Src/dst size and scaling updates. */
10433 if (old_other_state->src_w != new_other_state->src_w ||
10434 old_other_state->src_h != new_other_state->src_h ||
10435 old_other_state->crtc_w != new_other_state->crtc_w ||
10436 old_other_state->crtc_h != new_other_state->crtc_h)
10437 return true;
10438
10439 /* Rotation / mirroring updates. */
10440 if (old_other_state->rotation != new_other_state->rotation)
10441 return true;
10442
10443 /* Blending updates. */
10444 if (old_other_state->pixel_blend_mode !=
10445 new_other_state->pixel_blend_mode)
10446 return true;
10447
10448 /* Alpha updates. */
10449 if (old_other_state->alpha != new_other_state->alpha)
10450 return true;
10451
10452 /* Colorspace changes. */
10453 if (old_other_state->color_range != new_other_state->color_range ||
10454 old_other_state->color_encoding != new_other_state->color_encoding)
10455 return true;
10456
9a81cc60
NK
10457 /* Framebuffer checks fall at the end. */
10458 if (!old_other_state->fb || !new_other_state->fb)
10459 continue;
10460
10461 /* Pixel format changes can require bandwidth updates. */
10462 if (old_other_state->fb->format != new_other_state->fb->format)
10463 return true;
10464
6eed95b0
BN
10465 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10466 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
10467
10468 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
10469 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10470 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
10471 return true;
10472 }
10473
10474 return false;
10475}
10476
b0455fda
SS
10477static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10478 struct drm_plane_state *new_plane_state,
10479 struct drm_framebuffer *fb)
10480{
e72868c4
SS
10481 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10482 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 10483 unsigned int pitch;
e72868c4 10484 bool linear;
b0455fda
SS
10485
10486 if (fb->width > new_acrtc->max_cursor_width ||
10487 fb->height > new_acrtc->max_cursor_height) {
10488 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10489 new_plane_state->fb->width,
10490 new_plane_state->fb->height);
10491 return -EINVAL;
10492 }
10493 if (new_plane_state->src_w != fb->width << 16 ||
10494 new_plane_state->src_h != fb->height << 16) {
10495 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10496 return -EINVAL;
10497 }
10498
10499 /* Pitch in pixels */
10500 pitch = fb->pitches[0] / fb->format->cpp[0];
10501
10502 if (fb->width != pitch) {
10503 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10504 fb->width, pitch);
10505 return -EINVAL;
10506 }
10507
10508 switch (pitch) {
10509 case 64:
10510 case 128:
10511 case 256:
10512 /* FB pitch is supported by cursor plane */
10513 break;
10514 default:
10515 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10516 return -EINVAL;
10517 }
10518
e72868c4
SS
10519 /* Core DRM takes care of checking FB modifiers, so we only need to
10520 * check tiling flags when the FB doesn't have a modifier. */
10521 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10522 if (adev->family < AMDGPU_FAMILY_AI) {
10523 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10524 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10525 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10526 } else {
10527 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10528 }
10529 if (!linear) {
10530 DRM_DEBUG_ATOMIC("Cursor FB not linear");
10531 return -EINVAL;
10532 }
10533 }
10534
b0455fda
SS
10535 return 0;
10536}
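Aside: the pitch check above converts the framebuffer stride from bytes to pixels and then accepts only the cursor widths the hardware supports. A minimal standalone sketch of that validation; the 4-bytes-per-pixel ARGB cursor in the example values is an assumption for illustration:

#include <stdio.h>

/* cpp = bytes per pixel, mirroring fb->format->cpp[0]. */
static int cursor_pitch_ok(unsigned int pitch_bytes, unsigned int cpp,
                           unsigned int width)
{
        unsigned int pitch = pitch_bytes / cpp; /* stride in pixels */

        if (width != pitch)
                return 0;

        /* Only these pitches are accepted by the cursor plane. */
        return pitch == 64 || pitch == 128 || pitch == 256;
}

int main(void)
{
        /* 64x64 ARGB8888 cursor: 4 bytes per pixel, 256-byte rows. */
        printf("%d\n", cursor_pitch_ok(256, 4, 64));    /* 1: valid */
        printf("%d\n", cursor_pitch_ok(512, 4, 64));    /* 0: width != pitch */
        return 0;
}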
10537
9e869063
LL
10538static int dm_update_plane_state(struct dc *dc,
10539 struct drm_atomic_state *state,
10540 struct drm_plane *plane,
10541 struct drm_plane_state *old_plane_state,
10542 struct drm_plane_state *new_plane_state,
10543 bool enable,
10544 bool *lock_and_validation_needed)
62f55537 10545{
eb3dc897
NK
10546
10547 struct dm_atomic_state *dm_state = NULL;
62f55537 10548 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 10549 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 10550 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 10551 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 10552 struct amdgpu_crtc *new_acrtc;
f6ff2a08 10553 bool needs_reset;
62f55537 10554 int ret = 0;
e7b07cee 10555
9b690ef3 10556
9e869063
LL
10557 new_plane_crtc = new_plane_state->crtc;
10558 old_plane_crtc = old_plane_state->crtc;
10559 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10560 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 10561
626bf90f
SS
10562 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10563 if (!enable || !new_plane_crtc ||
10564 drm_atomic_plane_disabling(plane->state, new_plane_state))
10565 return 0;
10566
10567 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10568
5f581248
SS
10569 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10570 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10571 return -EINVAL;
10572 }
10573
24f99d2b 10574 if (new_plane_state->fb) {
b0455fda
SS
10575 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10576 new_plane_state->fb);
10577 if (ret)
10578 return ret;
24f99d2b
SS
10579 }
10580
9e869063 10581 return 0;
626bf90f 10582 }
9b690ef3 10583
f6ff2a08
NK
10584 needs_reset = should_reset_plane(state, plane, old_plane_state,
10585 new_plane_state);
10586
9e869063
LL
10587 /* Remove any changed/removed planes */
10588 if (!enable) {
f6ff2a08 10589 if (!needs_reset)
9e869063 10590 return 0;
a7b06724 10591
9e869063
LL
10592 if (!old_plane_crtc)
10593 return 0;
62f55537 10594
9e869063
LL
10595 old_crtc_state = drm_atomic_get_old_crtc_state(
10596 state, old_plane_crtc);
10597 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 10598
9e869063
LL
10599 if (!dm_old_crtc_state->stream)
10600 return 0;
62f55537 10601
9e869063
LL
10602 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10603 plane->base.id, old_plane_crtc->base.id);
9b690ef3 10604
9e869063
LL
10605 ret = dm_atomic_get_state(state, &dm_state);
10606 if (ret)
10607 return ret;
eb3dc897 10608
9e869063
LL
10609 if (!dc_remove_plane_from_context(
10610 dc,
10611 dm_old_crtc_state->stream,
10612 dm_old_plane_state->dc_state,
10613 dm_state->context)) {
62f55537 10614
c3537613 10615 return -EINVAL;
9e869063 10616 }
e7b07cee 10617
9b690ef3 10618
9e869063
LL
10619 dc_plane_state_release(dm_old_plane_state->dc_state);
10620 dm_new_plane_state->dc_state = NULL;
1dc90497 10621
9e869063 10622 *lock_and_validation_needed = true;
1dc90497 10623
9e869063
LL
10624 } else { /* Add new planes */
10625 struct dc_plane_state *dc_new_plane_state;
1dc90497 10626
9e869063
LL
10627 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10628 return 0;
e7b07cee 10629
9e869063
LL
10630 if (!new_plane_crtc)
10631 return 0;
e7b07cee 10632
9e869063
LL
10633 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10634 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 10635
9e869063
LL
10636 if (!dm_new_crtc_state->stream)
10637 return 0;
62f55537 10638
f6ff2a08 10639 if (!needs_reset)
9e869063 10640 return 0;
62f55537 10641
8c44515b
AP
10642 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10643 if (ret)
10644 return ret;
10645
9e869063 10646 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 10647
9e869063
LL
10648 dc_new_plane_state = dc_create_plane_state(dc);
10649 if (!dc_new_plane_state)
10650 return -ENOMEM;
62f55537 10651
4711c033
LT
10652 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10653 plane->base.id, new_plane_crtc->base.id);
8c45c5db 10654
695af5f9 10655 ret = fill_dc_plane_attributes(
1348969a 10656 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
10657 dc_new_plane_state,
10658 new_plane_state,
10659 new_crtc_state);
10660 if (ret) {
10661 dc_plane_state_release(dc_new_plane_state);
10662 return ret;
10663 }
62f55537 10664
9e869063
LL
10665 ret = dm_atomic_get_state(state, &dm_state);
10666 if (ret) {
10667 dc_plane_state_release(dc_new_plane_state);
10668 return ret;
10669 }
eb3dc897 10670
9e869063
LL
10671 /*
10672 * Any atomic check errors that occur after this will
10673 * not need a release. The plane state will be attached
10674 * to the stream, and therefore part of the atomic
10675 * state. It'll be released when the atomic state is
10676 * cleaned.
10677 */
10678 if (!dc_add_plane_to_context(
10679 dc,
10680 dm_new_crtc_state->stream,
10681 dc_new_plane_state,
10682 dm_state->context)) {
62f55537 10683
9e869063
LL
10684 dc_plane_state_release(dc_new_plane_state);
10685 return -EINVAL;
10686 }
8c45c5db 10687
9e869063 10688 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 10689
9e869063
LL
10690 /* Tell DC to do a full surface update every time there
10691 * is a plane change. Inefficient, but works for now.
10692 */
10693 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10694
10695 *lock_and_validation_needed = true;
62f55537 10696 }
e7b07cee
HW
10697
10698
62f55537
AG
10699 return ret;
10700}
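Aside: the error handling in the "add new planes" branch follows a reference-ownership hand-off: dc_plane_state_release() runs on every failure path until dc_add_plane_to_context() succeeds, after which the context owns the reference and releases it when the atomic state is cleaned. A toy standalone sketch of that pattern (the obj and ctx types are invented for illustration):

#include <stdlib.h>

struct obj { int refs; };
struct ctx { struct obj *owned; };

static struct obj *obj_create(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (o)
                o->refs = 1;
        return o;
}

static void obj_release(struct obj *o)
{
        if (o && --o->refs == 0)
                free(o);
}

/* On success the context takes over the caller's reference. */
static int add_to_context(struct ctx *c, struct obj *o, int fail)
{
        if (fail)
                return -1;
        c->owned = o;
        return 0;
}

static void ctx_cleanup(struct ctx *c)
{
        obj_release(c->owned);  /* "released when the atomic state is cleaned" */
        c->owned = NULL;
}

int main(void)
{
        struct ctx c = { 0 };
        struct obj *o = obj_create();

        if (!o)
                return 1;
        if (add_to_context(&c, o, 0))
                obj_release(o);         /* failure path: caller must release */
        else
                ctx_cleanup(&c);        /* success path: context releases later */
        return 0;
}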
a87fa993 10701
12f4849a
SS
10702static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10703 struct drm_crtc *crtc,
10704 struct drm_crtc_state *new_crtc_state)
10705{
d1bfbe8a
SS
10706 struct drm_plane *cursor = crtc->cursor, *underlying;
10707 struct drm_plane_state *new_cursor_state, *new_underlying_state;
10708 int i;
10709 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
12f4849a
SS
10710
10711 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10712 * cursor per pipe but it's going to inherit the scaling and
10713 * positioning from the underlying pipe. Check that the cursor plane's
d1bfbe8a 10714 * scaling matches the underlying planes'. */
12f4849a 10715
d1bfbe8a
SS
10716 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10717 if (!new_cursor_state || !new_cursor_state->fb) {
12f4849a
SS
10718 return 0;
10719 }
10720
10721 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10722 (new_cursor_state->src_w >> 16);
10723 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10724 (new_cursor_state->src_h >> 16);
10725
d1bfbe8a
SS
10726 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10727 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10728 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10729 continue;
12f4849a 10730
d1bfbe8a
SS
10731 /* Ignore disabled planes */
10732 if (!new_underlying_state->fb)
10733 continue;
10734
10735 underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10736 (new_underlying_state->src_w >> 16);
10737 underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10738 (new_underlying_state->src_h >> 16);
10739
10740 if (cursor_scale_w != underlying_scale_w ||
10741 cursor_scale_h != underlying_scale_h) {
10742 drm_dbg_atomic(crtc->dev,
10743 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10744 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10745 return -EINVAL;
10746 }
10747
10748 /* If this plane covers the whole CRTC, no need to check planes underneath */
10749 if (new_underlying_state->crtc_x <= 0 &&
10750 new_underlying_state->crtc_y <= 0 &&
10751 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10752 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10753 break;
12f4849a
SS
10754 }
10755
10756 return 0;
10757}
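Aside: cursor and underlying plane scaling are compared in thousandths, computed from the CRTC destination size and the 16.16 fixed-point source size. A standalone sketch of that computation with example values (a 1:1 cursor over a 2:1 downscaled plane, a combination the check above would reject):

#include <stdio.h>

/* Scale in thousandths: CRTC destination size vs 16.16 fixed-point source. */
static unsigned int scale_x1000(unsigned int crtc_size, unsigned int src_16_16)
{
        return crtc_size * 1000 / (src_16_16 >> 16);
}

int main(void)
{
        /* 64x64 cursor shown at 64x64: 1.0x -> 1000 */
        unsigned int cursor = scale_x1000(64, 64u << 16);
        /* Underlying plane: 3840-wide source scanned out at 1920: 0.5x -> 500 */
        unsigned int plane = scale_x1000(1920, 3840u << 16);

        printf("cursor %u vs underlying %u -> %s\n", cursor, plane,
               cursor == plane ? "OK" : "-EINVAL");
        return 0;
}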
10758
e10517b3 10759#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
10760static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10761{
10762 struct drm_connector *connector;
10763 struct drm_connector_state *conn_state;
10764 struct amdgpu_dm_connector *aconnector = NULL;
10765 int i;
10766 for_each_new_connector_in_state(state, connector, conn_state, i) {
10767 if (conn_state->crtc != crtc)
10768 continue;
10769
10770 aconnector = to_amdgpu_dm_connector(connector);
10771 if (!aconnector->port || !aconnector->mst_port)
10772 aconnector = NULL;
10773 else
10774 break;
10775 }
10776
10777 if (!aconnector)
10778 return 0;
10779
10780 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10781}
e10517b3 10782#endif
44be939f 10783
b8592b48
LL
10784/**
10785 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10786 * @dev: The DRM device
10787 * @state: The atomic state to commit
10788 *
10789 * Validate that the given atomic state is programmable by DC into hardware.
10790 * This involves constructing a &struct dc_state reflecting the new hardware
10791 * state we wish to commit, then querying DC to see if it is programmable. It's
10792 * important not to modify the existing DC state. Otherwise, atomic_check
10793 * may unexpectedly commit hardware changes.
10794 *
10795 * When validating the DC state, it's important that the right locks are
10796 * acquired. For the full update case, which removes/adds/updates streams on one
10797 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
10798 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 10799 * flip using DRM's synchronization events.
b8592b48
LL
10800 *
10801 * Note that DM adds the affected connectors for all CRTCs in state, when that
10802 * might not seem necessary. This is because DC stream creation requires the
10803 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10804 * be possible but non-trivial - a possible TODO item.
10805 *
10806 * Return: -Error code if validation failed.
10807 */
7578ecda
AD
10808static int amdgpu_dm_atomic_check(struct drm_device *dev,
10809 struct drm_atomic_state *state)
62f55537 10810{
1348969a 10811 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 10812 struct dm_atomic_state *dm_state = NULL;
62f55537 10813 struct dc *dc = adev->dm.dc;
62f55537 10814 struct drm_connector *connector;
c2cea706 10815 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 10816 struct drm_crtc *crtc;
fc9e9920 10817 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
10818 struct drm_plane *plane;
10819 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 10820 enum dc_status status;
1e88ad0a 10821 int ret, i;
62f55537 10822 bool lock_and_validation_needed = false;
886876ec 10823 struct dm_crtc_state *dm_old_crtc_state;
6513104b
HW
10824#if defined(CONFIG_DRM_AMD_DC_DCN)
10825 struct dsc_mst_fairness_vars vars[MAX_PIPES];
41724ea2
BL
10826 struct drm_dp_mst_topology_state *mst_state;
10827 struct drm_dp_mst_topology_mgr *mgr;
6513104b 10828#endif
62f55537 10829
e8a98235 10830 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 10831
62f55537 10832 ret = drm_atomic_helper_check_modeset(dev, state);
68ca1c3e
S
10833 if (ret) {
10834 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
01e28f9c 10835 goto fail;
68ca1c3e 10836 }
62f55537 10837
c5892a10
SW
10838 /* Check connector changes */
10839 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10840 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10841 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10842
10843 /* Skip connectors that are disabled or part of modeset already. */
10844 if (!old_con_state->crtc && !new_con_state->crtc)
10845 continue;
10846
10847 if (!new_con_state->crtc)
10848 continue;
10849
10850 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10851 if (IS_ERR(new_crtc_state)) {
68ca1c3e 10852 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
c5892a10
SW
10853 ret = PTR_ERR(new_crtc_state);
10854 goto fail;
10855 }
10856
10857 if (dm_old_con_state->abm_level !=
10858 dm_new_con_state->abm_level)
10859 new_crtc_state->connectors_changed = true;
10860 }
10861
e10517b3 10862#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 10863 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
10864 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10865 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10866 ret = add_affected_mst_dsc_crtcs(state, crtc);
68ca1c3e
S
10867 if (ret) {
10868 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
44be939f 10869 goto fail;
68ca1c3e 10870 }
44be939f
ML
10871 }
10872 }
10873 }
e10517b3 10874#endif
1e88ad0a 10875 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
10876 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10877
1e88ad0a 10878 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 10879 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
10880 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10881 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 10882 continue;
7bef1af3 10883
03fc4cf4 10884 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
68ca1c3e
S
10885 if (ret) {
10886 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
03fc4cf4 10887 goto fail;
68ca1c3e 10888 }
03fc4cf4 10889
1e88ad0a
S
10890 if (!new_crtc_state->enable)
10891 continue;
fc9e9920 10892
1e88ad0a 10893 ret = drm_atomic_add_affected_connectors(state, crtc);
68ca1c3e
S
10894 if (ret) {
10895 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
706bc8c5 10896 goto fail;
68ca1c3e 10897 }
fc9e9920 10898
1e88ad0a 10899 ret = drm_atomic_add_affected_planes(state, crtc);
68ca1c3e
S
10900 if (ret) {
10901 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
1e88ad0a 10902 goto fail;
68ca1c3e 10903 }
115a385c 10904
cbac53f7 10905 if (dm_old_crtc_state->dsc_force_changed)
115a385c 10906 new_crtc_state->mode_changed = true;
e7b07cee
HW
10907 }
10908
2d9e6431
NK
10909 /*
10910 * Add all primary and overlay planes on the CRTC to the state
10911 * whenever a plane is enabled to maintain correct z-ordering
10912 * and to enable fast surface updates.
10913 */
10914 drm_for_each_crtc(crtc, dev) {
10915 bool modified = false;
10916
10917 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10918 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10919 continue;
10920
10921 if (new_plane_state->crtc == crtc ||
10922 old_plane_state->crtc == crtc) {
10923 modified = true;
10924 break;
10925 }
10926 }
10927
10928 if (!modified)
10929 continue;
10930
10931 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10932 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10933 continue;
10934
10935 new_plane_state =
10936 drm_atomic_get_plane_state(state, plane);
10937
10938 if (IS_ERR(new_plane_state)) {
10939 ret = PTR_ERR(new_plane_state);
68ca1c3e 10940 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
2d9e6431
NK
10941 goto fail;
10942 }
10943 }
10944 }
10945
62f55537 10946 /* Remove exiting planes if they are modified */
9e869063
LL
10947 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10948 ret = dm_update_plane_state(dc, state, plane,
10949 old_plane_state,
10950 new_plane_state,
10951 false,
10952 &lock_and_validation_needed);
68ca1c3e
S
10953 if (ret) {
10954 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 10955 goto fail;
68ca1c3e 10956 }
62f55537
AG
10957 }
10958
10959 /* Disable all crtcs which require disable */
4b9674e5
LL
10960 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10961 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10962 old_crtc_state,
10963 new_crtc_state,
10964 false,
10965 &lock_and_validation_needed);
68ca1c3e
S
10966 if (ret) {
10967 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
4b9674e5 10968 goto fail;
68ca1c3e 10969 }
62f55537
AG
10970 }
10971
10972 /* Enable all crtcs which require enable */
4b9674e5
LL
10973 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10974 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10975 old_crtc_state,
10976 new_crtc_state,
10977 true,
10978 &lock_and_validation_needed);
68ca1c3e
S
10979 if (ret) {
10980 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
4b9674e5 10981 goto fail;
68ca1c3e 10982 }
62f55537
AG
10983 }
10984
10985 /* Add new/modified planes */
9e869063
LL
10986 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10987 ret = dm_update_plane_state(dc, state, plane,
10988 old_plane_state,
10989 new_plane_state,
10990 true,
10991 &lock_and_validation_needed);
68ca1c3e
S
10992 if (ret) {
10993 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 10994 goto fail;
68ca1c3e 10995 }
62f55537
AG
10996 }
10997
b349f76e
ES
10998 /* Run this here since we want to validate the streams we created */
10999 ret = drm_atomic_helper_check_planes(dev, state);
68ca1c3e
S
11000 if (ret) {
11001 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
b349f76e 11002 goto fail;
68ca1c3e 11003 }
62f55537 11004
12f4849a
SS
11005 /* Check cursor planes scaling */
11006 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11007 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
68ca1c3e
S
11008 if (ret) {
11009 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
12f4849a 11010 goto fail;
68ca1c3e 11011 }
12f4849a
SS
11012 }
11013
43d10d30
NK
11014 if (state->legacy_cursor_update) {
11015 /*
11016 * This is a fast cursor update coming from the plane update
11017 * helper, check if it can be done asynchronously for better
11018 * performance.
11019 */
11020 state->async_update =
11021 !drm_atomic_helper_async_check(dev, state);
11022
11023 /*
11024 * Skip the remaining global validation if this is an async
11025 * update. Cursor updates can be done without affecting
11026 * state or bandwidth calcs and this avoids the performance
11027 * penalty of locking the private state object and
11028 * allocating a new dc_state.
11029 */
11030 if (state->async_update)
11031 return 0;
11032 }
11033
ebdd27e1 11034 /* Check scaling and underscan changes */
1f6010a9 11035 /* TODO Removed scaling changes validation due to inability to commit
e7b07cee
HW
11036 * new stream into context w/o causing full reset. Need to
11037 * decide how to handle.
11038 */
c2cea706 11039 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
11040 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11041 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11042 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
11043
11044 /* Skip any modesets/resets */
0bc9706d
LSL
11045 if (!acrtc || drm_atomic_crtc_needs_modeset(
11046 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
11047 continue;
11048
b830ebc9 11049 /* Skip anything that is not a scaling or underscan change */
54d76575 11050 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
11051 continue;
11052
11053 lock_and_validation_needed = true;
11054 }
11055
41724ea2
BL
11056#if defined(CONFIG_DRM_AMD_DC_DCN)
11057 /* set the slot info for each mst_state based on the link encoding format */
11058 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11059 struct amdgpu_dm_connector *aconnector;
11060 struct drm_connector *connector;
11061 struct drm_connector_list_iter iter;
11062 u8 link_coding_cap;
11063
11064 if (!mgr->mst_state)
11065 continue;
11066
11067 drm_connector_list_iter_begin(dev, &iter);
11068 drm_for_each_connector_iter(connector, &iter) {
11069 int id = connector->index;
11070
11071 if (id == mst_state->mgr->conn_base_id) {
11072 aconnector = to_amdgpu_dm_connector(connector);
11073 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11074 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11075
11076 break;
11077 }
11078 }
11079 drm_connector_list_iter_end(&iter);
11080
11081 }
11082#endif
f6d7c7fa
NK
11083 /**
11084 * Streams and planes are reset when there are changes that affect
11085 * bandwidth. Anything that affects bandwidth needs to go through
11086 * DC global validation to ensure that the configuration can be applied
11087 * to hardware.
11088 *
11089 * We have to currently stall out here in atomic_check for outstanding
11090 * commits to finish in this case because our IRQ handlers reference
11091 * DRM state directly - we can end up disabling interrupts too early
11092 * if we don't.
11093 *
11094 * TODO: Remove this stall and drop DM state private objects.
a87fa993 11095 */
f6d7c7fa 11096 if (lock_and_validation_needed) {
eb3dc897 11097 ret = dm_atomic_get_state(state, &dm_state);
68ca1c3e
S
11098 if (ret) {
11099 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
eb3dc897 11100 goto fail;
68ca1c3e 11101 }
e7b07cee
HW
11102
11103 ret = do_aquire_global_lock(dev, state);
68ca1c3e
S
11104 if (ret) {
11105 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
e7b07cee 11106 goto fail;
68ca1c3e 11107 }
1dc90497 11108
d9fe1a4c 11109#if defined(CONFIG_DRM_AMD_DC_DCN)
68ca1c3e
S
11110 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11111 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
8c20a1ed 11112 goto fail;
68ca1c3e 11113 }
8c20a1ed 11114
6513104b 11115 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
68ca1c3e
S
11116 if (ret) {
11117 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
29b9ba74 11118 goto fail;
68ca1c3e 11119 }
d9fe1a4c 11120#endif
29b9ba74 11121
ded58c7b
ZL
11122 /*
11123 * Perform validation of MST topology in the state:
11124 * We need to perform MST atomic check before calling
11125 * dc_validate_global_state(), or there is a chance
11126 * to get stuck in an infinite loop and hang eventually.
11127 */
11128 ret = drm_dp_mst_atomic_check(state);
68ca1c3e
S
11129 if (ret) {
11130 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
ded58c7b 11131 goto fail;
68ca1c3e 11132 }
74a16675
RS
11133 status = dc_validate_global_state(dc, dm_state->context, false);
11134 if (status != DC_OK) {
68ca1c3e 11135 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
74a16675 11136 dc_status_to_str(status), status);
e7b07cee
HW
11137 ret = -EINVAL;
11138 goto fail;
11139 }
bd200d19 11140 } else {
674e78ac 11141 /*
bd200d19
NK
11142 * The commit is a fast update. Fast updates shouldn't change
11143 * the DC context or affect global validation, and can have their
11144 * commit work done in parallel with other commits not touching
11145 * the same resource. If we have a new DC context as part of
11146 * the DM atomic state from validation we need to free it and
11147 * retain the existing one instead.
fde9f39a
MR
11148 *
11149 * Furthermore, since the DM atomic state only contains the DC
11150 * context and can safely be annulled, we can free the state
11151 * and clear the associated private object now to free
11152 * some memory and avoid a possible use-after-free later.
674e78ac 11153 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
						 UPDATE_TYPE_FULL :
						 UPDATE_TYPE_FAST;
	}

	/* ret must be 0 (success) at this point */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

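/*
 * Check whether the DP sink can drive a timing whose MSA parameters differ
 * from the transmitted video stream: read DP_DOWN_STREAM_PORT_COUNT from the
 * DPCD and test the DP_MSA_TIMING_PAR_IGNORED bit. Sinks that ignore the MSA
 * timing parameters are the ones that can support variable refresh, which is
 * why the FreeSync code below uses this as its DP capability gate.
 */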
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
				NULL,
				amdgpu_dm_connector->dc_link,
				DP_DOWN_STREAM_PORT_COUNT,
				&dpcd_data,
				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

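/*
 * Forward one chunk of a CEA extension block to the DMUB firmware for
 * parsing via a DMUB_CMD__EDID_CEA command and decode the reply: either an
 * ack for the chunk or, once the whole block has been sent, the contents of
 * any AMD VSDB that was found. Each chunk is limited to
 * DMUB_EDID_CEA_DATA_CHUNK_BYTES.
 */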
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
				    unsigned int offset,
				    unsigned int total_length,
				    uint8_t *data,
				    unsigned int length,
				    struct amdgpu_hdmi_vsdb_info *vsdb)
{
	bool res;
	union dmub_rb_cmd cmd;
	struct dmub_cmd_send_edid_cea *input;
	struct dmub_cmd_edid_cea_output *output;

	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	input = &cmd.edid_cea.data.input;

	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
	cmd.edid_cea.header.sub_type = 0;
	cmd.edid_cea.header.payload_bytes =
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
	input->total_length = total_length;
	memcpy(input->payload, data, length);

	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
	if (!res) {
		DRM_ERROR("EDID CEA parser failed\n");
		return false;
	}

	output = &cmd.edid_cea.data.output;

	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
		if (!output->ack.success) {
			DRM_ERROR("EDID CEA ack failed at offset %d\n",
				  output->ack.offset);
		}
	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
		if (!output->amd_vsdb.vsdb_found)
			return false;

		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
	} else {
		DRM_WARN("Unknown EDID CEA parser results\n");
		return false;
	}

	return true;
}

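/*
 * Parse a CEA extension block for the AMD VSDB using the DMCU firmware
 * path: the block is streamed to DMCU 8 bytes at a time, every intermediate
 * chunk is acked, and the final chunk yields the VSDB query result.
 */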
static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
				uint8_t *edid_ext, int len,
				struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* EDID block fully sent, expect the parse result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
			if (res) {
				/* AMD VSDB found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* not an AMD VSDB */
			return false;
		}

		/* check for ack */
		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
		if (!res)
			return false;
	}

	return false;
}

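/*
 * DMUB counterpart of the DMCU path above: stream the CEA extension block
 * to the DMUB firmware 8 bytes at a time; dm_edid_parser_send_cea() fills
 * vsdb_info from the reply to the final chunk.
 */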
static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
				uint8_t *edid_ext, int len,
				struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
			return false;
	}

	return vsdb_info->freesync_supported;
}

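/* Dispatch CEA parsing to DMUB when present, otherwise fall back to DMCU. */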
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
			   uint8_t *edid_ext, int len,
			   struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);

	if (adev->dm.dmub_srv)
		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
	else
		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
}

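/*
 * Locate the CEA extension block in the EDID and have the display firmware
 * scan it for the AMD VSDB. Returns the index of the CEA extension on
 * success, or -ENODEV when there is no EDID, no CEA block or no valid VSDB.
 */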
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
			       struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}

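/**
 * amdgpu_dm_update_freesync_caps - update the connector's FreeSync capability
 * @connector: DRM connector to update
 * @edid: EDID to parse, may be NULL
 *
 * Derive FreeSync capability from the sink: for DP/eDP sinks that ignore MSA
 * timing parameters, the refresh-rate range comes from the EDID detailed
 * monitor-range descriptor; for HDMI sinks it comes from the AMD VSDB in the
 * CEA extension. A range wider than 10 Hz is treated as FreeSync capable,
 * and the result is mirrored into the DRM vrr_capable property.
 */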
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
		goto update;
	}

	sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
			|| sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		if (edid_check_required && (edid->version > 1 ||
				(edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1
				 * then no additional timing information is
				 * provided. Default GTF, GTF Secondary curve and
				 * CVT are not supported.
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10) {
				freesync_capable = true;
			}
		}
	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

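/*
 * Propagate the force_timing_sync setting to every stream in the current DC
 * state and retrigger CRTC timing synchronization, under the dc_lock mutex.
 */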
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

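/*
 * Register write helper used by DC: write through CGS and record the access
 * in the amdgpu_dc_wreg trace. With DM_CHECK_ADDR_0 defined, writes to
 * register offset 0 are rejected as invalid.
 */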
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

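/*
 * Register read helper used by DC: reject (ASSERT and return 0) reads
 * attempted while a DMUB register-offload gather is in progress, otherwise
 * read through CGS and record the access in the amdgpu_dc_rreg trace.
 */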
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

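/*
 * Translate a DMUB async-to-sync access status into a return value and an
 * operation result: for AUX accesses, success yields the reply length and
 * the notify result; for SET_CONFIG it yields 0 and the SC status. Errors
 * map to the corresponding AUX_RET_ERROR_* / SET_CONFIG_* codes.
 */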
int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
					 uint8_t status_type, uint32_t *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int return_status = -1;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;

	if (is_cmd_aux) {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = p_notify->aux_reply.length;
			*operation_result = p_notify->result;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
			*operation_result = AUX_RET_ERROR_TIMEOUT;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		} else {
			*operation_result = AUX_RET_ERROR_UNKNOWN;
		}
	} else {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = 0;
			*operation_result = p_notify->sc_status;
		} else {
			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
		}
	}

	return return_status;
}

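/*
 * Issue a DMUB AUX transfer or SET_CONFIG request and wait (up to 10 s) for
 * the dmub_aux_transfer_done completion signalled from the notify handler;
 * on a successful AUX read, copy the reply data back into the caller's
 * payload before translating the final status.
 */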
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
					     unsigned int link_index, void *cmd_payload, void *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	if (is_cmd_aux) {
		dc_process_dmub_aux_transfer_async(ctx->dc,
			link_index, (struct aux_payload *)cmd_payload);
	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
			(struct set_config_cmd_payload *)cmd_payload,
			adev->dm.dmub_notify)) {
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
					(uint32_t *)operation_result);
	}

	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timed out!");
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
				(uint32_t *)operation_result);
	}

	if (is_cmd_aux) {
		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
			struct aux_payload *payload = (struct aux_payload *)cmd_payload;

			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
				       adev->dm.dmub_notify->aux_reply.length);
			}
		}
	}

	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
			(uint32_t *)operation_result);
}