drm/amd/display: Fix LTTPR not Enabled
[linux-2.6-block.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
CommitLineData
4562236b
HW
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
0cf5eb76
DF
26/* The caprices of the preprocessor require that this be declared right here */
27#define CREATE_TRACE_POINTS
28
4562236b
HW
29#include "dm_services_types.h"
30#include "dc.h"
d2aa1356 31#include "dc_link_dp.h"
f6e03f80 32#include "link_enc_cfg.h"
1dc90497 33#include "dc/inc/core_types.h"
a7669aff 34#include "dal_asic_id.h"
cdca3f21 35#include "dmub/dmub_srv.h"
743b9786
NK
36#include "dc/inc/hw/dmcu.h"
37#include "dc/inc/hw/abm.h"
9a71c7d3 38#include "dc/dc_dmub_srv.h"
f9b4f20c 39#include "dc/dc_edid_parser.h"
81927e28 40#include "dc/dc_stat.h"
9d83722d 41#include "amdgpu_dm_trace.h"
4562236b
HW
42
43#include "vid.h"
44#include "amdgpu.h"
a49dcb88 45#include "amdgpu_display.h"
a94d5569 46#include "amdgpu_ucode.h"
4562236b
HW
47#include "atom.h"
48#include "amdgpu_dm.h"
52704fca
BL
49#ifdef CONFIG_DRM_AMD_DC_HDCP
50#include "amdgpu_dm_hdcp.h"
53e108aa 51#include <drm/drm_hdcp.h>
52704fca 52#endif
e7b07cee 53#include "amdgpu_pm.h"
4562236b
HW
54
55#include "amd_shared.h"
56#include "amdgpu_dm_irq.h"
57#include "dm_helpers.h"
e7b07cee 58#include "amdgpu_dm_mst_types.h"
dc38fd9d
DF
59#if defined(CONFIG_DEBUG_FS)
60#include "amdgpu_dm_debugfs.h"
61#endif
f4594cd1 62#include "amdgpu_dm_psr.h"
4562236b
HW
63
64#include "ivsrcid/ivsrcid_vislands30.h"
65
81927e28 66#include "i2caux_interface.h"
4562236b
HW
67#include <linux/module.h>
68#include <linux/moduleparam.h>
e7b07cee 69#include <linux/types.h>
97028037 70#include <linux/pm_runtime.h>
09d21852 71#include <linux/pci.h>
a94d5569 72#include <linux/firmware.h>
6ce8f316 73#include <linux/component.h>
4562236b
HW
74
75#include <drm/drm_atomic.h>
674e78ac 76#include <drm/drm_atomic_uapi.h>
4562236b
HW
77#include <drm/drm_atomic_helper.h>
78#include <drm/drm_dp_mst_helper.h>
e7b07cee 79#include <drm/drm_fb_helper.h>
09d21852 80#include <drm/drm_fourcc.h>
e7b07cee 81#include <drm/drm_edid.h>
09d21852 82#include <drm/drm_vblank.h>
6ce8f316 83#include <drm/drm_audio_component.h>
4562236b 84
b86a1aa3 85#if defined(CONFIG_DRM_AMD_DC_DCN)
5527cd06 86#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
ff5ef992 87
ad941f7a
FX
88#include "dcn/dcn_1_0_offset.h"
89#include "dcn/dcn_1_0_sh_mask.h"
407e7517
HZ
90#include "soc15_hw_ip.h"
91#include "vega10_ip_offset.h"
ff5ef992
AD
92
93#include "soc15_common.h"
94#endif
95
e7b07cee 96#include "modules/inc/mod_freesync.h"
bbf854dc 97#include "modules/power/power_helpers.h"
ecd0136b 98#include "modules/inc/mod_info_packet.h"
e7b07cee 99
743b9786
NK
100#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
79037324
BL
102#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
5ce868fc
BL
104#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
71c0fd92
RL
106#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
469989ca
RL
108#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
2a411205
BL
110#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
656fe9b6
AP
112#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
1ebcaebd
NK
114#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
2200eb9e 116
a94d5569
DF
117#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
118MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
e7b07cee 119
5ea23931
RL
120#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
121MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122
8c7aea40
NK
123/* Number of bytes in PSP header for firmware. */
124#define PSP_HEADER_BYTES 0x100
125
126/* Number of bytes in PSP footer for firmware. */
127#define PSP_FOOTER_BYTES 0x100
128
b8592b48
LL
129/**
130 * DOC: overview
131 *
132 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
ec5c0ffa 133 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
b8592b48
LL
134 * requests into DC requests, and DC responses into DRM responses.
135 *
136 * The root control structure is &struct amdgpu_display_manager.
137 */
138
7578ecda
AD
139/* basic init/fini API */
140static int amdgpu_dm_init(struct amdgpu_device *adev);
141static void amdgpu_dm_fini(struct amdgpu_device *adev);
fe8858bb 142static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
7578ecda 143
0f877894
OV
144static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
145{
146 switch (link->dpcd_caps.dongle_type) {
147 case DISPLAY_DONGLE_NONE:
148 return DRM_MODE_SUBCONNECTOR_Native;
149 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
150 return DRM_MODE_SUBCONNECTOR_VGA;
151 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
152 case DISPLAY_DONGLE_DP_DVI_DONGLE:
153 return DRM_MODE_SUBCONNECTOR_DVID;
154 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
155 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
156 return DRM_MODE_SUBCONNECTOR_HDMIA;
157 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
158 default:
159 return DRM_MODE_SUBCONNECTOR_Unknown;
160 }
161}
162
163static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
164{
165 struct dc_link *link = aconnector->dc_link;
166 struct drm_connector *connector = &aconnector->base;
167 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
168
169 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
170 return;
171
172 if (aconnector->dc_sink)
173 subconnector = get_subconnector_type(link);
174
175 drm_object_property_set_value(&connector->base,
176 connector->dev->mode_config.dp_subconnector_property,
177 subconnector);
178}
179
1f6010a9
DF
180/*
181 * initializes drm_device display related structures, based on the information
7578ecda
AD
182 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
183 * drm_encoder, drm_mode_config
184 *
185 * Returns 0 on success
186 */
187static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
188/* removes and deallocates the drm structures, created by the above function */
189static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
190
7578ecda 191static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
f180b4bc 192 struct drm_plane *plane,
cc1fec57
NK
193 unsigned long possible_crtcs,
194 const struct dc_plane_cap *plane_cap);
7578ecda
AD
195static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
196 struct drm_plane *plane,
197 uint32_t link_index);
198static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
199 struct amdgpu_dm_connector *amdgpu_dm_connector,
200 uint32_t link_index,
201 struct amdgpu_encoder *amdgpu_encoder);
202static int amdgpu_dm_encoder_init(struct drm_device *dev,
203 struct amdgpu_encoder *aencoder,
204 uint32_t link_index);
205
206static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
207
7578ecda
AD
208static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
209
210static int amdgpu_dm_atomic_check(struct drm_device *dev,
211 struct drm_atomic_state *state);
212
674e78ac
NK
213static void handle_cursor_update(struct drm_plane *plane,
214 struct drm_plane_state *old_plane_state);
7578ecda 215
dfbbfe3c
BN
216static const struct drm_format_info *
217amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
218
e27c41d5 219static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
c40a09e5 220static void handle_hpd_rx_irq(void *param);
e27c41d5 221
a85ba005
NC
222static bool
223is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
224 struct drm_crtc_state *new_crtc_state);
4562236b
HW
225/*
226 * dm_vblank_get_counter
227 *
228 * @brief
229 * Get counter for number of vertical blanks
230 *
231 * @param
232 * struct amdgpu_device *adev - [in] desired amdgpu device
233 * int disp_idx - [in] which CRTC to get the counter from
234 *
235 * @return
236 * Counter for vertical blanks
237 */
238static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
239{
240 if (crtc >= adev->mode_info.num_crtc)
241 return 0;
242 else {
243 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
244
585d450c 245 if (acrtc->dm_irq_params.stream == NULL) {
0971c40e
HW
246 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
247 crtc);
4562236b
HW
248 return 0;
249 }
250
585d450c 251 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
4562236b
HW
252 }
253}
254
255static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
3ee6b26b 256 u32 *vbl, u32 *position)
4562236b 257{
81c50963
ST
258 uint32_t v_blank_start, v_blank_end, h_position, v_position;
259
4562236b
HW
260 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
261 return -EINVAL;
262 else {
263 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
264
585d450c 265 if (acrtc->dm_irq_params.stream == NULL) {
0971c40e
HW
266 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
267 crtc);
4562236b
HW
268 return 0;
269 }
270
81c50963
ST
271 /*
272 * TODO rework base driver to use values directly.
273 * for now parse it back into reg-format
274 */
585d450c 275 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
81c50963
ST
276 &v_blank_start,
277 &v_blank_end,
278 &h_position,
279 &v_position);
280
e806208d
AG
281 *position = v_position | (h_position << 16);
282 *vbl = v_blank_start | (v_blank_end << 16);
4562236b
HW
283 }
284
285 return 0;
286}
287
288static bool dm_is_idle(void *handle)
289{
290 /* XXX todo */
291 return true;
292}
293
294static int dm_wait_for_idle(void *handle)
295{
296 /* XXX todo */
297 return 0;
298}
299
300static bool dm_check_soft_reset(void *handle)
301{
302 return false;
303}
304
305static int dm_soft_reset(void *handle)
306{
307 /* XXX todo */
308 return 0;
309}
310
3ee6b26b
AD
311static struct amdgpu_crtc *
312get_crtc_by_otg_inst(struct amdgpu_device *adev,
313 int otg_inst)
4562236b 314{
4a580877 315 struct drm_device *dev = adev_to_drm(adev);
4562236b
HW
316 struct drm_crtc *crtc;
317 struct amdgpu_crtc *amdgpu_crtc;
318
bcd74374 319 if (WARN_ON(otg_inst == -1))
4562236b 320 return adev->mode_info.crtcs[0];
4562236b
HW
321
322 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
323 amdgpu_crtc = to_amdgpu_crtc(crtc);
324
325 if (amdgpu_crtc->otg_inst == otg_inst)
326 return amdgpu_crtc;
327 }
328
329 return NULL;
330}
331
585d450c
AP
332static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
333{
334 return acrtc->dm_irq_params.freesync_config.state ==
335 VRR_STATE_ACTIVE_VARIABLE ||
336 acrtc->dm_irq_params.freesync_config.state ==
337 VRR_STATE_ACTIVE_FIXED;
338}
339
66b0c973
MK
340static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
341{
342 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
343 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
344}
345
a85ba005
NC
346static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
347 struct dm_crtc_state *new_state)
348{
349 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
350 return true;
351 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
352 return true;
353 else
354 return false;
355}
356
b8e8c934
HW
357/**
358 * dm_pflip_high_irq() - Handle pageflip interrupt
359 * @interrupt_params: ignored
360 *
361 * Handles the pageflip interrupt by notifying all interested parties
362 * that the pageflip has been completed.
363 */
4562236b
HW
364static void dm_pflip_high_irq(void *interrupt_params)
365{
4562236b
HW
366 struct amdgpu_crtc *amdgpu_crtc;
367 struct common_irq_params *irq_params = interrupt_params;
368 struct amdgpu_device *adev = irq_params->adev;
369 unsigned long flags;
71bbe51a 370 struct drm_pending_vblank_event *e;
71bbe51a
MK
371 uint32_t vpos, hpos, v_blank_start, v_blank_end;
372 bool vrr_active;
4562236b
HW
373
374 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
375
376 /* IRQ could occur when in initial stage */
1f6010a9 377 /* TODO work and BO cleanup */
4562236b 378 if (amdgpu_crtc == NULL) {
cb2318b7 379 DC_LOG_PFLIP("CRTC is null, returning.\n");
4562236b
HW
380 return;
381 }
382
4a580877 383 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
4562236b
HW
384
385 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
cb2318b7 386 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
4562236b
HW
387 amdgpu_crtc->pflip_status,
388 AMDGPU_FLIP_SUBMITTED,
389 amdgpu_crtc->crtc_id,
390 amdgpu_crtc);
4a580877 391 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
4562236b
HW
392 return;
393 }
394
71bbe51a
MK
395 /* page flip completed. */
396 e = amdgpu_crtc->event;
397 amdgpu_crtc->event = NULL;
4562236b 398
bcd74374 399 WARN_ON(!e);
1159898a 400
585d450c 401 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
71bbe51a
MK
402
403 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
404 if (!vrr_active ||
585d450c 405 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
71bbe51a
MK
406 &v_blank_end, &hpos, &vpos) ||
407 (vpos < v_blank_start)) {
408 /* Update to correct count and vblank timestamp if racing with
409 * vblank irq. This also updates to the correct vblank timestamp
410 * even in VRR mode, as scanout is past the front-porch atm.
411 */
412 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
1159898a 413
71bbe51a
MK
414 /* Wake up userspace by sending the pageflip event with proper
415 * count and timestamp of vblank of flip completion.
416 */
417 if (e) {
418 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
419
420 /* Event sent, so done with vblank for this flip */
421 drm_crtc_vblank_put(&amdgpu_crtc->base);
422 }
423 } else if (e) {
424 /* VRR active and inside front-porch: vblank count and
425 * timestamp for pageflip event will only be up to date after
426 * drm_crtc_handle_vblank() has been executed from late vblank
427 * irq handler after start of back-porch (vline 0). We queue the
428 * pageflip event for send-out by drm_crtc_handle_vblank() with
429 * updated timestamp and count, once it runs after us.
430 *
431 * We need to open-code this instead of using the helper
432 * drm_crtc_arm_vblank_event(), as that helper would
433 * call drm_crtc_accurate_vblank_count(), which we must
434 * not call in VRR mode while we are in front-porch!
435 */
436
437 /* sequence will be replaced by real count during send-out. */
438 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
439 e->pipe = amdgpu_crtc->crtc_id;
440
4a580877 441 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
71bbe51a
MK
442 e = NULL;
443 }
4562236b 444
fdd1fe57
MK
445 /* Keep track of vblank of this flip for flip throttling. We use the
446 * cooked hw counter, as that one incremented at start of this vblank
447 * of pageflip completion, so last_flip_vblank is the forbidden count
448 * for queueing new pageflips if vsync + VRR is enabled.
449 */
5d1c59c4 450 amdgpu_crtc->dm_irq_params.last_flip_vblank =
e3eff4b5 451 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
fdd1fe57 452
54f5499a 453 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
4a580877 454 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
4562236b 455
cb2318b7
VL
456 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
457 amdgpu_crtc->crtc_id, amdgpu_crtc,
458 vrr_active, (int) !e);
4562236b
HW
459}
460
d2574c33
MK
461static void dm_vupdate_high_irq(void *interrupt_params)
462{
463 struct common_irq_params *irq_params = interrupt_params;
464 struct amdgpu_device *adev = irq_params->adev;
465 struct amdgpu_crtc *acrtc;
47588233
RS
466 struct drm_device *drm_dev;
467 struct drm_vblank_crtc *vblank;
468 ktime_t frame_duration_ns, previous_timestamp;
09aef2c4 469 unsigned long flags;
585d450c 470 int vrr_active;
d2574c33
MK
471
472 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
473
474 if (acrtc) {
585d450c 475 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
47588233
RS
476 drm_dev = acrtc->base.dev;
477 vblank = &drm_dev->vblank[acrtc->base.index];
478 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
479 frame_duration_ns = vblank->time - previous_timestamp;
480
481 if (frame_duration_ns > 0) {
482 trace_amdgpu_refresh_rate_track(acrtc->base.index,
483 frame_duration_ns,
484 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
485 atomic64_set(&irq_params->previous_timestamp, vblank->time);
486 }
d2574c33 487
cb2318b7 488 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
7f2be468 489 acrtc->crtc_id,
585d450c 490 vrr_active);
d2574c33
MK
491
492 /* Core vblank handling is done here after end of front-porch in
493 * vrr mode, as vblank timestamping will give valid results
494 * while now done after front-porch. This will also deliver
495 * page-flip completion events that have been queued to us
496 * if a pageflip happened inside front-porch.
497 */
585d450c 498 if (vrr_active) {
d2574c33 499 drm_crtc_handle_vblank(&acrtc->base);
09aef2c4
MK
500
501 /* BTR processing for pre-DCE12 ASICs */
585d450c 502 if (acrtc->dm_irq_params.stream &&
09aef2c4 503 adev->family < AMDGPU_FAMILY_AI) {
4a580877 504 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
09aef2c4
MK
505 mod_freesync_handle_v_update(
506 adev->dm.freesync_module,
585d450c
AP
507 acrtc->dm_irq_params.stream,
508 &acrtc->dm_irq_params.vrr_params);
09aef2c4
MK
509
510 dc_stream_adjust_vmin_vmax(
511 adev->dm.dc,
585d450c
AP
512 acrtc->dm_irq_params.stream,
513 &acrtc->dm_irq_params.vrr_params.adjust);
4a580877 514 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
09aef2c4
MK
515 }
516 }
d2574c33
MK
517 }
518}
519
b8e8c934
HW
520/**
521 * dm_crtc_high_irq() - Handles CRTC interrupt
2346ef47 522 * @interrupt_params: used for determining the CRTC instance
b8e8c934
HW
523 *
524 * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
525 * event handler.
526 */
4562236b
HW
527static void dm_crtc_high_irq(void *interrupt_params)
528{
529 struct common_irq_params *irq_params = interrupt_params;
530 struct amdgpu_device *adev = irq_params->adev;
4562236b 531 struct amdgpu_crtc *acrtc;
09aef2c4 532 unsigned long flags;
585d450c 533 int vrr_active;
4562236b 534
b57de80a 535 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
16f17eda
LL
536 if (!acrtc)
537 return;
538
585d450c 539 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
16f17eda 540
cb2318b7 541 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
585d450c 542 vrr_active, acrtc->dm_irq_params.active_planes);
16f17eda 543
2346ef47
NK
544 /**
545 * Core vblank handling at start of front-porch is only possible
546 * in non-vrr mode, as only there vblank timestamping will give
547 * valid results while done in front-porch. Otherwise defer it
548 * to dm_vupdate_high_irq after end of front-porch.
549 */
585d450c 550 if (!vrr_active)
2346ef47
NK
551 drm_crtc_handle_vblank(&acrtc->base);
552
553 /**
554 * Following stuff must happen at start of vblank, for crc
555 * computation and below-the-range btr support in vrr mode.
556 */
16f17eda 557 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
2346ef47
NK
558
559 /* BTR updates need to happen before VUPDATE on Vega and above. */
560 if (adev->family < AMDGPU_FAMILY_AI)
561 return;
16f17eda 562
4a580877 563 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
16f17eda 564
585d450c
AP
565 if (acrtc->dm_irq_params.stream &&
566 acrtc->dm_irq_params.vrr_params.supported &&
567 acrtc->dm_irq_params.freesync_config.state ==
568 VRR_STATE_ACTIVE_VARIABLE) {
2346ef47 569 mod_freesync_handle_v_update(adev->dm.freesync_module,
585d450c
AP
570 acrtc->dm_irq_params.stream,
571 &acrtc->dm_irq_params.vrr_params);
16f17eda 572
585d450c
AP
573 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
574 &acrtc->dm_irq_params.vrr_params.adjust);
16f17eda
LL
575 }
576
2b5aed9a
MK
577 /*
578 * If there aren't any active_planes then DCH HUBP may be clock-gated.
579 * In that case, pageflip completion interrupts won't fire and pageflip
580 * completion events won't get delivered. Prevent this by sending
581 * pending pageflip events from here if a flip is still pending.
582 *
583 * If any planes are enabled, use dm_pflip_high_irq() instead, to
584 * avoid race conditions between flip programming and completion,
585 * which could cause too early flip completion events.
586 */
2346ef47
NK
587 if (adev->family >= AMDGPU_FAMILY_RV &&
588 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
585d450c 589 acrtc->dm_irq_params.active_planes == 0) {
16f17eda
LL
590 if (acrtc->event) {
591 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
592 acrtc->event = NULL;
593 drm_crtc_vblank_put(&acrtc->base);
594 }
595 acrtc->pflip_status = AMDGPU_FLIP_NONE;
596 }
597
4a580877 598 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
16f17eda
LL
599}
600
86bc2219 601#if defined(CONFIG_DRM_AMD_DC_DCN)
9e1178ef 602#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
86bc2219
WL
603/**
604 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
605 * DCN generation ASICs
48e01bf4 606 * @interrupt_params: interrupt parameters
86bc2219
WL
607 *
608 * Used to set crc window/read out crc value at vertical line 0 position
609 */
86bc2219
WL
610static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
611{
612 struct common_irq_params *irq_params = interrupt_params;
613 struct amdgpu_device *adev = irq_params->adev;
614 struct amdgpu_crtc *acrtc;
615
616 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
617
618 if (!acrtc)
619 return;
620
621 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
622}
433e5dec 623#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
86bc2219 624
e27c41d5
JS
625/**
626 * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
627 * @adev: amdgpu_device pointer
628 * @notify: dmub notification structure
629 *
630 * Dmub AUX or SET_CONFIG command completion processing callback
631 * Copies dmub notification to DM which is to be read by AUX command.
632 * issuing thread and also signals the event to wake up the thread.
633 */
634void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
635{
636 if (adev->dm.dmub_notify)
637 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
638 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
639 complete(&adev->dm.dmub_aux_transfer_done);
640}
641
642/**
643 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
644 * @adev: amdgpu_device pointer
645 * @notify: dmub notification structure
646 *
647 * Dmub Hpd interrupt processing callback. Gets displayindex through the
648 * ink index and calls helper to do the processing.
649 */
650void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
651{
652 struct amdgpu_dm_connector *aconnector;
f6e03f80 653 struct amdgpu_dm_connector *hpd_aconnector = NULL;
e27c41d5
JS
654 struct drm_connector *connector;
655 struct drm_connector_list_iter iter;
656 struct dc_link *link;
657 uint8_t link_index = 0;
658 struct drm_device *dev = adev->dm.ddev;
659
660 if (adev == NULL)
661 return;
662
663 if (notify == NULL) {
664 DRM_ERROR("DMUB HPD callback notification was NULL");
665 return;
666 }
667
668 if (notify->link_index > adev->dm.dc->link_count) {
669 DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
670 return;
671 }
672
e27c41d5 673 link_index = notify->link_index;
e27c41d5
JS
674 link = adev->dm.dc->links[link_index];
675
676 drm_connector_list_iter_begin(dev, &iter);
677 drm_for_each_connector_iter(connector, &iter) {
678 aconnector = to_amdgpu_dm_connector(connector);
679 if (link && aconnector->dc_link == link) {
680 DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
f6e03f80 681 hpd_aconnector = aconnector;
e27c41d5
JS
682 break;
683 }
684 }
685 drm_connector_list_iter_end(&iter);
e27c41d5 686
c40a09e5
NK
687 if (hpd_aconnector) {
688 if (notify->type == DMUB_NOTIFICATION_HPD)
689 handle_hpd_irq_helper(hpd_aconnector);
690 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
691 handle_hpd_rx_irq(hpd_aconnector);
692 }
e27c41d5
JS
693}
694
695/**
696 * register_dmub_notify_callback - Sets callback for DMUB notify
697 * @adev: amdgpu_device pointer
698 * @type: Type of dmub notification
699 * @callback: Dmub interrupt callback function
700 * @dmub_int_thread_offload: offload indicator
701 *
702 * API to register a dmub callback handler for a dmub notification
703 * Also sets indicator whether callback processing to be offloaded.
704 * to dmub interrupt handling thread
705 * Return: true if successfully registered, false if there is existing registration
706 */
707bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
708dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
709{
710 if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
711 adev->dm.dmub_callback[type] = callback;
712 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
713 } else
714 return false;
715
716 return true;
717}
718
719static void dm_handle_hpd_work(struct work_struct *work)
720{
721 struct dmub_hpd_work *dmub_hpd_wrk;
722
723 dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
724
725 if (!dmub_hpd_wrk->dmub_notify) {
726 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
727 return;
728 }
729
730 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
731 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
732 dmub_hpd_wrk->dmub_notify);
733 }
094b21c1
JS
734
735 kfree(dmub_hpd_wrk->dmub_notify);
e27c41d5
JS
736 kfree(dmub_hpd_wrk);
737
738}
739
e25515e2 740#define DMUB_TRACE_MAX_READ 64
81927e28
JS
741/**
742 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
743 * @interrupt_params: used for determining the Outbox instance
744 *
745 * Handles the Outbox Interrupt
746 * event handler.
747 */
81927e28
JS
748static void dm_dmub_outbox1_low_irq(void *interrupt_params)
749{
750 struct dmub_notification notify;
751 struct common_irq_params *irq_params = interrupt_params;
752 struct amdgpu_device *adev = irq_params->adev;
753 struct amdgpu_display_manager *dm = &adev->dm;
754 struct dmcub_trace_buf_entry entry = { 0 };
755 uint32_t count = 0;
e27c41d5 756 struct dmub_hpd_work *dmub_hpd_wrk;
f6e03f80 757 struct dc_link *plink = NULL;
81927e28 758
f6e03f80
JS
759 if (dc_enable_dmub_notifications(adev->dm.dc) &&
760 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
e27c41d5 761
f6e03f80
JS
762 do {
763 dc_stat_get_dmub_notification(adev->dm.dc, &notify);
764 if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
765 DRM_ERROR("DM: notify type %d invalid!", notify.type);
766 continue;
767 }
c40a09e5
NK
768 if (!dm->dmub_callback[notify.type]) {
769 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
770 continue;
771 }
f6e03f80 772 if (dm->dmub_thread_offload[notify.type] == true) {
094b21c1
JS
773 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
774 if (!dmub_hpd_wrk) {
775 DRM_ERROR("Failed to allocate dmub_hpd_wrk");
776 return;
777 }
778 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
779 if (!dmub_hpd_wrk->dmub_notify) {
780 kfree(dmub_hpd_wrk);
781 DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
782 return;
783 }
784 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
785 if (dmub_hpd_wrk->dmub_notify)
786 memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
f6e03f80
JS
787 dmub_hpd_wrk->adev = adev;
788 if (notify.type == DMUB_NOTIFICATION_HPD) {
789 plink = adev->dm.dc->links[notify.link_index];
790 if (plink) {
791 plink->hpd_status =
b97788e5 792 notify.hpd_status == DP_HPD_PLUG;
f6e03f80 793 }
e27c41d5 794 }
f6e03f80
JS
795 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
796 } else {
797 dm->dmub_callback[notify.type](adev, &notify);
798 }
799 } while (notify.pending_notification);
81927e28
JS
800 }
801
802
803 do {
804 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
805 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
806 entry.param0, entry.param1);
807
808 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
809 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
810 } else
811 break;
812
813 count++;
814
815 } while (count <= DMUB_TRACE_MAX_READ);
816
f6e03f80
JS
817 if (count > DMUB_TRACE_MAX_READ)
818 DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
81927e28 819}
433e5dec 820#endif /* CONFIG_DRM_AMD_DC_DCN */
86bc2219 821
4562236b
HW
822static int dm_set_clockgating_state(void *handle,
823 enum amd_clockgating_state state)
824{
825 return 0;
826}
827
828static int dm_set_powergating_state(void *handle,
829 enum amd_powergating_state state)
830{
831 return 0;
832}
833
834/* Prototypes of private functions */
835static int dm_early_init(void* handle);
836
a32e24b4 837/* Allocate memory for FBC compressed data */
3e332d3a 838static void amdgpu_dm_fbc_init(struct drm_connector *connector)
a32e24b4 839{
3e332d3a 840 struct drm_device *dev = connector->dev;
1348969a 841 struct amdgpu_device *adev = drm_to_adev(dev);
4d154b85 842 struct dm_compressor_info *compressor = &adev->dm.compressor;
3e332d3a
RL
843 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
844 struct drm_display_mode *mode;
42e67c3b
RL
845 unsigned long max_size = 0;
846
847 if (adev->dm.dc->fbc_compressor == NULL)
848 return;
a32e24b4 849
3e332d3a 850 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
42e67c3b
RL
851 return;
852
3e332d3a
RL
853 if (compressor->bo_ptr)
854 return;
42e67c3b 855
42e67c3b 856
3e332d3a
RL
857 list_for_each_entry(mode, &connector->modes, head) {
858 if (max_size < mode->htotal * mode->vtotal)
859 max_size = mode->htotal * mode->vtotal;
42e67c3b
RL
860 }
861
862 if (max_size) {
863 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
0e5916ff 864 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
42e67c3b 865 &compressor->gpu_addr, &compressor->cpu_addr);
a32e24b4
RL
866
867 if (r)
42e67c3b
RL
868 DRM_ERROR("DM: Failed to initialize FBC\n");
869 else {
870 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
871 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
872 }
873
a32e24b4
RL
874 }
875
876}
a32e24b4 877
6ce8f316
NK
878static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
879 int pipe, bool *enabled,
880 unsigned char *buf, int max_bytes)
881{
882 struct drm_device *dev = dev_get_drvdata(kdev);
1348969a 883 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
884 struct drm_connector *connector;
885 struct drm_connector_list_iter conn_iter;
886 struct amdgpu_dm_connector *aconnector;
887 int ret = 0;
888
889 *enabled = false;
890
891 mutex_lock(&adev->dm.audio_lock);
892
893 drm_connector_list_iter_begin(dev, &conn_iter);
894 drm_for_each_connector_iter(connector, &conn_iter) {
895 aconnector = to_amdgpu_dm_connector(connector);
896 if (aconnector->audio_inst != port)
897 continue;
898
899 *enabled = true;
900 ret = drm_eld_size(connector->eld);
901 memcpy(buf, connector->eld, min(max_bytes, ret));
902
903 break;
904 }
905 drm_connector_list_iter_end(&conn_iter);
906
907 mutex_unlock(&adev->dm.audio_lock);
908
909 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
910
911 return ret;
912}
913
914static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
915 .get_eld = amdgpu_dm_audio_component_get_eld,
916};
917
918static int amdgpu_dm_audio_component_bind(struct device *kdev,
919 struct device *hda_kdev, void *data)
920{
921 struct drm_device *dev = dev_get_drvdata(kdev);
1348969a 922 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
923 struct drm_audio_component *acomp = data;
924
925 acomp->ops = &amdgpu_dm_audio_component_ops;
926 acomp->dev = kdev;
927 adev->dm.audio_component = acomp;
928
929 return 0;
930}
931
932static void amdgpu_dm_audio_component_unbind(struct device *kdev,
933 struct device *hda_kdev, void *data)
934{
935 struct drm_device *dev = dev_get_drvdata(kdev);
1348969a 936 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
937 struct drm_audio_component *acomp = data;
938
939 acomp->ops = NULL;
940 acomp->dev = NULL;
941 adev->dm.audio_component = NULL;
942}
943
944static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
945 .bind = amdgpu_dm_audio_component_bind,
946 .unbind = amdgpu_dm_audio_component_unbind,
947};
948
949static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
950{
951 int i, ret;
952
953 if (!amdgpu_audio)
954 return 0;
955
956 adev->mode_info.audio.enabled = true;
957
958 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
959
960 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
961 adev->mode_info.audio.pin[i].channels = -1;
962 adev->mode_info.audio.pin[i].rate = -1;
963 adev->mode_info.audio.pin[i].bits_per_sample = -1;
964 adev->mode_info.audio.pin[i].status_bits = 0;
965 adev->mode_info.audio.pin[i].category_code = 0;
966 adev->mode_info.audio.pin[i].connected = false;
967 adev->mode_info.audio.pin[i].id =
968 adev->dm.dc->res_pool->audios[i]->inst;
969 adev->mode_info.audio.pin[i].offset = 0;
970 }
971
972 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
973 if (ret < 0)
974 return ret;
975
976 adev->dm.audio_registered = true;
977
978 return 0;
979}
980
981static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
982{
983 if (!amdgpu_audio)
984 return;
985
986 if (!adev->mode_info.audio.enabled)
987 return;
988
989 if (adev->dm.audio_registered) {
990 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
991 adev->dm.audio_registered = false;
992 }
993
994 /* TODO: Disable audio? */
995
996 adev->mode_info.audio.enabled = false;
997}
998
dfd84d90 999static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
6ce8f316
NK
1000{
1001 struct drm_audio_component *acomp = adev->dm.audio_component;
1002
1003 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1004 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1005
1006 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1007 pin, -1);
1008 }
1009}
1010
743b9786
NK
1011static int dm_dmub_hw_init(struct amdgpu_device *adev)
1012{
743b9786
NK
1013 const struct dmcub_firmware_header_v1_0 *hdr;
1014 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
8c7aea40 1015 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
743b9786
NK
1016 const struct firmware *dmub_fw = adev->dm.dmub_fw;
1017 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1018 struct abm *abm = adev->dm.dc->res_pool->abm;
743b9786
NK
1019 struct dmub_srv_hw_params hw_params;
1020 enum dmub_status status;
1021 const unsigned char *fw_inst_const, *fw_bss_data;
8c7aea40 1022 uint32_t i, fw_inst_const_size, fw_bss_data_size;
743b9786 1023 bool has_hw_support;
5b109397 1024 struct dc *dc = adev->dm.dc;
743b9786
NK
1025
1026 if (!dmub_srv)
1027 /* DMUB isn't supported on the ASIC. */
1028 return 0;
1029
8c7aea40
NK
1030 if (!fb_info) {
1031 DRM_ERROR("No framebuffer info for DMUB service.\n");
1032 return -EINVAL;
1033 }
1034
743b9786
NK
1035 if (!dmub_fw) {
1036 /* Firmware required for DMUB support. */
1037 DRM_ERROR("No firmware provided for DMUB.\n");
1038 return -EINVAL;
1039 }
1040
1041 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1042 if (status != DMUB_STATUS_OK) {
1043 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1044 return -EINVAL;
1045 }
1046
1047 if (!has_hw_support) {
1048 DRM_INFO("DMUB unsupported on ASIC\n");
1049 return 0;
1050 }
1051
1052 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1053
743b9786
NK
1054 fw_inst_const = dmub_fw->data +
1055 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
8c7aea40 1056 PSP_HEADER_BYTES;
743b9786
NK
1057
1058 fw_bss_data = dmub_fw->data +
1059 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1060 le32_to_cpu(hdr->inst_const_bytes);
1061
1062 /* Copy firmware and bios info into FB memory. */
8c7aea40
NK
1063 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1064 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1065
1066 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1067
ddde28a5
HW
1068 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1069 * amdgpu_ucode_init_single_fw will load dmub firmware
1070 * fw_inst_const part to cw0; otherwise, the firmware back door load
1071 * will be done by dm_dmub_hw_init
1072 */
1073 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1074 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1075 fw_inst_const_size);
1076 }
1077
a576b345
NK
1078 if (fw_bss_data_size)
1079 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1080 fw_bss_data, fw_bss_data_size);
ddde28a5
HW
1081
1082 /* Copy firmware bios info into FB memory. */
8c7aea40
NK
1083 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1084 adev->bios_size);
1085
1086 /* Reset regions that need to be reset. */
1087 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1088 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1089
1090 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1091 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1092
1093 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1094 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
743b9786
NK
1095
1096 /* Initialize hardware. */
1097 memset(&hw_params, 0, sizeof(hw_params));
1098 hw_params.fb_base = adev->gmc.fb_start;
1099 hw_params.fb_offset = adev->gmc.aper_base;
1100
31a7f4bb
HW
1101 /* backdoor load firmware and trigger dmub running */
1102 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1103 hw_params.load_inst_const = true;
1104
743b9786
NK
1105 if (dmcu)
1106 hw_params.psp_version = dmcu->psp_version;
1107
8c7aea40
NK
1108 for (i = 0; i < fb_info->num_fb; ++i)
1109 hw_params.fb[i] = &fb_info->fb[i];
743b9786 1110
5b109397
JS
1111 switch (adev->asic_type) {
1112 case CHIP_YELLOW_CARP:
1113 if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
1114 hw_params.dpia_supported = true;
1115#if defined(CONFIG_DRM_AMD_DC_DCN)
1116 hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
1117#endif
1118 }
1119 break;
1120 default:
1121 break;
1122 }
1123
743b9786
NK
1124 status = dmub_srv_hw_init(dmub_srv, &hw_params);
1125 if (status != DMUB_STATUS_OK) {
1126 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1127 return -EINVAL;
1128 }
1129
1130 /* Wait for firmware load to finish. */
1131 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1132 if (status != DMUB_STATUS_OK)
1133 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1134
1135 /* Init DMCU and ABM if available. */
1136 if (dmcu && abm) {
1137 dmcu->funcs->dmcu_init(dmcu);
1138 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1139 }
1140
051b7887
RL
1141 if (!adev->dm.dc->ctx->dmub_srv)
1142 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
9a71c7d3
NK
1143 if (!adev->dm.dc->ctx->dmub_srv) {
1144 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1145 return -ENOMEM;
1146 }
1147
743b9786
NK
1148 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1149 adev->dm.dmcub_fw_version);
1150
1151 return 0;
1152}
1153
a3fe0e33 1154#if defined(CONFIG_DRM_AMD_DC_DCN)
c0fb85ae 1155static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
c44a22b3 1156{
c0fb85ae
YZ
1157 uint64_t pt_base;
1158 uint32_t logical_addr_low;
1159 uint32_t logical_addr_high;
1160 uint32_t agp_base, agp_bot, agp_top;
1161 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
c44a22b3 1162
a0f884f5
NK
1163 memset(pa_config, 0, sizeof(*pa_config));
1164
c0fb85ae
YZ
1165 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1166 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
c44a22b3 1167
c0fb85ae
YZ
1168 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1169 /*
1170 * Raven2 has a HW issue that it is unable to use the vram which
1171 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1172 * workaround that increase system aperture high address (add 1)
1173 * to get rid of the VM fault and hardware hang.
1174 */
1175 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1176 else
1177 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
c44a22b3 1178
c0fb85ae
YZ
1179 agp_base = 0;
1180 agp_bot = adev->gmc.agp_start >> 24;
1181 agp_top = adev->gmc.agp_end >> 24;
c44a22b3 1182
c44a22b3 1183
c0fb85ae
YZ
1184 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1185 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1186 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1187 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1188 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1189 page_table_base.low_part = lower_32_bits(pt_base);
c44a22b3 1190
c0fb85ae
YZ
1191 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1192 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1193
1194 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1195 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1196 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1197
1198 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1199 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1200 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1201
1202 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1203 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1204 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1205
1206 pa_config->is_hvm_enabled = 0;
c44a22b3 1207
c44a22b3 1208}
e6cd859d 1209#endif
ea3b4242 1210#if defined(CONFIG_DRM_AMD_DC_DCN)
09a5df6c 1211static void vblank_control_worker(struct work_struct *work)
ea3b4242 1212{
09a5df6c
NK
1213 struct vblank_control_work *vblank_work =
1214 container_of(work, struct vblank_control_work, work);
ea3b4242
QZ
1215 struct amdgpu_display_manager *dm = vblank_work->dm;
1216
1217 mutex_lock(&dm->dc_lock);
1218
1219 if (vblank_work->enable)
1220 dm->active_vblank_irq_count++;
5af50b0b 1221 else if(dm->active_vblank_irq_count)
ea3b4242
QZ
1222 dm->active_vblank_irq_count--;
1223
2cbcb78c 1224 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
ea3b4242 1225
4711c033 1226 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
ea3b4242 1227
58aa1c50
NK
1228 /* Control PSR based on vblank requirements from OS */
1229 if (vblank_work->stream && vblank_work->stream->link) {
1230 if (vblank_work->enable) {
1231 if (vblank_work->stream->link->psr_settings.psr_allow_active)
1232 amdgpu_dm_psr_disable(vblank_work->stream);
1233 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1234 !vblank_work->stream->link->psr_settings.psr_allow_active &&
1235 vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1236 amdgpu_dm_psr_enable(vblank_work->stream);
1237 }
1238 }
1239
ea3b4242 1240 mutex_unlock(&dm->dc_lock);
58aa1c50
NK
1241
1242 dc_stream_release(vblank_work->stream);
1243
09a5df6c 1244 kfree(vblank_work);
ea3b4242
QZ
1245}
1246
ea3b4242 1247#endif
8e794421
WL
1248
1249static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1250{
1251 struct hpd_rx_irq_offload_work *offload_work;
1252 struct amdgpu_dm_connector *aconnector;
1253 struct dc_link *dc_link;
1254 struct amdgpu_device *adev;
1255 enum dc_connection_type new_connection_type = dc_connection_none;
1256 unsigned long flags;
1257
1258 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1259 aconnector = offload_work->offload_wq->aconnector;
1260
1261 if (!aconnector) {
1262 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1263 goto skip;
1264 }
1265
1266 adev = drm_to_adev(aconnector->base.dev);
1267 dc_link = aconnector->dc_link;
1268
1269 mutex_lock(&aconnector->hpd_lock);
1270 if (!dc_link_detect_sink(dc_link, &new_connection_type))
1271 DRM_ERROR("KMS: Failed to detect connector\n");
1272 mutex_unlock(&aconnector->hpd_lock);
1273
1274 if (new_connection_type == dc_connection_none)
1275 goto skip;
1276
1277 if (amdgpu_in_reset(adev))
1278 goto skip;
1279
1280 mutex_lock(&adev->dm.dc_lock);
1281 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1282 dc_link_dp_handle_automated_test(dc_link);
1283 else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1284 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1285 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1286 dc_link_dp_handle_link_loss(dc_link);
1287 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1288 offload_work->offload_wq->is_handling_link_loss = false;
1289 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1290 }
1291 mutex_unlock(&adev->dm.dc_lock);
1292
1293skip:
1294 kfree(offload_work);
1295
1296}
1297
1298static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1299{
1300 int max_caps = dc->caps.max_links;
1301 int i = 0;
1302 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1303
1304 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1305
1306 if (!hpd_rx_offload_wq)
1307 return NULL;
1308
1309
1310 for (i = 0; i < max_caps; i++) {
1311 hpd_rx_offload_wq[i].wq =
1312 create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1313
1314 if (hpd_rx_offload_wq[i].wq == NULL) {
1315 DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1316 return NULL;
1317 }
1318
1319 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1320 }
1321
1322 return hpd_rx_offload_wq;
1323}
1324
3ce51649
AD
1325struct amdgpu_stutter_quirk {
1326 u16 chip_vendor;
1327 u16 chip_device;
1328 u16 subsys_vendor;
1329 u16 subsys_device;
1330 u8 revision;
1331};
1332
1333static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1334 /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1335 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1336 { 0, 0, 0, 0, 0 },
1337};
1338
1339static bool dm_should_disable_stutter(struct pci_dev *pdev)
1340{
1341 const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1342
1343 while (p && p->chip_device != 0) {
1344 if (pdev->vendor == p->chip_vendor &&
1345 pdev->device == p->chip_device &&
1346 pdev->subsystem_vendor == p->subsys_vendor &&
1347 pdev->subsystem_device == p->subsys_device &&
1348 pdev->revision == p->revision) {
1349 return true;
1350 }
1351 ++p;
1352 }
1353 return false;
1354}
1355
7578ecda 1356static int amdgpu_dm_init(struct amdgpu_device *adev)
4562236b
HW
1357{
1358 struct dc_init_data init_data;
52704fca
BL
1359#ifdef CONFIG_DRM_AMD_DC_HDCP
1360 struct dc_callback_init init_params;
1361#endif
743b9786 1362 int r;
52704fca 1363
4a580877 1364 adev->dm.ddev = adev_to_drm(adev);
4562236b
HW
1365 adev->dm.adev = adev;
1366
4562236b
HW
1367 /* Zero all the fields */
1368 memset(&init_data, 0, sizeof(init_data));
52704fca
BL
1369#ifdef CONFIG_DRM_AMD_DC_HDCP
1370 memset(&init_params, 0, sizeof(init_params));
1371#endif
4562236b 1372
674e78ac 1373 mutex_init(&adev->dm.dc_lock);
6ce8f316 1374 mutex_init(&adev->dm.audio_lock);
ea3b4242
QZ
1375#if defined(CONFIG_DRM_AMD_DC_DCN)
1376 spin_lock_init(&adev->dm.vblank_lock);
1377#endif
674e78ac 1378
4562236b
HW
1379 if(amdgpu_dm_irq_init(adev)) {
1380 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1381 goto error;
1382 }
1383
1384 init_data.asic_id.chip_family = adev->family;
1385
2dc31ca1 1386 init_data.asic_id.pci_revision_id = adev->pdev->revision;
4562236b 1387 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
dae66a04 1388 init_data.asic_id.chip_id = adev->pdev->device;
4562236b 1389
770d13b1 1390 init_data.asic_id.vram_width = adev->gmc.vram_width;
4562236b
HW
1391 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1392 init_data.asic_id.atombios_base_address =
1393 adev->mode_info.atom_context->bios;
1394
1395 init_data.driver = adev;
1396
1397 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1398
1399 if (!adev->dm.cgs_device) {
1400 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1401 goto error;
1402 }
1403
1404 init_data.cgs_device = adev->dm.cgs_device;
1405
4562236b
HW
1406 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1407
60fb100b
AD
1408 switch (adev->asic_type) {
1409 case CHIP_CARRIZO:
1410 case CHIP_STONEY:
1ebcaebd
NK
1411 init_data.flags.gpu_vm_support = true;
1412 break;
60fb100b 1413 default:
1d789535 1414 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
1415 case IP_VERSION(2, 1, 0):
1416 init_data.flags.gpu_vm_support = true;
91adec9e
ML
1417 switch (adev->dm.dmcub_fw_version) {
1418 case 0: /* development */
1419 case 0x1: /* linux-firmware.git hash 6d9f399 */
1420 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1421 init_data.flags.disable_dmcu = false;
1422 break;
1423 default:
1424 init_data.flags.disable_dmcu = true;
1425 }
c08182f2 1426 break;
559f591d
AD
1427 case IP_VERSION(1, 0, 0):
1428 case IP_VERSION(1, 0, 1):
c08182f2
AD
1429 case IP_VERSION(3, 0, 1):
1430 case IP_VERSION(3, 1, 2):
1431 case IP_VERSION(3, 1, 3):
1432 init_data.flags.gpu_vm_support = true;
1433 break;
1434 case IP_VERSION(2, 0, 3):
1435 init_data.flags.disable_dmcu = true;
1436 break;
1437 default:
1438 break;
1439 }
60fb100b
AD
1440 break;
1441 }
6e227308 1442
04b94af4
AD
1443 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1444 init_data.flags.fbc_support = true;
1445
d99f38ae
AD
1446 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1447 init_data.flags.multi_mon_pp_mclk_switch = true;
1448
eaf56410
LL
1449 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1450 init_data.flags.disable_fractional_pwm = true;
a5148245
ZL
1451
1452 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1453 init_data.flags.edp_no_power_sequencing = true;
eaf56410 1454
27eaa492 1455 init_data.flags.power_down_display_on_boot = true;
78ad75f8 1456
0dd79532 1457 INIT_LIST_HEAD(&adev->dm.da_list);
4562236b
HW
1458 /* Display Core create. */
1459 adev->dm.dc = dc_create(&init_data);
1460
423788c7 1461 if (adev->dm.dc) {
76121231 1462 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
423788c7 1463 } else {
76121231 1464 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
423788c7
ES
1465 goto error;
1466 }
4562236b 1467
8a791dab
HW
1468 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1469 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1470 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1471 }
1472
f99d8762
HW
1473 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1474 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3ce51649
AD
1475 if (dm_should_disable_stutter(adev->pdev))
1476 adev->dm.dc->debug.disable_stutter = true;
f99d8762 1477
8a791dab
HW
1478 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1479 adev->dm.dc->debug.disable_stutter = true;
1480
1481 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1482 adev->dm.dc->debug.disable_dsc = true;
1483
1484 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1485 adev->dm.dc->debug.disable_clock_gate = true;
1486
743b9786
NK
1487 r = dm_dmub_hw_init(adev);
1488 if (r) {
1489 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1490 goto error;
1491 }
1492
bb6785c1
NK
1493 dc_hardware_init(adev->dm.dc);
1494
8e794421
WL
1495 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1496 if (!adev->dm.hpd_rx_offload_wq) {
1497 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1498 goto error;
1499 }
1500
0b08c54b 1501#if defined(CONFIG_DRM_AMD_DC_DCN)
3ca001af 1502 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
e6cd859d
AD
1503 struct dc_phy_addr_space_config pa_config;
1504
0b08c54b 1505 mmhub_read_system_context(adev, &pa_config);
c0fb85ae 1506
0b08c54b
YZ
1507 // Call the DC init_memory func
1508 dc_setup_system_context(adev->dm.dc, &pa_config);
1509 }
1510#endif
c0fb85ae 1511
4562236b
HW
1512 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1513 if (!adev->dm.freesync_module) {
1514 DRM_ERROR(
1515 "amdgpu: failed to initialize freesync_module.\n");
1516 } else
f1ad2f5e 1517 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
4562236b
HW
1518 adev->dm.freesync_module);
1519
e277adc5
LSL
1520 amdgpu_dm_init_color_mod();
1521
ea3b4242
QZ
1522#if defined(CONFIG_DRM_AMD_DC_DCN)
1523 if (adev->dm.dc->caps.max_links > 0) {
09a5df6c
NK
1524 adev->dm.vblank_control_workqueue =
1525 create_singlethread_workqueue("dm_vblank_control_workqueue");
1526 if (!adev->dm.vblank_control_workqueue)
ea3b4242 1527 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
ea3b4242
QZ
1528 }
1529#endif
1530
52704fca 1531#ifdef CONFIG_DRM_AMD_DC_HDCP
c08182f2 1532 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
e50dc171 1533 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
52704fca 1534
96a3b32e
BL
1535 if (!adev->dm.hdcp_workqueue)
1536 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1537 else
1538 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
52704fca 1539
96a3b32e
BL
1540 dc_init_callbacks(adev->dm.dc, &init_params);
1541 }
9a65df19
WL
1542#endif
1543#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1544 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
52704fca 1545#endif
81927e28
JS
1546 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1547 init_completion(&adev->dm.dmub_aux_transfer_done);
1548 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1549 if (!adev->dm.dmub_notify) {
1550 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1551 goto error;
1552 }
e27c41d5
JS
1553
1554 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1555 if (!adev->dm.delayed_hpd_wq) {
1556 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1557 goto error;
1558 }
1559
81927e28 1560 amdgpu_dm_outbox_init(adev);
e27c41d5
JS
1561#if defined(CONFIG_DRM_AMD_DC_DCN)
1562 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1563 dmub_aux_setconfig_callback, false)) {
1564 DRM_ERROR("amdgpu: fail to register dmub aux callback");
1565 goto error;
1566 }
1567 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1568 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1569 goto error;
1570 }
c40a09e5
NK
1571 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1572 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1573 goto error;
1574 }
433e5dec 1575#endif /* CONFIG_DRM_AMD_DC_DCN */
81927e28
JS
1576 }
1577
4562236b
HW
1578 if (amdgpu_dm_initialize_drm_device(adev)) {
1579 DRM_ERROR(
1580 "amdgpu: failed to initialize sw for display support.\n");
1581 goto error;
1582 }
1583
f74367e4
AD
1584 /* create fake encoders for MST */
1585 dm_dp_create_fake_mst_encoders(adev);
1586
4562236b
HW
1587 /* TODO: Add_display_info? */
1588
1589 /* TODO use dynamic cursor width */
4a580877
LT
1590 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1591 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
4562236b 1592
4a580877 1593 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
4562236b
HW
1594 DRM_ERROR(
1595 "amdgpu: failed to initialize sw for display support.\n");
1596 goto error;
1597 }
1598
c0fb85ae 1599
f1ad2f5e 1600 DRM_DEBUG_DRIVER("KMS initialized.\n");
4562236b
HW
1601
1602 return 0;
1603error:
1604 amdgpu_dm_fini(adev);
1605
59d0f396 1606 return -EINVAL;
4562236b
HW
1607}
1608
e9669fb7
AG
1609static int amdgpu_dm_early_fini(void *handle)
1610{
1611 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1612
1613 amdgpu_dm_audio_fini(adev);
1614
1615 return 0;
1616}
1617
7578ecda 1618static void amdgpu_dm_fini(struct amdgpu_device *adev)
4562236b 1619{
f74367e4
AD
1620 int i;
1621
09a5df6c
NK
1622#if defined(CONFIG_DRM_AMD_DC_DCN)
1623 if (adev->dm.vblank_control_workqueue) {
1624 destroy_workqueue(adev->dm.vblank_control_workqueue);
1625 adev->dm.vblank_control_workqueue = NULL;
1626 }
1627#endif
1628
f74367e4
AD
1629 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1630 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1631 }
1632
4562236b 1633 amdgpu_dm_destroy_drm_device(&adev->dm);
c8bdf2b6 1634
9a65df19
WL
1635#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1636 if (adev->dm.crc_rd_wrk) {
1637 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1638 kfree(adev->dm.crc_rd_wrk);
1639 adev->dm.crc_rd_wrk = NULL;
1640 }
1641#endif
52704fca
BL
1642#ifdef CONFIG_DRM_AMD_DC_HDCP
1643 if (adev->dm.hdcp_workqueue) {
e96b1b29 1644 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
52704fca
BL
1645 adev->dm.hdcp_workqueue = NULL;
1646 }
1647
1648 if (adev->dm.dc)
1649 dc_deinit_callbacks(adev->dm.dc);
1650#endif
51ba6912 1651
3beac533 1652 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
9a71c7d3 1653
81927e28
JS
1654 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1655 kfree(adev->dm.dmub_notify);
1656 adev->dm.dmub_notify = NULL;
e27c41d5
JS
1657 destroy_workqueue(adev->dm.delayed_hpd_wq);
1658 adev->dm.delayed_hpd_wq = NULL;
81927e28
JS
1659 }
1660
743b9786
NK
1661 if (adev->dm.dmub_bo)
1662 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1663 &adev->dm.dmub_bo_gpu_addr,
1664 &adev->dm.dmub_bo_cpu_addr);
52704fca 1665
006c26a0
AG
1666 if (adev->dm.hpd_rx_offload_wq) {
1667 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1668 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1669 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1670 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1671 }
1672 }
1673
1674 kfree(adev->dm.hpd_rx_offload_wq);
1675 adev->dm.hpd_rx_offload_wq = NULL;
1676 }
1677
c8bdf2b6
ED
1678 /* DC Destroy TODO: Replace destroy DAL */
1679 if (adev->dm.dc)
1680 dc_destroy(&adev->dm.dc);
4562236b
HW
1681 /*
1682 * TODO: pageflip, vlank interrupt
1683 *
1684 * amdgpu_dm_irq_fini(adev);
1685 */
1686
1687 if (adev->dm.cgs_device) {
1688 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1689 adev->dm.cgs_device = NULL;
1690 }
1691 if (adev->dm.freesync_module) {
1692 mod_freesync_destroy(adev->dm.freesync_module);
1693 adev->dm.freesync_module = NULL;
1694 }
674e78ac 1695
6ce8f316 1696 mutex_destroy(&adev->dm.audio_lock);
674e78ac
NK
1697 mutex_destroy(&adev->dm.dc_lock);
1698
4562236b
HW
1699 return;
1700}
1701
a94d5569 1702static int load_dmcu_fw(struct amdgpu_device *adev)
4562236b 1703{
a7669aff 1704 const char *fw_name_dmcu = NULL;
a94d5569
DF
1705 int r;
1706 const struct dmcu_firmware_header_v1_0 *hdr;
1707
1708 switch(adev->asic_type) {
55e56389
MR
1709#if defined(CONFIG_DRM_AMD_DC_SI)
1710 case CHIP_TAHITI:
1711 case CHIP_PITCAIRN:
1712 case CHIP_VERDE:
1713 case CHIP_OLAND:
1714#endif
a94d5569
DF
1715 case CHIP_BONAIRE:
1716 case CHIP_HAWAII:
1717 case CHIP_KAVERI:
1718 case CHIP_KABINI:
1719 case CHIP_MULLINS:
1720 case CHIP_TONGA:
1721 case CHIP_FIJI:
1722 case CHIP_CARRIZO:
1723 case CHIP_STONEY:
1724 case CHIP_POLARIS11:
1725 case CHIP_POLARIS10:
1726 case CHIP_POLARIS12:
1727 case CHIP_VEGAM:
1728 case CHIP_VEGA10:
1729 case CHIP_VEGA12:
1730 case CHIP_VEGA20:
1731 return 0;
5ea23931
RL
1732 case CHIP_NAVI12:
1733 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1734 break;
a94d5569 1735 case CHIP_RAVEN:
a7669aff
HW
1736 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1737 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1738 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1739 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1740 else
a7669aff 1741 return 0;
a94d5569
DF
1742 break;
1743 default:
1d789535 1744 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
1745 case IP_VERSION(2, 0, 2):
1746 case IP_VERSION(2, 0, 3):
1747 case IP_VERSION(2, 0, 0):
1748 case IP_VERSION(2, 1, 0):
1749 case IP_VERSION(3, 0, 0):
1750 case IP_VERSION(3, 0, 2):
1751 case IP_VERSION(3, 0, 3):
1752 case IP_VERSION(3, 0, 1):
1753 case IP_VERSION(3, 1, 2):
1754 case IP_VERSION(3, 1, 3):
1755 return 0;
1756 default:
1757 break;
1758 }
a94d5569 1759 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
59d0f396 1760 return -EINVAL;
a94d5569
DF
1761 }
1762
1763 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1764 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1765 return 0;
1766 }
1767
1768 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1769 if (r == -ENOENT) {
1770 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1771 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1772 adev->dm.fw_dmcu = NULL;
1773 return 0;
1774 }
1775 if (r) {
1776 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1777 fw_name_dmcu);
1778 return r;
1779 }
1780
1781 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1782 if (r) {
1783 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1784 fw_name_dmcu);
1785 release_firmware(adev->dm.fw_dmcu);
1786 adev->dm.fw_dmcu = NULL;
1787 return r;
1788 }
1789
1790 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1791 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1792 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1793 adev->firmware.fw_size +=
1794 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1795
1796 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1797 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1798 adev->firmware.fw_size +=
1799 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1800
ee6e89c0
DF
1801 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1802
a94d5569
DF
1803 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1804
4562236b
HW
1805 return 0;
1806}
1807
743b9786
NK
1808static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1809{
1810 struct amdgpu_device *adev = ctx;
1811
1812 return dm_read_reg(adev->dm.dc->ctx, address);
1813}
1814
1815static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1816 uint32_t value)
1817{
1818 struct amdgpu_device *adev = ctx;
1819
1820 return dm_write_reg(adev->dm.dc->ctx, address, value);
1821}
1822
1823static int dm_dmub_sw_init(struct amdgpu_device *adev)
1824{
1825 struct dmub_srv_create_params create_params;
8c7aea40
NK
1826 struct dmub_srv_region_params region_params;
1827 struct dmub_srv_region_info region_info;
1828 struct dmub_srv_fb_params fb_params;
1829 struct dmub_srv_fb_info *fb_info;
1830 struct dmub_srv *dmub_srv;
743b9786
NK
1831 const struct dmcub_firmware_header_v1_0 *hdr;
1832 const char *fw_name_dmub;
1833 enum dmub_asic dmub_asic;
1834 enum dmub_status status;
1835 int r;
1836
1d789535 1837 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2 1838 case IP_VERSION(2, 1, 0):
743b9786
NK
1839 dmub_asic = DMUB_ASIC_DCN21;
1840 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
71c0fd92
RL
1841 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1842 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
743b9786 1843 break;
c08182f2 1844 case IP_VERSION(3, 0, 0):
1d789535 1845 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
c08182f2
AD
1846 dmub_asic = DMUB_ASIC_DCN30;
1847 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1848 } else {
1849 dmub_asic = DMUB_ASIC_DCN30;
1850 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1851 }
79037324 1852 break;
c08182f2 1853 case IP_VERSION(3, 0, 1):
469989ca
RL
1854 dmub_asic = DMUB_ASIC_DCN301;
1855 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1856 break;
c08182f2 1857 case IP_VERSION(3, 0, 2):
2a411205
BL
1858 dmub_asic = DMUB_ASIC_DCN302;
1859 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1860 break;
c08182f2 1861 case IP_VERSION(3, 0, 3):
656fe9b6
AP
1862 dmub_asic = DMUB_ASIC_DCN303;
1863 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1864 break;
c08182f2
AD
1865 case IP_VERSION(3, 1, 2):
1866 case IP_VERSION(3, 1, 3):
3137f792 1867 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1ebcaebd
NK
1868 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1869 break;
743b9786
NK
1870
1871 default:
1872 /* ASIC doesn't support DMUB. */
1873 return 0;
1874 }
1875
743b9786
NK
1876 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1877 if (r) {
1878 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1879 return 0;
1880 }
1881
1882 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1883 if (r) {
1884 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1885 return 0;
1886 }
1887
743b9786 1888 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
72a74a18 1889 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
743b9786 1890
9a6ed547
NK
1891 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1892 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1893 AMDGPU_UCODE_ID_DMCUB;
1894 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1895 adev->dm.dmub_fw;
1896 adev->firmware.fw_size +=
1897 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
743b9786 1898
9a6ed547
NK
1899 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1900 adev->dm.dmcub_fw_version);
1901 }
1902
743b9786 1903
8c7aea40
NK
1904 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1905 dmub_srv = adev->dm.dmub_srv;
1906
1907 if (!dmub_srv) {
1908 DRM_ERROR("Failed to allocate DMUB service!\n");
1909 return -ENOMEM;
1910 }
1911
1912 memset(&create_params, 0, sizeof(create_params));
1913 create_params.user_ctx = adev;
1914 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1915 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1916 create_params.asic = dmub_asic;
1917
1918 /* Create the DMUB service. */
1919 status = dmub_srv_create(dmub_srv, &create_params);
1920 if (status != DMUB_STATUS_OK) {
1921 DRM_ERROR("Error creating DMUB service: %d\n", status);
1922 return -EINVAL;
1923 }
1924
1925 /* Calculate the size of all the regions for the DMUB service. */
1926 memset(&region_params, 0, sizeof(region_params));
1927
1928 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1929 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1930 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1931 region_params.vbios_size = adev->bios_size;
0922b899 1932 region_params.fw_bss_data = region_params.bss_data_size ?
1f0674fd
NK
1933 adev->dm.dmub_fw->data +
1934 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
0922b899 1935 le32_to_cpu(hdr->inst_const_bytes) : NULL;
a576b345
NK
1936 region_params.fw_inst_const =
1937 adev->dm.dmub_fw->data +
1938 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1939 PSP_HEADER_BYTES;
8c7aea40
NK
1940
1941 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1942 &region_info);
1943
1944 if (status != DMUB_STATUS_OK) {
1945 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1946 return -EINVAL;
1947 }
1948
1949 /*
1950 * Allocate a framebuffer based on the total size of all the regions.
1951 * TODO: Move this into GART.
1952 */
1953 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1954 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1955 &adev->dm.dmub_bo_gpu_addr,
1956 &adev->dm.dmub_bo_cpu_addr);
1957 if (r)
1958 return r;
1959
1960 /* Rebase the regions on the framebuffer address. */
1961 memset(&fb_params, 0, sizeof(fb_params));
1962 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1963 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1964 fb_params.region_info = &region_info;
1965
1966 adev->dm.dmub_fb_info =
1967 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1968 fb_info = adev->dm.dmub_fb_info;
1969
1970 if (!fb_info) {
1971 DRM_ERROR(
1972 "Failed to allocate framebuffer info for DMUB service!\n");
1973 return -ENOMEM;
1974 }
1975
1976 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1977 if (status != DMUB_STATUS_OK) {
1978 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1979 return -EINVAL;
1980 }
1981
743b9786
NK
1982 return 0;
1983}
1984
a94d5569
DF
1985static int dm_sw_init(void *handle)
1986{
1987 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
743b9786
NK
1988 int r;
1989
1990 r = dm_dmub_sw_init(adev);
1991 if (r)
1992 return r;
a94d5569
DF
1993
1994 return load_dmcu_fw(adev);
1995}
1996
4562236b
HW
1997static int dm_sw_fini(void *handle)
1998{
a94d5569
DF
1999 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2000
8c7aea40
NK
2001 kfree(adev->dm.dmub_fb_info);
2002 adev->dm.dmub_fb_info = NULL;
2003
743b9786
NK
2004 if (adev->dm.dmub_srv) {
2005 dmub_srv_destroy(adev->dm.dmub_srv);
2006 adev->dm.dmub_srv = NULL;
2007 }
2008
75e1658e
ND
2009 release_firmware(adev->dm.dmub_fw);
2010 adev->dm.dmub_fw = NULL;
743b9786 2011
75e1658e
ND
2012 release_firmware(adev->dm.fw_dmcu);
2013 adev->dm.fw_dmcu = NULL;
a94d5569 2014
4562236b
HW
2015 return 0;
2016}
2017
7abcf6b5 2018static int detect_mst_link_for_all_connectors(struct drm_device *dev)
4562236b 2019{
c84dec2f 2020 struct amdgpu_dm_connector *aconnector;
4562236b 2021 struct drm_connector *connector;
f8d2d39e 2022 struct drm_connector_list_iter iter;
7abcf6b5 2023 int ret = 0;
4562236b 2024
f8d2d39e
LP
2025 drm_connector_list_iter_begin(dev, &iter);
2026 drm_for_each_connector_iter(connector, &iter) {
b349f76e 2027 aconnector = to_amdgpu_dm_connector(connector);
30ec2b97
JFZ
2028 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2029 aconnector->mst_mgr.aux) {
f1ad2f5e 2030 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
f8d2d39e
LP
2031 aconnector,
2032 aconnector->base.base.id);
7abcf6b5
AG
2033
2034 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2035 if (ret < 0) {
2036 DRM_ERROR("DM_MST: Failed to start MST\n");
f8d2d39e
LP
2037 aconnector->dc_link->type =
2038 dc_connection_single;
2039 break;
7abcf6b5 2040 }
f8d2d39e 2041 }
4562236b 2042 }
f8d2d39e 2043 drm_connector_list_iter_end(&iter);
4562236b 2044
7abcf6b5
AG
2045 return ret;
2046}
2047
2048static int dm_late_init(void *handle)
2049{
42e67c3b 2050 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7abcf6b5 2051
bbf854dc
DF
2052 struct dmcu_iram_parameters params;
2053 unsigned int linear_lut[16];
2054 int i;
17bdb4a8 2055 struct dmcu *dmcu = NULL;
bbf854dc 2056
17bdb4a8
JFZ
2057 dmcu = adev->dm.dc->res_pool->dmcu;
2058
bbf854dc
DF
2059 for (i = 0; i < 16; i++)
2060 linear_lut[i] = 0xFFFF * i / 15;
2061
2062 params.set = 0;
75068994 2063 params.backlight_ramping_override = false;
bbf854dc
DF
2064 params.backlight_ramping_start = 0xCCCC;
2065 params.backlight_ramping_reduction = 0xCCCCCCCC;
2066 params.backlight_lut_array_size = 16;
2067 params.backlight_lut_array = linear_lut;
2068
2ad0cdf9
AK
2069 /* Min backlight level after ABM reduction, Don't allow below 1%
2070 * 0xFFFF x 0.01 = 0x28F
2071 */
2072 params.min_abm_backlight = 0x28F;
5cb32419 2073 /* In the case where abm is implemented on dmcub,
6e568e43
JW
2074 * dmcu object will be null.
2075 * ABM 2.4 and up are implemented on dmcub.
2076 */
2077 if (dmcu) {
2078 if (!dmcu_load_iram(dmcu, params))
2079 return -EINVAL;
2080 } else if (adev->dm.dc->ctx->dmub_srv) {
2081 struct dc_link *edp_links[MAX_NUM_EDP];
2082 int edp_num;
bbf854dc 2083
6e568e43
JW
2084 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2085 for (i = 0; i < edp_num; i++) {
2086 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2087 return -EINVAL;
2088 }
2089 }
bbf854dc 2090
4a580877 2091 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
4562236b
HW
2092}
2093
2094static void s3_handle_mst(struct drm_device *dev, bool suspend)
2095{
c84dec2f 2096 struct amdgpu_dm_connector *aconnector;
4562236b 2097 struct drm_connector *connector;
f8d2d39e 2098 struct drm_connector_list_iter iter;
fe7553be
LP
2099 struct drm_dp_mst_topology_mgr *mgr;
2100 int ret;
2101 bool need_hotplug = false;
4562236b 2102
f8d2d39e
LP
2103 drm_connector_list_iter_begin(dev, &iter);
2104 drm_for_each_connector_iter(connector, &iter) {
fe7553be
LP
2105 aconnector = to_amdgpu_dm_connector(connector);
2106 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2107 aconnector->mst_port)
2108 continue;
2109
2110 mgr = &aconnector->mst_mgr;
2111
2112 if (suspend) {
2113 drm_dp_mst_topology_mgr_suspend(mgr);
2114 } else {
6f85f738 2115 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
fe7553be
LP
2116 if (ret < 0) {
2117 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2118 need_hotplug = true;
2119 }
2120 }
4562236b 2121 }
f8d2d39e 2122 drm_connector_list_iter_end(&iter);
fe7553be
LP
2123
2124 if (need_hotplug)
2125 drm_kms_helper_hotplug_event(dev);
4562236b
HW
2126}
2127
9340dfd3
HW
2128static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2129{
2130 struct smu_context *smu = &adev->smu;
2131 int ret = 0;
2132
2133 if (!is_support_sw_smu(adev))
2134 return 0;
2135
2136 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
2137 * on window driver dc implementation.
2138 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2139 * should be passed to smu during boot up and resume from s3.
2140 * boot up: dc calculate dcn watermark clock settings within dc_create,
2141 * dcn20_resource_construct
2142 * then call pplib functions below to pass the settings to smu:
2143 * smu_set_watermarks_for_clock_ranges
2144 * smu_set_watermarks_table
2145 * navi10_set_watermarks_table
2146 * smu_write_watermarks_table
2147 *
2148 * For Renoir, clock settings of dcn watermark are also fixed values.
2149 * dc has implemented different flow for window driver:
2150 * dc_hardware_init / dc_set_power_state
2151 * dcn10_init_hw
2152 * notify_wm_ranges
2153 * set_wm_ranges
2154 * -- Linux
2155 * smu_set_watermarks_for_clock_ranges
2156 * renoir_set_watermarks_table
2157 * smu_write_watermarks_table
2158 *
2159 * For Linux,
2160 * dc_hardware_init -> amdgpu_dm_init
2161 * dc_set_power_state --> dm_resume
2162 *
2163 * therefore, this function apply to navi10/12/14 but not Renoir
2164 * *
2165 */
1d789535 2166 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
2167 case IP_VERSION(2, 0, 2):
2168 case IP_VERSION(2, 0, 0):
9340dfd3
HW
2169 break;
2170 default:
2171 return 0;
2172 }
2173
e7a95eea
EQ
2174 ret = smu_write_watermarks_table(smu);
2175 if (ret) {
2176 DRM_ERROR("Failed to update WMTABLE!\n");
2177 return ret;
9340dfd3
HW
2178 }
2179
9340dfd3
HW
2180 return 0;
2181}
2182
b8592b48
LL
2183/**
2184 * dm_hw_init() - Initialize DC device
28d687ea 2185 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
2186 *
2187 * Initialize the &struct amdgpu_display_manager device. This involves calling
2188 * the initializers of each DM component, then populating the struct with them.
2189 *
2190 * Although the function implies hardware initialization, both hardware and
2191 * software are initialized here. Splitting them out to their relevant init
2192 * hooks is a future TODO item.
2193 *
2194 * Some notable things that are initialized here:
2195 *
2196 * - Display Core, both software and hardware
2197 * - DC modules that we need (freesync and color management)
2198 * - DRM software states
2199 * - Interrupt sources and handlers
2200 * - Vblank support
2201 * - Debug FS entries, if enabled
2202 */
4562236b
HW
2203static int dm_hw_init(void *handle)
2204{
2205 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2206 /* Create DAL display manager */
2207 amdgpu_dm_init(adev);
4562236b
HW
2208 amdgpu_dm_hpd_init(adev);
2209
4562236b
HW
2210 return 0;
2211}
2212
b8592b48
LL
2213/**
2214 * dm_hw_fini() - Teardown DC device
28d687ea 2215 * @handle: The base driver device containing the amdgpu_dm device.
b8592b48
LL
2216 *
2217 * Teardown components within &struct amdgpu_display_manager that require
2218 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2219 * were loaded. Also flush IRQ workqueues and disable them.
2220 */
4562236b
HW
2221static int dm_hw_fini(void *handle)
2222{
2223 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2224
2225 amdgpu_dm_hpd_fini(adev);
2226
2227 amdgpu_dm_irq_fini(adev);
21de3396 2228 amdgpu_dm_fini(adev);
4562236b
HW
2229 return 0;
2230}
2231
cdaae837
BL
2232
2233static int dm_enable_vblank(struct drm_crtc *crtc);
2234static void dm_disable_vblank(struct drm_crtc *crtc);
2235
2236static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2237 struct dc_state *state, bool enable)
2238{
2239 enum dc_irq_source irq_source;
2240 struct amdgpu_crtc *acrtc;
2241 int rc = -EBUSY;
2242 int i = 0;
2243
2244 for (i = 0; i < state->stream_count; i++) {
2245 acrtc = get_crtc_by_otg_inst(
2246 adev, state->stream_status[i].primary_otg_inst);
2247
2248 if (acrtc && state->stream_status[i].plane_count != 0) {
2249 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2250 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4711c033
LT
2251 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2252 acrtc->crtc_id, enable ? "en" : "dis", rc);
cdaae837
BL
2253 if (rc)
2254 DRM_WARN("Failed to %s pflip interrupts\n",
2255 enable ? "enable" : "disable");
2256
2257 if (enable) {
2258 rc = dm_enable_vblank(&acrtc->base);
2259 if (rc)
2260 DRM_WARN("Failed to enable vblank interrupts\n");
2261 } else {
2262 dm_disable_vblank(&acrtc->base);
2263 }
2264
2265 }
2266 }
2267
2268}
2269
dfd84d90 2270static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
cdaae837
BL
2271{
2272 struct dc_state *context = NULL;
2273 enum dc_status res = DC_ERROR_UNEXPECTED;
2274 int i;
2275 struct dc_stream_state *del_streams[MAX_PIPES];
2276 int del_streams_count = 0;
2277
2278 memset(del_streams, 0, sizeof(del_streams));
2279
2280 context = dc_create_state(dc);
2281 if (context == NULL)
2282 goto context_alloc_fail;
2283
2284 dc_resource_state_copy_construct_current(dc, context);
2285
2286 /* First remove from context all streams */
2287 for (i = 0; i < context->stream_count; i++) {
2288 struct dc_stream_state *stream = context->streams[i];
2289
2290 del_streams[del_streams_count++] = stream;
2291 }
2292
2293 /* Remove all planes for removed streams and then remove the streams */
2294 for (i = 0; i < del_streams_count; i++) {
2295 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2296 res = DC_FAIL_DETACH_SURFACES;
2297 goto fail;
2298 }
2299
2300 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2301 if (res != DC_OK)
2302 goto fail;
2303 }
2304
2305
2306 res = dc_validate_global_state(dc, context, false);
2307
2308 if (res != DC_OK) {
2309 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
2310 goto fail;
2311 }
2312
2313 res = dc_commit_state(dc, context);
2314
2315fail:
2316 dc_release_state(context);
2317
2318context_alloc_fail:
2319 return res;
2320}
2321
8e794421
WL
2322static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2323{
2324 int i;
2325
2326 if (dm->hpd_rx_offload_wq) {
2327 for (i = 0; i < dm->dc->caps.max_links; i++)
2328 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2329 }
2330}
2331
4562236b
HW
2332static int dm_suspend(void *handle)
2333{
2334 struct amdgpu_device *adev = handle;
2335 struct amdgpu_display_manager *dm = &adev->dm;
2336 int ret = 0;
4562236b 2337
53b3f8f4 2338 if (amdgpu_in_reset(adev)) {
cdaae837 2339 mutex_lock(&dm->dc_lock);
98ab5f35
BL
2340
2341#if defined(CONFIG_DRM_AMD_DC_DCN)
2342 dc_allow_idle_optimizations(adev->dm.dc, false);
2343#endif
2344
cdaae837
BL
2345 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2346
2347 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2348
2349 amdgpu_dm_commit_zero_streams(dm->dc);
2350
2351 amdgpu_dm_irq_suspend(adev);
2352
8e794421
WL
2353 hpd_rx_irq_work_suspend(dm);
2354
cdaae837
BL
2355 return ret;
2356 }
4562236b 2357
d2f0b53b 2358 WARN_ON(adev->dm.cached_state);
4a580877 2359 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
d2f0b53b 2360
4a580877 2361 s3_handle_mst(adev_to_drm(adev), true);
4562236b 2362
4562236b
HW
2363 amdgpu_dm_irq_suspend(adev);
2364
8e794421
WL
2365 hpd_rx_irq_work_suspend(dm);
2366
32f5062d 2367 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
4562236b 2368
1c2075d4 2369 return 0;
4562236b
HW
2370}
2371
1daf8c63
AD
2372static struct amdgpu_dm_connector *
2373amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2374 struct drm_crtc *crtc)
4562236b
HW
2375{
2376 uint32_t i;
c2cea706 2377 struct drm_connector_state *new_con_state;
4562236b
HW
2378 struct drm_connector *connector;
2379 struct drm_crtc *crtc_from_state;
2380
c2cea706
LSL
2381 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2382 crtc_from_state = new_con_state->crtc;
4562236b
HW
2383
2384 if (crtc_from_state == crtc)
c84dec2f 2385 return to_amdgpu_dm_connector(connector);
4562236b
HW
2386 }
2387
2388 return NULL;
2389}
2390
fbbdadf2
BL
2391static void emulated_link_detect(struct dc_link *link)
2392{
2393 struct dc_sink_init_data sink_init_data = { 0 };
2394 struct display_sink_capability sink_caps = { 0 };
2395 enum dc_edid_status edid_status;
2396 struct dc_context *dc_ctx = link->ctx;
2397 struct dc_sink *sink = NULL;
2398 struct dc_sink *prev_sink = NULL;
2399
2400 link->type = dc_connection_none;
2401 prev_sink = link->local_sink;
2402
30164a16
VL
2403 if (prev_sink)
2404 dc_sink_release(prev_sink);
fbbdadf2
BL
2405
2406 switch (link->connector_signal) {
2407 case SIGNAL_TYPE_HDMI_TYPE_A: {
2408 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2409 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2410 break;
2411 }
2412
2413 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2414 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2415 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2416 break;
2417 }
2418
2419 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2420 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2421 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2422 break;
2423 }
2424
2425 case SIGNAL_TYPE_LVDS: {
2426 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2427 sink_caps.signal = SIGNAL_TYPE_LVDS;
2428 break;
2429 }
2430
2431 case SIGNAL_TYPE_EDP: {
2432 sink_caps.transaction_type =
2433 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2434 sink_caps.signal = SIGNAL_TYPE_EDP;
2435 break;
2436 }
2437
2438 case SIGNAL_TYPE_DISPLAY_PORT: {
2439 sink_caps.transaction_type =
2440 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2441 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2442 break;
2443 }
2444
2445 default:
2446 DC_ERROR("Invalid connector type! signal:%d\n",
2447 link->connector_signal);
2448 return;
2449 }
2450
2451 sink_init_data.link = link;
2452 sink_init_data.sink_signal = sink_caps.signal;
2453
2454 sink = dc_sink_create(&sink_init_data);
2455 if (!sink) {
2456 DC_ERROR("Failed to create sink!\n");
2457 return;
2458 }
2459
dcd5fb82 2460 /* dc_sink_create returns a new reference */
fbbdadf2
BL
2461 link->local_sink = sink;
2462
2463 edid_status = dm_helpers_read_local_edid(
2464 link->ctx,
2465 link,
2466 sink);
2467
2468 if (edid_status != EDID_OK)
2469 DC_ERROR("Failed to read EDID");
2470
2471}
2472
cdaae837
BL
2473static void dm_gpureset_commit_state(struct dc_state *dc_state,
2474 struct amdgpu_display_manager *dm)
2475{
2476 struct {
2477 struct dc_surface_update surface_updates[MAX_SURFACES];
2478 struct dc_plane_info plane_infos[MAX_SURFACES];
2479 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2480 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2481 struct dc_stream_update stream_update;
2482 } * bundle;
2483 int k, m;
2484
2485 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2486
2487 if (!bundle) {
2488 dm_error("Failed to allocate update bundle\n");
2489 goto cleanup;
2490 }
2491
2492 for (k = 0; k < dc_state->stream_count; k++) {
2493 bundle->stream_update.stream = dc_state->streams[k];
2494
2495 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2496 bundle->surface_updates[m].surface =
2497 dc_state->stream_status->plane_states[m];
2498 bundle->surface_updates[m].surface->force_full_update =
2499 true;
2500 }
2501 dc_commit_updates_for_stream(
2502 dm->dc, bundle->surface_updates,
2503 dc_state->stream_status->plane_count,
efc8278e 2504 dc_state->streams[k], &bundle->stream_update, dc_state);
cdaae837
BL
2505 }
2506
2507cleanup:
2508 kfree(bundle);
2509
2510 return;
2511}
2512
035f5496 2513static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
3c4d55c9
AP
2514{
2515 struct dc_stream_state *stream_state;
2516 struct amdgpu_dm_connector *aconnector = link->priv;
2517 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2518 struct dc_stream_update stream_update;
2519 bool dpms_off = true;
2520
2521 memset(&stream_update, 0, sizeof(stream_update));
2522 stream_update.dpms_off = &dpms_off;
2523
2524 mutex_lock(&adev->dm.dc_lock);
2525 stream_state = dc_stream_find_from_link(link);
2526
2527 if (stream_state == NULL) {
2528 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2529 mutex_unlock(&adev->dm.dc_lock);
2530 return;
2531 }
2532
2533 stream_update.stream = stream_state;
035f5496 2534 acrtc_state->force_dpms_off = true;
3c4d55c9 2535 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
efc8278e
AJ
2536 stream_state, &stream_update,
2537 stream_state->ctx->dc->current_state);
3c4d55c9
AP
2538 mutex_unlock(&adev->dm.dc_lock);
2539}
2540
4562236b
HW
2541static int dm_resume(void *handle)
2542{
2543 struct amdgpu_device *adev = handle;
4a580877 2544 struct drm_device *ddev = adev_to_drm(adev);
4562236b 2545 struct amdgpu_display_manager *dm = &adev->dm;
c84dec2f 2546 struct amdgpu_dm_connector *aconnector;
4562236b 2547 struct drm_connector *connector;
f8d2d39e 2548 struct drm_connector_list_iter iter;
4562236b 2549 struct drm_crtc *crtc;
c2cea706 2550 struct drm_crtc_state *new_crtc_state;
fcb4019e
LSL
2551 struct dm_crtc_state *dm_new_crtc_state;
2552 struct drm_plane *plane;
2553 struct drm_plane_state *new_plane_state;
2554 struct dm_plane_state *dm_new_plane_state;
113b7a01 2555 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
fbbdadf2 2556 enum dc_connection_type new_connection_type = dc_connection_none;
cdaae837
BL
2557 struct dc_state *dc_state;
2558 int i, r, j;
4562236b 2559
53b3f8f4 2560 if (amdgpu_in_reset(adev)) {
cdaae837
BL
2561 dc_state = dm->cached_dc_state;
2562
2563 r = dm_dmub_hw_init(adev);
2564 if (r)
2565 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2566
2567 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2568 dc_resume(dm->dc);
2569
2570 amdgpu_dm_irq_resume_early(adev);
2571
2572 for (i = 0; i < dc_state->stream_count; i++) {
2573 dc_state->streams[i]->mode_changed = true;
2574 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2575 dc_state->stream_status->plane_states[j]->update_flags.raw
2576 = 0xffffffff;
2577 }
2578 }
8fe44c08 2579#if defined(CONFIG_DRM_AMD_DC_DCN)
1ebcaebd
NK
2580 /*
2581 * Resource allocation happens for link encoders for newer ASIC in
2582 * dc_validate_global_state, so we need to revalidate it.
2583 *
2584 * This shouldn't fail (it passed once before), so warn if it does.
2585 */
2586 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2587#endif
cdaae837
BL
2588
2589 WARN_ON(!dc_commit_state(dm->dc, dc_state));
4562236b 2590
cdaae837
BL
2591 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2592
2593 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2594
2595 dc_release_state(dm->cached_dc_state);
2596 dm->cached_dc_state = NULL;
2597
2598 amdgpu_dm_irq_resume_late(adev);
2599
2600 mutex_unlock(&dm->dc_lock);
2601
2602 return 0;
2603 }
113b7a01
LL
2604 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2605 dc_release_state(dm_state->context);
2606 dm_state->context = dc_create_state(dm->dc);
2607 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2608 dc_resource_state_construct(dm->dc, dm_state->context);
2609
8c7aea40
NK
2610 /* Before powering on DC we need to re-initialize DMUB. */
2611 r = dm_dmub_hw_init(adev);
2612 if (r)
2613 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2614
a80aa93d
ML
2615 /* power on hardware */
2616 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2617
4562236b
HW
2618 /* program HPD filter */
2619 dc_resume(dm->dc);
2620
4562236b
HW
2621 /*
2622 * early enable HPD Rx IRQ, should be done before set mode as short
2623 * pulse interrupts are used for MST
2624 */
2625 amdgpu_dm_irq_resume_early(adev);
2626
d20ebea8 2627 /* On resume we need to rewrite the MSTM control bits to enable MST*/
684cd480
LP
2628 s3_handle_mst(ddev, false);
2629
4562236b 2630 /* Do detection*/
f8d2d39e
LP
2631 drm_connector_list_iter_begin(ddev, &iter);
2632 drm_for_each_connector_iter(connector, &iter) {
c84dec2f 2633 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
2634
2635 /*
2636 * this is the case when traversing through already created
2637 * MST connectors, should be skipped
2638 */
2639 if (aconnector->mst_port)
2640 continue;
2641
03ea364c 2642 mutex_lock(&aconnector->hpd_lock);
fbbdadf2
BL
2643 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2644 DRM_ERROR("KMS: Failed to detect connector\n");
2645
2646 if (aconnector->base.force && new_connection_type == dc_connection_none)
2647 emulated_link_detect(aconnector->dc_link);
2648 else
2649 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3eb4eba4
RL
2650
2651 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2652 aconnector->fake_enable = false;
2653
dcd5fb82
MF
2654 if (aconnector->dc_sink)
2655 dc_sink_release(aconnector->dc_sink);
4562236b
HW
2656 aconnector->dc_sink = NULL;
2657 amdgpu_dm_update_connector_after_detect(aconnector);
03ea364c 2658 mutex_unlock(&aconnector->hpd_lock);
4562236b 2659 }
f8d2d39e 2660 drm_connector_list_iter_end(&iter);
4562236b 2661
1f6010a9 2662 /* Force mode set in atomic commit */
a80aa93d 2663 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
c2cea706 2664 new_crtc_state->active_changed = true;
4f346e65 2665
fcb4019e
LSL
2666 /*
2667 * atomic_check is expected to create the dc states. We need to release
2668 * them here, since they were duplicated as part of the suspend
2669 * procedure.
2670 */
a80aa93d 2671 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
fcb4019e
LSL
2672 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2673 if (dm_new_crtc_state->stream) {
2674 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2675 dc_stream_release(dm_new_crtc_state->stream);
2676 dm_new_crtc_state->stream = NULL;
2677 }
2678 }
2679
a80aa93d 2680 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
fcb4019e
LSL
2681 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2682 if (dm_new_plane_state->dc_state) {
2683 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2684 dc_plane_state_release(dm_new_plane_state->dc_state);
2685 dm_new_plane_state->dc_state = NULL;
2686 }
2687 }
2688
2d1af6a1 2689 drm_atomic_helper_resume(ddev, dm->cached_state);
4562236b 2690
a80aa93d 2691 dm->cached_state = NULL;
0a214e2f 2692
9faa4237 2693 amdgpu_dm_irq_resume_late(adev);
4562236b 2694
9340dfd3
HW
2695 amdgpu_dm_smu_write_watermarks_table(adev);
2696
2d1af6a1 2697 return 0;
4562236b
HW
2698}
2699
b8592b48
LL
2700/**
2701 * DOC: DM Lifecycle
2702 *
2703 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2704 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2705 * the base driver's device list to be initialized and torn down accordingly.
2706 *
2707 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2708 */
2709
4562236b
HW
2710static const struct amd_ip_funcs amdgpu_dm_funcs = {
2711 .name = "dm",
2712 .early_init = dm_early_init,
7abcf6b5 2713 .late_init = dm_late_init,
4562236b
HW
2714 .sw_init = dm_sw_init,
2715 .sw_fini = dm_sw_fini,
e9669fb7 2716 .early_fini = amdgpu_dm_early_fini,
4562236b
HW
2717 .hw_init = dm_hw_init,
2718 .hw_fini = dm_hw_fini,
2719 .suspend = dm_suspend,
2720 .resume = dm_resume,
2721 .is_idle = dm_is_idle,
2722 .wait_for_idle = dm_wait_for_idle,
2723 .check_soft_reset = dm_check_soft_reset,
2724 .soft_reset = dm_soft_reset,
2725 .set_clockgating_state = dm_set_clockgating_state,
2726 .set_powergating_state = dm_set_powergating_state,
2727};
2728
2729const struct amdgpu_ip_block_version dm_ip_block =
2730{
2731 .type = AMD_IP_BLOCK_TYPE_DCE,
2732 .major = 1,
2733 .minor = 0,
2734 .rev = 0,
2735 .funcs = &amdgpu_dm_funcs,
2736};
2737
ca3268c4 2738
b8592b48
LL
2739/**
2740 * DOC: atomic
2741 *
2742 * *WIP*
2743 */
0a323b84 2744
b3663f70 2745static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
4d4772f6 2746 .fb_create = amdgpu_display_user_framebuffer_create,
dfbbfe3c 2747 .get_format_info = amd_get_format_info,
366c1baa 2748 .output_poll_changed = drm_fb_helper_output_poll_changed,
4562236b 2749 .atomic_check = amdgpu_dm_atomic_check,
0269764a 2750 .atomic_commit = drm_atomic_helper_commit,
54f5499a
AG
2751};
2752
2753static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2754 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
4562236b
HW
2755};
2756
94562810
RS
2757static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2758{
2759 u32 max_cll, min_cll, max, min, q, r;
2760 struct amdgpu_dm_backlight_caps *caps;
2761 struct amdgpu_display_manager *dm;
2762 struct drm_connector *conn_base;
2763 struct amdgpu_device *adev;
ec11fe37 2764 struct dc_link *link = NULL;
94562810
RS
2765 static const u8 pre_computed_values[] = {
2766 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2767 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
7fd13bae 2768 int i;
94562810
RS
2769
2770 if (!aconnector || !aconnector->dc_link)
2771 return;
2772
ec11fe37 2773 link = aconnector->dc_link;
2774 if (link->connector_signal != SIGNAL_TYPE_EDP)
2775 return;
2776
94562810 2777 conn_base = &aconnector->base;
1348969a 2778 adev = drm_to_adev(conn_base->dev);
94562810 2779 dm = &adev->dm;
7fd13bae
AD
2780 for (i = 0; i < dm->num_of_edps; i++) {
2781 if (link == dm->backlight_link[i])
2782 break;
2783 }
2784 if (i >= dm->num_of_edps)
2785 return;
2786 caps = &dm->backlight_caps[i];
94562810
RS
2787 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2788 caps->aux_support = false;
2789 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2790 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2791
d0ae0b64 2792 if (caps->ext_caps->bits.oled == 1 /*||
94562810 2793 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
d0ae0b64 2794 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
94562810
RS
2795 caps->aux_support = true;
2796
7a46f05e
TI
2797 if (amdgpu_backlight == 0)
2798 caps->aux_support = false;
2799 else if (amdgpu_backlight == 1)
2800 caps->aux_support = true;
2801
94562810
RS
2802 /* From the specification (CTA-861-G), for calculating the maximum
2803 * luminance we need to use:
2804 * Luminance = 50*2**(CV/32)
2805 * Where CV is a one-byte value.
2806 * For calculating this expression we may need float point precision;
2807 * to avoid this complexity level, we take advantage that CV is divided
2808 * by a constant. From the Euclids division algorithm, we know that CV
2809 * can be written as: CV = 32*q + r. Next, we replace CV in the
2810 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2811 * need to pre-compute the value of r/32. For pre-computing the values
2812 * We just used the following Ruby line:
2813 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2814 * The results of the above expressions can be verified at
2815 * pre_computed_values.
2816 */
2817 q = max_cll >> 5;
2818 r = max_cll % 32;
2819 max = (1 << q) * pre_computed_values[r];
2820
2821 // min luminance: maxLum * (CV/255)^2 / 100
2822 q = DIV_ROUND_CLOSEST(min_cll, 255);
2823 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2824
2825 caps->aux_max_input_signal = max;
2826 caps->aux_min_input_signal = min;
2827}
2828
97e51c16
HW
2829void amdgpu_dm_update_connector_after_detect(
2830 struct amdgpu_dm_connector *aconnector)
4562236b
HW
2831{
2832 struct drm_connector *connector = &aconnector->base;
2833 struct drm_device *dev = connector->dev;
b73a22d3 2834 struct dc_sink *sink;
4562236b
HW
2835
2836 /* MST handled by drm_mst framework */
2837 if (aconnector->mst_mgr.mst_state == true)
2838 return;
2839
4562236b 2840 sink = aconnector->dc_link->local_sink;
dcd5fb82
MF
2841 if (sink)
2842 dc_sink_retain(sink);
4562236b 2843
1f6010a9
DF
2844 /*
2845 * Edid mgmt connector gets first update only in mode_valid hook and then
4562236b 2846 * the connector sink is set to either fake or physical sink depends on link status.
1f6010a9 2847 * Skip if already done during boot.
4562236b
HW
2848 */
2849 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2850 && aconnector->dc_em_sink) {
2851
1f6010a9
DF
2852 /*
2853 * For S3 resume with headless use eml_sink to fake stream
2854 * because on resume connector->sink is set to NULL
4562236b
HW
2855 */
2856 mutex_lock(&dev->mode_config.mutex);
2857
2858 if (sink) {
922aa1e1 2859 if (aconnector->dc_sink) {
98e6436d 2860 amdgpu_dm_update_freesync_caps(connector, NULL);
1f6010a9
DF
2861 /*
2862 * retain and release below are used to
2863 * bump up refcount for sink because the link doesn't point
2864 * to it anymore after disconnect, so on next crtc to connector
922aa1e1
AG
2865 * reshuffle by UMD we will get into unwanted dc_sink release
2866 */
dcd5fb82 2867 dc_sink_release(aconnector->dc_sink);
922aa1e1 2868 }
4562236b 2869 aconnector->dc_sink = sink;
dcd5fb82 2870 dc_sink_retain(aconnector->dc_sink);
98e6436d
AK
2871 amdgpu_dm_update_freesync_caps(connector,
2872 aconnector->edid);
4562236b 2873 } else {
98e6436d 2874 amdgpu_dm_update_freesync_caps(connector, NULL);
dcd5fb82 2875 if (!aconnector->dc_sink) {
4562236b 2876 aconnector->dc_sink = aconnector->dc_em_sink;
922aa1e1 2877 dc_sink_retain(aconnector->dc_sink);
dcd5fb82 2878 }
4562236b
HW
2879 }
2880
2881 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82
MF
2882
2883 if (sink)
2884 dc_sink_release(sink);
4562236b
HW
2885 return;
2886 }
2887
2888 /*
2889 * TODO: temporary guard to look for proper fix
2890 * if this sink is MST sink, we should not do anything
2891 */
dcd5fb82
MF
2892 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2893 dc_sink_release(sink);
4562236b 2894 return;
dcd5fb82 2895 }
4562236b
HW
2896
2897 if (aconnector->dc_sink == sink) {
1f6010a9
DF
2898 /*
2899 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2900 * Do nothing!!
2901 */
f1ad2f5e 2902 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
4562236b 2903 aconnector->connector_id);
dcd5fb82
MF
2904 if (sink)
2905 dc_sink_release(sink);
4562236b
HW
2906 return;
2907 }
2908
f1ad2f5e 2909 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
4562236b
HW
2910 aconnector->connector_id, aconnector->dc_sink, sink);
2911
2912 mutex_lock(&dev->mode_config.mutex);
2913
1f6010a9
DF
2914 /*
2915 * 1. Update status of the drm connector
2916 * 2. Send an event and let userspace tell us what to do
2917 */
4562236b 2918 if (sink) {
1f6010a9
DF
2919 /*
2920 * TODO: check if we still need the S3 mode update workaround.
2921 * If yes, put it here.
2922 */
c64b0d6b 2923 if (aconnector->dc_sink) {
98e6436d 2924 amdgpu_dm_update_freesync_caps(connector, NULL);
c64b0d6b
VL
2925 dc_sink_release(aconnector->dc_sink);
2926 }
4562236b
HW
2927
2928 aconnector->dc_sink = sink;
dcd5fb82 2929 dc_sink_retain(aconnector->dc_sink);
900b3cb1 2930 if (sink->dc_edid.length == 0) {
4562236b 2931 aconnector->edid = NULL;
e6142dd5
AP
2932 if (aconnector->dc_link->aux_mode) {
2933 drm_dp_cec_unset_edid(
2934 &aconnector->dm_dp_aux.aux);
2935 }
900b3cb1 2936 } else {
4562236b 2937 aconnector->edid =
e6142dd5 2938 (struct edid *)sink->dc_edid.raw_edid;
4562236b 2939
c555f023 2940 drm_connector_update_edid_property(connector,
e6142dd5 2941 aconnector->edid);
e6142dd5
AP
2942 if (aconnector->dc_link->aux_mode)
2943 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2944 aconnector->edid);
4562236b 2945 }
e6142dd5 2946
98e6436d 2947 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
94562810 2948 update_connector_ext_caps(aconnector);
4562236b 2949 } else {
e86e8947 2950 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
98e6436d 2951 amdgpu_dm_update_freesync_caps(connector, NULL);
c555f023 2952 drm_connector_update_edid_property(connector, NULL);
4562236b 2953 aconnector->num_modes = 0;
dcd5fb82 2954 dc_sink_release(aconnector->dc_sink);
4562236b 2955 aconnector->dc_sink = NULL;
5326c452 2956 aconnector->edid = NULL;
0c8620d6
BL
2957#ifdef CONFIG_DRM_AMD_DC_HDCP
2958 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2959 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2960 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2961#endif
4562236b
HW
2962 }
2963
2964 mutex_unlock(&dev->mode_config.mutex);
dcd5fb82 2965
0f877894
OV
2966 update_subconnector_property(aconnector);
2967
dcd5fb82
MF
2968 if (sink)
2969 dc_sink_release(sink);
4562236b
HW
2970}
2971
e27c41d5 2972static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
4562236b 2973{
4562236b
HW
2974 struct drm_connector *connector = &aconnector->base;
2975 struct drm_device *dev = connector->dev;
fbbdadf2 2976 enum dc_connection_type new_connection_type = dc_connection_none;
1348969a 2977 struct amdgpu_device *adev = drm_to_adev(dev);
97f6c917 2978 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
035f5496 2979 struct dm_crtc_state *dm_crtc_state = NULL;
4562236b 2980
b972b4f9
HW
2981 if (adev->dm.disable_hpd_irq)
2982 return;
2983
035f5496
AP
2984 if (dm_con_state->base.state && dm_con_state->base.crtc)
2985 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2986 dm_con_state->base.state,
2987 dm_con_state->base.crtc));
1f6010a9
DF
2988 /*
2989 * In case of failure or MST no need to update connector status or notify the OS
2990 * since (for MST case) MST does this in its own context.
4562236b
HW
2991 */
2992 mutex_lock(&aconnector->hpd_lock);
2e0ac3d6 2993
0c8620d6 2994#ifdef CONFIG_DRM_AMD_DC_HDCP
97f6c917 2995 if (adev->dm.hdcp_workqueue) {
96a3b32e 2996 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
97f6c917
BL
2997 dm_con_state->update_hdcp = true;
2998 }
0c8620d6 2999#endif
2e0ac3d6
HW
3000 if (aconnector->fake_enable)
3001 aconnector->fake_enable = false;
3002
fbbdadf2
BL
3003 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3004 DRM_ERROR("KMS: Failed to detect connector\n");
3005
3006 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3007 emulated_link_detect(aconnector->dc_link);
3008
fbbdadf2
BL
3009 drm_modeset_lock_all(dev);
3010 dm_restore_drm_connector_state(dev, connector);
3011 drm_modeset_unlock_all(dev);
3012
3013 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3014 drm_kms_helper_hotplug_event(dev);
3015
3016 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3c4d55c9 3017 if (new_connection_type == dc_connection_none &&
035f5496
AP
3018 aconnector->dc_link->type == dc_connection_none &&
3019 dm_crtc_state)
3020 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
4562236b 3021
3c4d55c9 3022 amdgpu_dm_update_connector_after_detect(aconnector);
4562236b
HW
3023
3024 drm_modeset_lock_all(dev);
3025 dm_restore_drm_connector_state(dev, connector);
3026 drm_modeset_unlock_all(dev);
3027
3028 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3029 drm_kms_helper_hotplug_event(dev);
3030 }
3031 mutex_unlock(&aconnector->hpd_lock);
3032
3033}
3034
e27c41d5
JS
3035static void handle_hpd_irq(void *param)
3036{
3037 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3038
3039 handle_hpd_irq_helper(aconnector);
3040
3041}
3042
8e794421 3043static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
4562236b
HW
3044{
3045 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3046 uint8_t dret;
3047 bool new_irq_handled = false;
3048 int dpcd_addr;
3049 int dpcd_bytes_to_read;
3050
3051 const int max_process_count = 30;
3052 int process_count = 0;
3053
3054 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3055
3056 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3057 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3058 /* DPCD 0x200 - 0x201 for downstream IRQ */
3059 dpcd_addr = DP_SINK_COUNT;
3060 } else {
3061 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3062 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3063 dpcd_addr = DP_SINK_COUNT_ESI;
3064 }
3065
3066 dret = drm_dp_dpcd_read(
3067 &aconnector->dm_dp_aux.aux,
3068 dpcd_addr,
3069 esi,
3070 dpcd_bytes_to_read);
3071
3072 while (dret == dpcd_bytes_to_read &&
3073 process_count < max_process_count) {
3074 uint8_t retry;
3075 dret = 0;
3076
3077 process_count++;
3078
f1ad2f5e 3079 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
4562236b
HW
3080 /* handle HPD short pulse irq */
3081 if (aconnector->mst_mgr.mst_state)
3082 drm_dp_mst_hpd_irq(
3083 &aconnector->mst_mgr,
3084 esi,
3085 &new_irq_handled);
4562236b
HW
3086
3087 if (new_irq_handled) {
3088 /* ACK at DPCD to notify down stream */
3089 const int ack_dpcd_bytes_to_write =
3090 dpcd_bytes_to_read - 1;
3091
3092 for (retry = 0; retry < 3; retry++) {
3093 uint8_t wret;
3094
3095 wret = drm_dp_dpcd_write(
3096 &aconnector->dm_dp_aux.aux,
3097 dpcd_addr + 1,
3098 &esi[1],
3099 ack_dpcd_bytes_to_write);
3100 if (wret == ack_dpcd_bytes_to_write)
3101 break;
3102 }
3103
1f6010a9 3104 /* check if there is new irq to be handled */
4562236b
HW
3105 dret = drm_dp_dpcd_read(
3106 &aconnector->dm_dp_aux.aux,
3107 dpcd_addr,
3108 esi,
3109 dpcd_bytes_to_read);
3110
3111 new_irq_handled = false;
d4a6e8a9 3112 } else {
4562236b 3113 break;
d4a6e8a9 3114 }
4562236b
HW
3115 }
3116
3117 if (process_count == max_process_count)
f1ad2f5e 3118 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
4562236b
HW
3119}
3120
8e794421
WL
3121static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3122 union hpd_irq_data hpd_irq_data)
3123{
3124 struct hpd_rx_irq_offload_work *offload_work =
3125 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3126
3127 if (!offload_work) {
3128 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3129 return;
3130 }
3131
3132 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3133 offload_work->data = hpd_irq_data;
3134 offload_work->offload_wq = offload_wq;
3135
3136 queue_work(offload_wq->wq, &offload_work->work);
3137 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3138}
3139
4562236b
HW
3140static void handle_hpd_rx_irq(void *param)
3141{
c84dec2f 3142 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
4562236b
HW
3143 struct drm_connector *connector = &aconnector->base;
3144 struct drm_device *dev = connector->dev;
53cbf65c 3145 struct dc_link *dc_link = aconnector->dc_link;
4562236b 3146 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
c8ea79a8 3147 bool result = false;
fbbdadf2 3148 enum dc_connection_type new_connection_type = dc_connection_none;
c8ea79a8 3149 struct amdgpu_device *adev = drm_to_adev(dev);
2a0f9270 3150 union hpd_irq_data hpd_irq_data;
8e794421
WL
3151 bool link_loss = false;
3152 bool has_left_work = false;
3153 int idx = aconnector->base.index;
3154 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
2a0f9270
BL
3155
3156 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
4562236b 3157
b972b4f9
HW
3158 if (adev->dm.disable_hpd_irq)
3159 return;
3160
1f6010a9
DF
3161 /*
3162 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
4562236b
HW
3163 * conflict, after implement i2c helper, this mutex should be
3164 * retired.
3165 */
b86e7eef 3166 mutex_lock(&aconnector->hpd_lock);
4562236b 3167
8e794421
WL
3168 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3169 &link_loss, true, &has_left_work);
3083a984 3170
8e794421
WL
3171 if (!has_left_work)
3172 goto out;
3173
3174 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3175 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3176 goto out;
3177 }
3178
3179 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3180 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3181 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3182 dm_handle_mst_sideband_msg(aconnector);
3083a984
QZ
3183 goto out;
3184 }
3083a984 3185
8e794421
WL
3186 if (link_loss) {
3187 bool skip = false;
d2aa1356 3188
8e794421
WL
3189 spin_lock(&offload_wq->offload_lock);
3190 skip = offload_wq->is_handling_link_loss;
3191
3192 if (!skip)
3193 offload_wq->is_handling_link_loss = true;
3194
3195 spin_unlock(&offload_wq->offload_lock);
3196
3197 if (!skip)
3198 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3199
3200 goto out;
3201 }
3202 }
c8ea79a8 3203
3083a984 3204out:
c8ea79a8 3205 if (result && !is_mst_root_connector) {
4562236b 3206 /* Downstream Port status changed. */
fbbdadf2
BL
3207 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3208 DRM_ERROR("KMS: Failed to detect connector\n");
3209
3210 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3211 emulated_link_detect(dc_link);
3212
3213 if (aconnector->fake_enable)
3214 aconnector->fake_enable = false;
3215
3216 amdgpu_dm_update_connector_after_detect(aconnector);
3217
3218
3219 drm_modeset_lock_all(dev);
3220 dm_restore_drm_connector_state(dev, connector);
3221 drm_modeset_unlock_all(dev);
3222
3223 drm_kms_helper_hotplug_event(dev);
3224 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
88ac3dda
RL
3225
3226 if (aconnector->fake_enable)
3227 aconnector->fake_enable = false;
3228
4562236b
HW
3229 amdgpu_dm_update_connector_after_detect(aconnector);
3230
3231
3232 drm_modeset_lock_all(dev);
3233 dm_restore_drm_connector_state(dev, connector);
3234 drm_modeset_unlock_all(dev);
3235
3236 drm_kms_helper_hotplug_event(dev);
3237 }
3238 }
2a0f9270 3239#ifdef CONFIG_DRM_AMD_DC_HDCP
95f247e7
DC
3240 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3241 if (adev->dm.hdcp_workqueue)
3242 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3243 }
2a0f9270 3244#endif
4562236b 3245
b86e7eef 3246 if (dc_link->type != dc_connection_mst_branch)
e86e8947 3247 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
b86e7eef
NC
3248
3249 mutex_unlock(&aconnector->hpd_lock);
4562236b
HW
3250}
3251
3252static void register_hpd_handlers(struct amdgpu_device *adev)
3253{
4a580877 3254 struct drm_device *dev = adev_to_drm(adev);
4562236b 3255 struct drm_connector *connector;
c84dec2f 3256 struct amdgpu_dm_connector *aconnector;
4562236b
HW
3257 const struct dc_link *dc_link;
3258 struct dc_interrupt_params int_params = {0};
3259
3260 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3261 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3262
3263 list_for_each_entry(connector,
3264 &dev->mode_config.connector_list, head) {
3265
c84dec2f 3266 aconnector = to_amdgpu_dm_connector(connector);
4562236b
HW
3267 dc_link = aconnector->dc_link;
3268
3269 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3270 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3271 int_params.irq_source = dc_link->irq_source_hpd;
3272
3273 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3274 handle_hpd_irq,
3275 (void *) aconnector);
3276 }
3277
3278 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3279
3280 /* Also register for DP short pulse (hpd_rx). */
3281 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3282 int_params.irq_source = dc_link->irq_source_hpd_rx;
3283
3284 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3285 handle_hpd_rx_irq,
3286 (void *) aconnector);
8e794421
WL
3287
3288 if (adev->dm.hpd_rx_offload_wq)
3289 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3290 aconnector;
4562236b
HW
3291 }
3292 }
3293}
3294
55e56389
MR
3295#if defined(CONFIG_DRM_AMD_DC_SI)
3296/* Register IRQ sources and initialize IRQ callbacks */
3297static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3298{
3299 struct dc *dc = adev->dm.dc;
3300 struct common_irq_params *c_irq_params;
3301 struct dc_interrupt_params int_params = {0};
3302 int r;
3303 int i;
3304 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3305
3306 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3307 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3308
3309 /*
3310 * Actions of amdgpu_irq_add_id():
3311 * 1. Register a set() function with base driver.
3312 * Base driver will call set() function to enable/disable an
3313 * interrupt in DC hardware.
3314 * 2. Register amdgpu_dm_irq_handler().
3315 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3316 * coming from DC hardware.
3317 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3318 * for acknowledging and handling. */
3319
3320 /* Use VBLANK interrupt */
3321 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3322 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
3323 if (r) {
3324 DRM_ERROR("Failed to add crtc irq id!\n");
3325 return r;
3326 }
3327
3328 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3329 int_params.irq_source =
3330 dc_interrupt_to_irq_source(dc, i+1 , 0);
3331
3332 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3333
3334 c_irq_params->adev = adev;
3335 c_irq_params->irq_src = int_params.irq_source;
3336
3337 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3338 dm_crtc_high_irq, c_irq_params);
3339 }
3340
3341 /* Use GRPH_PFLIP interrupt */
3342 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3343 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3344 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3345 if (r) {
3346 DRM_ERROR("Failed to add page flip irq id!\n");
3347 return r;
3348 }
3349
3350 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3351 int_params.irq_source =
3352 dc_interrupt_to_irq_source(dc, i, 0);
3353
3354 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3355
3356 c_irq_params->adev = adev;
3357 c_irq_params->irq_src = int_params.irq_source;
3358
3359 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3360 dm_pflip_high_irq, c_irq_params);
3361
3362 }
3363
3364 /* HPD */
3365 r = amdgpu_irq_add_id(adev, client_id,
3366 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3367 if (r) {
3368 DRM_ERROR("Failed to add hpd irq id!\n");
3369 return r;
3370 }
3371
3372 register_hpd_handlers(adev);
3373
3374 return 0;
3375}
3376#endif
3377
4562236b
HW
3378/* Register IRQ sources and initialize IRQ callbacks */
3379static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3380{
3381 struct dc *dc = adev->dm.dc;
3382 struct common_irq_params *c_irq_params;
3383 struct dc_interrupt_params int_params = {0};
3384 int r;
3385 int i;
1ffdeca6 3386 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2c8ad2d5 3387
c08182f2 3388 if (adev->family >= AMDGPU_FAMILY_AI)
3760f76c 3389 client_id = SOC15_IH_CLIENTID_DCE;
4562236b
HW
3390
3391 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3392 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3393
1f6010a9
DF
3394 /*
3395 * Actions of amdgpu_irq_add_id():
4562236b
HW
3396 * 1. Register a set() function with base driver.
3397 * Base driver will call set() function to enable/disable an
3398 * interrupt in DC hardware.
3399 * 2. Register amdgpu_dm_irq_handler().
3400 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3401 * coming from DC hardware.
3402 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3403 * for acknowledging and handling. */
3404
b57de80a 3405 /* Use VBLANK interrupt */
e9029155 3406 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2c8ad2d5 3407 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
4562236b
HW
3408 if (r) {
3409 DRM_ERROR("Failed to add crtc irq id!\n");
3410 return r;
3411 }
3412
3413 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3414 int_params.irq_source =
3d761e79 3415 dc_interrupt_to_irq_source(dc, i, 0);
4562236b 3416
b57de80a 3417 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4562236b
HW
3418
3419 c_irq_params->adev = adev;
3420 c_irq_params->irq_src = int_params.irq_source;
3421
3422 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3423 dm_crtc_high_irq, c_irq_params);
3424 }
3425
d2574c33
MK
3426 /* Use VUPDATE interrupt */
3427 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3428 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3429 if (r) {
3430 DRM_ERROR("Failed to add vupdate irq id!\n");
3431 return r;
3432 }
3433
3434 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3435 int_params.irq_source =
3436 dc_interrupt_to_irq_source(dc, i, 0);
3437
3438 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3439
3440 c_irq_params->adev = adev;
3441 c_irq_params->irq_src = int_params.irq_source;
3442
3443 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3444 dm_vupdate_high_irq, c_irq_params);
3445 }
3446
3d761e79 3447 /* Use GRPH_PFLIP interrupt */
4562236b
HW
3448 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3449 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2c8ad2d5 3450 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
4562236b
HW
3451 if (r) {
3452 DRM_ERROR("Failed to add page flip irq id!\n");
3453 return r;
3454 }
3455
3456 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3457 int_params.irq_source =
3458 dc_interrupt_to_irq_source(dc, i, 0);
3459
3460 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3461
3462 c_irq_params->adev = adev;
3463 c_irq_params->irq_src = int_params.irq_source;
3464
3465 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3466 dm_pflip_high_irq, c_irq_params);
3467
3468 }
3469
3470 /* HPD */
2c8ad2d5
AD
3471 r = amdgpu_irq_add_id(adev, client_id,
3472 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
4562236b
HW
3473 if (r) {
3474 DRM_ERROR("Failed to add hpd irq id!\n");
3475 return r;
3476 }
3477
3478 register_hpd_handlers(adev);
3479
3480 return 0;
3481}
3482
b86a1aa3 3483#if defined(CONFIG_DRM_AMD_DC_DCN)
ff5ef992
AD
3484/* Register IRQ sources and initialize IRQ callbacks */
3485static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3486{
3487 struct dc *dc = adev->dm.dc;
3488 struct common_irq_params *c_irq_params;
3489 struct dc_interrupt_params int_params = {0};
3490 int r;
3491 int i;
660d5406
WL
3492#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3493 static const unsigned int vrtl_int_srcid[] = {
3494 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3495 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3496 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3497 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3498 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3499 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3500 };
3501#endif
ff5ef992
AD
3502
3503 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3504 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3505
1f6010a9
DF
3506 /*
3507 * Actions of amdgpu_irq_add_id():
ff5ef992
AD
3508 * 1. Register a set() function with base driver.
3509 * Base driver will call set() function to enable/disable an
3510 * interrupt in DC hardware.
3511 * 2. Register amdgpu_dm_irq_handler().
3512 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3513 * coming from DC hardware.
3514 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3515 * for acknowledging and handling.
1f6010a9 3516 */
ff5ef992
AD
3517
3518 /* Use VSTARTUP interrupt */
3519 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3520 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3521 i++) {
3760f76c 3522 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
ff5ef992
AD
3523
3524 if (r) {
3525 DRM_ERROR("Failed to add crtc irq id!\n");
3526 return r;
3527 }
3528
3529 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3530 int_params.irq_source =
3531 dc_interrupt_to_irq_source(dc, i, 0);
3532
3533 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3534
3535 c_irq_params->adev = adev;
3536 c_irq_params->irq_src = int_params.irq_source;
3537
2346ef47
NK
3538 amdgpu_dm_irq_register_interrupt(
3539 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3540 }
3541
86bc2219
WL
3542 /* Use otg vertical line interrupt */
3543#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
660d5406
WL
3544 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3545 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3546 vrtl_int_srcid[i], &adev->vline0_irq);
86bc2219
WL
3547
3548 if (r) {
3549 DRM_ERROR("Failed to add vline0 irq id!\n");
3550 return r;
3551 }
3552
3553 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3554 int_params.irq_source =
660d5406
WL
3555 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3556
3557 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3558 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3559 break;
3560 }
86bc2219
WL
3561
3562 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3563 - DC_IRQ_SOURCE_DC1_VLINE0];
3564
3565 c_irq_params->adev = adev;
3566 c_irq_params->irq_src = int_params.irq_source;
3567
3568 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3569 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3570 }
3571#endif
3572
2346ef47
NK
3573 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3574 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3575 * to trigger at end of each vblank, regardless of state of the lock,
3576 * matching DCE behaviour.
3577 */
3578 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3579 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3580 i++) {
3581 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3582
3583 if (r) {
3584 DRM_ERROR("Failed to add vupdate irq id!\n");
3585 return r;
3586 }
3587
3588 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3589 int_params.irq_source =
3590 dc_interrupt_to_irq_source(dc, i, 0);
3591
3592 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3593
3594 c_irq_params->adev = adev;
3595 c_irq_params->irq_src = int_params.irq_source;
3596
ff5ef992 3597 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2346ef47 3598 dm_vupdate_high_irq, c_irq_params);
d2574c33
MK
3599 }
3600
ff5ef992
AD
3601 /* Use GRPH_PFLIP interrupt */
3602 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3603 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3604 i++) {
3760f76c 3605 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
ff5ef992
AD
3606 if (r) {
3607 DRM_ERROR("Failed to add page flip irq id!\n");
3608 return r;
3609 }
3610
3611 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3612 int_params.irq_source =
3613 dc_interrupt_to_irq_source(dc, i, 0);
3614
3615 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3616
3617 c_irq_params->adev = adev;
3618 c_irq_params->irq_src = int_params.irq_source;
3619
3620 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3621 dm_pflip_high_irq, c_irq_params);
3622
3623 }
3624
81927e28
JS
3625 /* HPD */
3626 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3627 &adev->hpd_irq);
3628 if (r) {
3629 DRM_ERROR("Failed to add hpd irq id!\n");
3630 return r;
3631 }
a08f16cf 3632
81927e28 3633 register_hpd_handlers(adev);
a08f16cf 3634
81927e28
JS
3635 return 0;
3636}
3637/* Register Outbox IRQ sources and initialize IRQ callbacks */
3638static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3639{
3640 struct dc *dc = adev->dm.dc;
3641 struct common_irq_params *c_irq_params;
3642 struct dc_interrupt_params int_params = {0};
3643 int r, i;
3644
3645 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3646 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3647
3648 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3649 &adev->dmub_outbox_irq);
3650 if (r) {
3651 DRM_ERROR("Failed to add outbox irq id!\n");
3652 return r;
3653 }
3654
3655 if (dc->ctx->dmub_srv) {
3656 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3657 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
a08f16cf 3658 int_params.irq_source =
81927e28 3659 dc_interrupt_to_irq_source(dc, i, 0);
a08f16cf 3660
81927e28 3661 c_irq_params = &adev->dm.dmub_outbox_params[0];
a08f16cf
LHM
3662
3663 c_irq_params->adev = adev;
3664 c_irq_params->irq_src = int_params.irq_source;
3665
3666 amdgpu_dm_irq_register_interrupt(adev, &int_params,
81927e28 3667 dm_dmub_outbox1_low_irq, c_irq_params);
ff5ef992
AD
3668 }
3669
ff5ef992
AD
3670 return 0;
3671}
3672#endif
3673
eb3dc897
NK
3674/*
3675 * Acquires the lock for the atomic state object and returns
3676 * the new atomic state.
3677 *
3678 * This should only be called during atomic check.
3679 */
3680static int dm_atomic_get_state(struct drm_atomic_state *state,
3681 struct dm_atomic_state **dm_state)
3682{
3683 struct drm_device *dev = state->dev;
1348969a 3684 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3685 struct amdgpu_display_manager *dm = &adev->dm;
3686 struct drm_private_state *priv_state;
eb3dc897
NK
3687
3688 if (*dm_state)
3689 return 0;
3690
eb3dc897
NK
3691 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3692 if (IS_ERR(priv_state))
3693 return PTR_ERR(priv_state);
3694
3695 *dm_state = to_dm_atomic_state(priv_state);
3696
3697 return 0;
3698}
3699
dfd84d90 3700static struct dm_atomic_state *
eb3dc897
NK
3701dm_atomic_get_new_state(struct drm_atomic_state *state)
3702{
3703 struct drm_device *dev = state->dev;
1348969a 3704 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897
NK
3705 struct amdgpu_display_manager *dm = &adev->dm;
3706 struct drm_private_obj *obj;
3707 struct drm_private_state *new_obj_state;
3708 int i;
3709
3710 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3711 if (obj->funcs == dm->atomic_obj.funcs)
3712 return to_dm_atomic_state(new_obj_state);
3713 }
3714
3715 return NULL;
3716}
3717
eb3dc897
NK
3718static struct drm_private_state *
3719dm_atomic_duplicate_state(struct drm_private_obj *obj)
3720{
3721 struct dm_atomic_state *old_state, *new_state;
3722
3723 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3724 if (!new_state)
3725 return NULL;
3726
3727 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3728
813d20dc
AW
3729 old_state = to_dm_atomic_state(obj->state);
3730
3731 if (old_state && old_state->context)
3732 new_state->context = dc_copy_state(old_state->context);
3733
eb3dc897
NK
3734 if (!new_state->context) {
3735 kfree(new_state);
3736 return NULL;
3737 }
3738
eb3dc897
NK
3739 return &new_state->base;
3740}
3741
3742static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3743 struct drm_private_state *state)
3744{
3745 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3746
3747 if (dm_state && dm_state->context)
3748 dc_release_state(dm_state->context);
3749
3750 kfree(dm_state);
3751}
3752
3753static struct drm_private_state_funcs dm_atomic_state_funcs = {
3754 .atomic_duplicate_state = dm_atomic_duplicate_state,
3755 .atomic_destroy_state = dm_atomic_destroy_state,
3756};
3757
4562236b
HW
3758static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3759{
eb3dc897 3760 struct dm_atomic_state *state;
4562236b
HW
3761 int r;
3762
3763 adev->mode_info.mode_config_initialized = true;
3764
4a580877
LT
3765 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3766 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4562236b 3767
4a580877
LT
3768 adev_to_drm(adev)->mode_config.max_width = 16384;
3769 adev_to_drm(adev)->mode_config.max_height = 16384;
4562236b 3770
4a580877
LT
3771 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3772 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
1f6010a9 3773 /* indicates support for immediate flip */
4a580877 3774 adev_to_drm(adev)->mode_config.async_page_flip = true;
4562236b 3775
4a580877 3776 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
4562236b 3777
eb3dc897
NK
3778 state = kzalloc(sizeof(*state), GFP_KERNEL);
3779 if (!state)
3780 return -ENOMEM;
3781
813d20dc 3782 state->context = dc_create_state(adev->dm.dc);
eb3dc897
NK
3783 if (!state->context) {
3784 kfree(state);
3785 return -ENOMEM;
3786 }
3787
3788 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3789
4a580877 3790 drm_atomic_private_obj_init(adev_to_drm(adev),
8c1a765b 3791 &adev->dm.atomic_obj,
eb3dc897
NK
3792 &state->base,
3793 &dm_atomic_state_funcs);
3794
3dc9b1ce 3795 r = amdgpu_display_modeset_create_props(adev);
b67a468a
DL
3796 if (r) {
3797 dc_release_state(state->context);
3798 kfree(state);
4562236b 3799 return r;
b67a468a 3800 }
4562236b 3801
6ce8f316 3802 r = amdgpu_dm_audio_init(adev);
b67a468a
DL
3803 if (r) {
3804 dc_release_state(state->context);
3805 kfree(state);
6ce8f316 3806 return r;
b67a468a 3807 }
6ce8f316 3808
4562236b
HW
3809 return 0;
3810}
3811
206bbafe
DF
3812#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3813#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
94562810 3814#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
206bbafe 3815
4562236b
HW
3816#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3817 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3818
7fd13bae
AD
3819static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3820 int bl_idx)
206bbafe
DF
3821{
3822#if defined(CONFIG_ACPI)
3823 struct amdgpu_dm_backlight_caps caps;
3824
58965855
FS
3825 memset(&caps, 0, sizeof(caps));
3826
7fd13bae 3827 if (dm->backlight_caps[bl_idx].caps_valid)
206bbafe
DF
3828 return;
3829
f9b7f370 3830 amdgpu_acpi_get_backlight_caps(&caps);
206bbafe 3831 if (caps.caps_valid) {
7fd13bae 3832 dm->backlight_caps[bl_idx].caps_valid = true;
94562810
RS
3833 if (caps.aux_support)
3834 return;
7fd13bae
AD
3835 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3836 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
206bbafe 3837 } else {
7fd13bae 3838 dm->backlight_caps[bl_idx].min_input_signal =
206bbafe 3839 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
7fd13bae 3840 dm->backlight_caps[bl_idx].max_input_signal =
206bbafe
DF
3841 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3842 }
3843#else
7fd13bae 3844 if (dm->backlight_caps[bl_idx].aux_support)
94562810
RS
3845 return;
3846
7fd13bae
AD
3847 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3848 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
206bbafe
DF
3849#endif
3850}
3851
69d9f427
AM
3852static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3853 unsigned *min, unsigned *max)
94562810 3854{
94562810 3855 if (!caps)
69d9f427 3856 return 0;
94562810 3857
69d9f427
AM
3858 if (caps->aux_support) {
3859 // Firmware limits are in nits, DC API wants millinits.
3860 *max = 1000 * caps->aux_max_input_signal;
3861 *min = 1000 * caps->aux_min_input_signal;
94562810 3862 } else {
69d9f427
AM
3863 // Firmware limits are 8-bit, PWM control is 16-bit.
3864 *max = 0x101 * caps->max_input_signal;
3865 *min = 0x101 * caps->min_input_signal;
94562810 3866 }
69d9f427
AM
3867 return 1;
3868}
94562810 3869
69d9f427
AM
3870static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3871 uint32_t brightness)
3872{
3873 unsigned min, max;
94562810 3874
69d9f427
AM
3875 if (!get_brightness_range(caps, &min, &max))
3876 return brightness;
3877
3878 // Rescale 0..255 to min..max
3879 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3880 AMDGPU_MAX_BL_LEVEL);
3881}
3882
3883static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3884 uint32_t brightness)
3885{
3886 unsigned min, max;
3887
3888 if (!get_brightness_range(caps, &min, &max))
3889 return brightness;
3890
3891 if (brightness < min)
3892 return 0;
3893 // Rescale min..max to 0..255
3894 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3895 max - min);
94562810
RS
3896}
3897
3d6c9164 3898static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
7fd13bae 3899 int bl_idx,
3d6c9164 3900 u32 user_brightness)
4562236b 3901{
206bbafe 3902 struct amdgpu_dm_backlight_caps caps;
7fd13bae
AD
3903 struct dc_link *link;
3904 u32 brightness;
94562810 3905 bool rc;
4562236b 3906
7fd13bae
AD
3907 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3908 caps = dm->backlight_caps[bl_idx];
94562810 3909
7fd13bae
AD
3910 dm->brightness[bl_idx] = user_brightness;
3911 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3912 link = (struct dc_link *)dm->backlight_link[bl_idx];
94562810 3913
3d6c9164 3914 /* Change brightness based on AUX property */
118b4627 3915 if (caps.aux_support) {
7fd13bae
AD
3916 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3917 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3918 if (!rc)
3919 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
118b4627 3920 } else {
7fd13bae
AD
3921 rc = dc_link_set_backlight_level(link, brightness, 0);
3922 if (!rc)
3923 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
118b4627 3924 }
94562810
RS
3925
3926 return rc ? 0 : 1;
4562236b
HW
3927}
3928
3d6c9164 3929static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4562236b 3930{
620a0d27 3931 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 3932 int i;
3d6c9164 3933
7fd13bae
AD
3934 for (i = 0; i < dm->num_of_edps; i++) {
3935 if (bd == dm->backlight_dev[i])
3936 break;
3937 }
3938 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3939 i = 0;
3940 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3d6c9164
AD
3941
3942 return 0;
3943}
3944
7fd13bae
AD
3945static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3946 int bl_idx)
3d6c9164 3947{
0ad3e64e 3948 struct amdgpu_dm_backlight_caps caps;
7fd13bae 3949 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
0ad3e64e 3950
7fd13bae
AD
3951 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3952 caps = dm->backlight_caps[bl_idx];
620a0d27 3953
0ad3e64e 3954 if (caps.aux_support) {
0ad3e64e
AD
3955 u32 avg, peak;
3956 bool rc;
3957
3958 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3959 if (!rc)
7fd13bae 3960 return dm->brightness[bl_idx];
0ad3e64e
AD
3961 return convert_brightness_to_user(&caps, avg);
3962 } else {
7fd13bae 3963 int ret = dc_link_get_backlight_level(link);
0ad3e64e
AD
3964
3965 if (ret == DC_ERROR_UNEXPECTED)
7fd13bae 3966 return dm->brightness[bl_idx];
0ad3e64e
AD
3967 return convert_brightness_to_user(&caps, ret);
3968 }
4562236b
HW
3969}
3970
3d6c9164
AD
3971static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3972{
3973 struct amdgpu_display_manager *dm = bl_get_data(bd);
7fd13bae 3974 int i;
3d6c9164 3975
7fd13bae
AD
3976 for (i = 0; i < dm->num_of_edps; i++) {
3977 if (bd == dm->backlight_dev[i])
3978 break;
3979 }
3980 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3981 i = 0;
3982 return amdgpu_dm_backlight_get_level(dm, i);
3d6c9164
AD
3983}
3984
4562236b 3985static const struct backlight_ops amdgpu_dm_backlight_ops = {
bb264220 3986 .options = BL_CORE_SUSPENDRESUME,
4562236b
HW
3987 .get_brightness = amdgpu_dm_backlight_get_brightness,
3988 .update_status = amdgpu_dm_backlight_update_status,
3989};
3990
7578ecda
AD
3991static void
3992amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4562236b
HW
3993{
3994 char bl_name[16];
3995 struct backlight_properties props = { 0 };
3996
7fd13bae
AD
3997 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3998 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
206bbafe 3999
4562236b 4000 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
53a53f86 4001 props.brightness = AMDGPU_MAX_BL_LEVEL;
4562236b
HW
4002 props.type = BACKLIGHT_RAW;
4003
4004 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
7fd13bae 4005 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4562236b 4006
7fd13bae
AD
4007 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4008 adev_to_drm(dm->adev)->dev,
4009 dm,
4010 &amdgpu_dm_backlight_ops,
4011 &props);
4562236b 4012
7fd13bae 4013 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4562236b
HW
4014 DRM_ERROR("DM: Backlight registration failed!\n");
4015 else
f1ad2f5e 4016 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4562236b 4017}
4562236b
HW
4018#endif
4019
df534fff 4020static int initialize_plane(struct amdgpu_display_manager *dm,
b2fddb13 4021 struct amdgpu_mode_info *mode_info, int plane_id,
cc1fec57
NK
4022 enum drm_plane_type plane_type,
4023 const struct dc_plane_cap *plane_cap)
df534fff 4024{
f180b4bc 4025 struct drm_plane *plane;
df534fff
S
4026 unsigned long possible_crtcs;
4027 int ret = 0;
4028
f180b4bc 4029 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
df534fff
S
4030 if (!plane) {
4031 DRM_ERROR("KMS: Failed to allocate plane\n");
4032 return -ENOMEM;
4033 }
b2fddb13 4034 plane->type = plane_type;
df534fff
S
4035
4036 /*
b2fddb13
NK
4037 * HACK: IGT tests expect that the primary plane for a CRTC
4038 * can only have one possible CRTC. Only expose support for
4039 * any CRTC if they're not going to be used as a primary plane
4040 * for a CRTC - like overlay or underlay planes.
df534fff
S
4041 */
4042 possible_crtcs = 1 << plane_id;
4043 if (plane_id >= dm->dc->caps.max_streams)
4044 possible_crtcs = 0xff;
4045
cc1fec57 4046 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
df534fff
S
4047
4048 if (ret) {
4049 DRM_ERROR("KMS: Failed to initialize plane\n");
54087768 4050 kfree(plane);
df534fff
S
4051 return ret;
4052 }
4053
54087768
NK
4054 if (mode_info)
4055 mode_info->planes[plane_id] = plane;
4056
df534fff
S
4057 return ret;
4058}
4059
89fc8d4e
HW
4060
4061static void register_backlight_device(struct amdgpu_display_manager *dm,
4062 struct dc_link *link)
4063{
4064#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4065 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4066
4067 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4068 link->type != dc_connection_none) {
1f6010a9
DF
4069 /*
4070 * Event if registration failed, we should continue with
89fc8d4e
HW
4071 * DM initialization because not having a backlight control
4072 * is better then a black screen.
4073 */
7fd13bae 4074 if (!dm->backlight_dev[dm->num_of_edps])
118b4627 4075 amdgpu_dm_register_backlight_device(dm);
89fc8d4e 4076
7fd13bae 4077 if (dm->backlight_dev[dm->num_of_edps]) {
118b4627
ML
4078 dm->backlight_link[dm->num_of_edps] = link;
4079 dm->num_of_edps++;
4080 }
89fc8d4e
HW
4081 }
4082#endif
4083}
4084
4085
1f6010a9
DF
4086/*
4087 * In this architecture, the association
4562236b
HW
4088 * connector -> encoder -> crtc
4089 * id not really requried. The crtc and connector will hold the
4090 * display_index as an abstraction to use with DAL component
4091 *
4092 * Returns 0 on success
4093 */
7578ecda 4094static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4562236b
HW
4095{
4096 struct amdgpu_display_manager *dm = &adev->dm;
df534fff 4097 int32_t i;
c84dec2f 4098 struct amdgpu_dm_connector *aconnector = NULL;
f2a0f5e6 4099 struct amdgpu_encoder *aencoder = NULL;
d4e13b0d 4100 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4562236b 4101 uint32_t link_cnt;
cc1fec57 4102 int32_t primary_planes;
fbbdadf2 4103 enum dc_connection_type new_connection_type = dc_connection_none;
cc1fec57 4104 const struct dc_plane_cap *plane;
9470620e 4105 bool psr_feature_enabled = false;
4562236b 4106
d58159de
AD
4107 dm->display_indexes_num = dm->dc->caps.max_streams;
4108 /* Update the actual used number of crtc */
4109 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4110
4562236b 4111 link_cnt = dm->dc->caps.max_links;
4562236b
HW
4112 if (amdgpu_dm_mode_config_init(dm->adev)) {
4113 DRM_ERROR("DM: Failed to initialize mode config\n");
59d0f396 4114 return -EINVAL;
4562236b
HW
4115 }
4116
b2fddb13
NK
4117 /* There is one primary plane per CRTC */
4118 primary_planes = dm->dc->caps.max_streams;
54087768 4119 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
efa6a8b7 4120
b2fddb13
NK
4121 /*
4122 * Initialize primary planes, implicit planes for legacy IOCTLS.
4123 * Order is reversed to match iteration order in atomic check.
4124 */
4125 for (i = (primary_planes - 1); i >= 0; i--) {
cc1fec57
NK
4126 plane = &dm->dc->caps.planes[i];
4127
b2fddb13 4128 if (initialize_plane(dm, mode_info, i,
cc1fec57 4129 DRM_PLANE_TYPE_PRIMARY, plane)) {
df534fff 4130 DRM_ERROR("KMS: Failed to initialize primary plane\n");
cd8a2ae8 4131 goto fail;
d4e13b0d 4132 }
df534fff 4133 }
92f3ac40 4134
0d579c7e
NK
4135 /*
4136 * Initialize overlay planes, index starting after primary planes.
4137 * These planes have a higher DRM index than the primary planes since
4138 * they should be considered as having a higher z-order.
4139 * Order is reversed to match iteration order in atomic check.
cc1fec57
NK
4140 *
4141 * Only support DCN for now, and only expose one so we don't encourage
4142 * userspace to use up all the pipes.
0d579c7e 4143 */
cc1fec57
NK
4144 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4145 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4146
4147 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4148 continue;
4149
4150 if (!plane->blends_with_above || !plane->blends_with_below)
4151 continue;
4152
ea36ad34 4153 if (!plane->pixel_format_support.argb8888)
cc1fec57
NK
4154 continue;
4155
54087768 4156 if (initialize_plane(dm, NULL, primary_planes + i,
cc1fec57 4157 DRM_PLANE_TYPE_OVERLAY, plane)) {
0d579c7e 4158 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
cd8a2ae8 4159 goto fail;
d4e13b0d 4160 }
cc1fec57
NK
4161
4162 /* Only create one overlay plane. */
4163 break;
d4e13b0d 4164 }
4562236b 4165
d4e13b0d 4166 for (i = 0; i < dm->dc->caps.max_streams; i++)
f180b4bc 4167 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4562236b 4168 DRM_ERROR("KMS: Failed to initialize crtc\n");
cd8a2ae8 4169 goto fail;
4562236b 4170 }
4562236b 4171
50610b74 4172#if defined(CONFIG_DRM_AMD_DC_DCN)
81927e28 4173 /* Use Outbox interrupt */
1d789535 4174 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4175 case IP_VERSION(3, 0, 0):
4176 case IP_VERSION(3, 1, 2):
4177 case IP_VERSION(3, 1, 3):
4178 case IP_VERSION(2, 1, 0):
81927e28
JS
4179 if (register_outbox_irq_handlers(dm->adev)) {
4180 DRM_ERROR("DM: Failed to initialize IRQ\n");
4181 goto fail;
4182 }
4183 break;
4184 default:
c08182f2 4185 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
1d789535 4186 adev->ip_versions[DCE_HWIP][0]);
81927e28 4187 }
9470620e
NK
4188
4189 /* Determine whether to enable PSR support by default. */
4190 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4191 switch (adev->ip_versions[DCE_HWIP][0]) {
4192 case IP_VERSION(3, 1, 2):
4193 case IP_VERSION(3, 1, 3):
4194 psr_feature_enabled = true;
4195 break;
4196 default:
4197 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4198 break;
4199 }
4200 }
50610b74 4201#endif
81927e28 4202
4562236b
HW
4203 /* loops over all connectors on the board */
4204 for (i = 0; i < link_cnt; i++) {
89fc8d4e 4205 struct dc_link *link = NULL;
4562236b
HW
4206
4207 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4208 DRM_ERROR(
4209 "KMS: Cannot support more than %d display indexes\n",
4210 AMDGPU_DM_MAX_DISPLAY_INDEX);
4211 continue;
4212 }
4213
4214 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4215 if (!aconnector)
cd8a2ae8 4216 goto fail;
4562236b
HW
4217
4218 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
8440c304 4219 if (!aencoder)
cd8a2ae8 4220 goto fail;
4562236b
HW
4221
4222 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4223 DRM_ERROR("KMS: Failed to initialize encoder\n");
cd8a2ae8 4224 goto fail;
4562236b
HW
4225 }
4226
4227 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4228 DRM_ERROR("KMS: Failed to initialize connector\n");
cd8a2ae8 4229 goto fail;
4562236b
HW
4230 }
4231
89fc8d4e
HW
4232 link = dc_get_link_at_index(dm->dc, i);
4233
fbbdadf2
BL
4234 if (!dc_link_detect_sink(link, &new_connection_type))
4235 DRM_ERROR("KMS: Failed to detect connector\n");
4236
4237 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4238 emulated_link_detect(link);
4239 amdgpu_dm_update_connector_after_detect(aconnector);
4240
4241 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4562236b 4242 amdgpu_dm_update_connector_after_detect(aconnector);
89fc8d4e 4243 register_backlight_device(dm, link);
9470620e
NK
4244
4245 if (psr_feature_enabled)
397a9bc5 4246 amdgpu_dm_set_psr_caps(link);
89fc8d4e
HW
4247 }
4248
4249
4562236b
HW
4250 }
4251
4252 /* Software is initialized. Now we can register interrupt handlers. */
4253 switch (adev->asic_type) {
55e56389
MR
4254#if defined(CONFIG_DRM_AMD_DC_SI)
4255 case CHIP_TAHITI:
4256 case CHIP_PITCAIRN:
4257 case CHIP_VERDE:
4258 case CHIP_OLAND:
4259 if (dce60_register_irq_handlers(dm->adev)) {
4260 DRM_ERROR("DM: Failed to initialize IRQ\n");
4261 goto fail;
4262 }
4263 break;
4264#endif
4562236b
HW
4265 case CHIP_BONAIRE:
4266 case CHIP_HAWAII:
cd4b356f
AD
4267 case CHIP_KAVERI:
4268 case CHIP_KABINI:
4269 case CHIP_MULLINS:
4562236b
HW
4270 case CHIP_TONGA:
4271 case CHIP_FIJI:
4272 case CHIP_CARRIZO:
4273 case CHIP_STONEY:
4274 case CHIP_POLARIS11:
4275 case CHIP_POLARIS10:
b264d345 4276 case CHIP_POLARIS12:
7737de91 4277 case CHIP_VEGAM:
2c8ad2d5 4278 case CHIP_VEGA10:
2325ff30 4279 case CHIP_VEGA12:
1fe6bf2f 4280 case CHIP_VEGA20:
4562236b
HW
4281 if (dce110_register_irq_handlers(dm->adev)) {
4282 DRM_ERROR("DM: Failed to initialize IRQ\n");
cd8a2ae8 4283 goto fail;
4562236b
HW
4284 }
4285 break;
4286 default:
c08182f2 4287#if defined(CONFIG_DRM_AMD_DC_DCN)
1d789535 4288 switch (adev->ip_versions[DCE_HWIP][0]) {
559f591d
AD
4289 case IP_VERSION(1, 0, 0):
4290 case IP_VERSION(1, 0, 1):
c08182f2
AD
4291 case IP_VERSION(2, 0, 2):
4292 case IP_VERSION(2, 0, 3):
4293 case IP_VERSION(2, 0, 0):
4294 case IP_VERSION(2, 1, 0):
4295 case IP_VERSION(3, 0, 0):
4296 case IP_VERSION(3, 0, 2):
4297 case IP_VERSION(3, 0, 3):
4298 case IP_VERSION(3, 0, 1):
4299 case IP_VERSION(3, 1, 2):
4300 case IP_VERSION(3, 1, 3):
4301 if (dcn10_register_irq_handlers(dm->adev)) {
4302 DRM_ERROR("DM: Failed to initialize IRQ\n");
4303 goto fail;
4304 }
4305 break;
4306 default:
2cbc6f42 4307 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
1d789535 4308 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4309 goto fail;
c08182f2
AD
4310 }
4311#endif
2cbc6f42 4312 break;
4562236b
HW
4313 }
4314
4562236b 4315 return 0;
cd8a2ae8 4316fail:
4562236b 4317 kfree(aencoder);
4562236b 4318 kfree(aconnector);
54087768 4319
59d0f396 4320 return -EINVAL;
4562236b
HW
4321}
4322
7578ecda 4323static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4562236b 4324{
eb3dc897 4325 drm_atomic_private_obj_fini(&dm->atomic_obj);
4562236b
HW
4326 return;
4327}
4328
4329/******************************************************************************
4330 * amdgpu_display_funcs functions
4331 *****************************************************************************/
4332
1f6010a9 4333/*
4562236b
HW
4334 * dm_bandwidth_update - program display watermarks
4335 *
4336 * @adev: amdgpu_device pointer
4337 *
4338 * Calculate and program the display watermarks and line buffer allocation.
4339 */
4340static void dm_bandwidth_update(struct amdgpu_device *adev)
4341{
49c07a99 4342 /* TODO: implement later */
4562236b
HW
4343}
4344
39cc5be2 4345static const struct amdgpu_display_funcs dm_display_funcs = {
4562236b
HW
4346 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4347 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
7b42573b
HW
4348 .backlight_set_level = NULL, /* never called for DC */
4349 .backlight_get_level = NULL, /* never called for DC */
4562236b
HW
4350 .hpd_sense = NULL,/* called unconditionally */
4351 .hpd_set_polarity = NULL, /* called unconditionally */
4352 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4353 .page_flip_get_scanoutpos =
4354 dm_crtc_get_scanoutpos,/* called unconditionally */
4355 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4356 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4562236b
HW
4357};
4358
4359#if defined(CONFIG_DEBUG_KERNEL_DC)
4360
3ee6b26b
AD
4361static ssize_t s3_debug_store(struct device *device,
4362 struct device_attribute *attr,
4363 const char *buf,
4364 size_t count)
4562236b
HW
4365{
4366 int ret;
4367 int s3_state;
ef1de361 4368 struct drm_device *drm_dev = dev_get_drvdata(device);
1348969a 4369 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4562236b
HW
4370
4371 ret = kstrtoint(buf, 0, &s3_state);
4372
4373 if (ret == 0) {
4374 if (s3_state) {
4375 dm_resume(adev);
4a580877 4376 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4562236b
HW
4377 } else
4378 dm_suspend(adev);
4379 }
4380
4381 return ret == 0 ? count : 0;
4382}
4383
4384DEVICE_ATTR_WO(s3_debug);
4385
4386#endif
4387
4388static int dm_early_init(void *handle)
4389{
4390 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4391
4562236b 4392 switch (adev->asic_type) {
55e56389
MR
4393#if defined(CONFIG_DRM_AMD_DC_SI)
4394 case CHIP_TAHITI:
4395 case CHIP_PITCAIRN:
4396 case CHIP_VERDE:
4397 adev->mode_info.num_crtc = 6;
4398 adev->mode_info.num_hpd = 6;
4399 adev->mode_info.num_dig = 6;
4400 break;
4401 case CHIP_OLAND:
4402 adev->mode_info.num_crtc = 2;
4403 adev->mode_info.num_hpd = 2;
4404 adev->mode_info.num_dig = 2;
4405 break;
4406#endif
4562236b
HW
4407 case CHIP_BONAIRE:
4408 case CHIP_HAWAII:
4409 adev->mode_info.num_crtc = 6;
4410 adev->mode_info.num_hpd = 6;
4411 adev->mode_info.num_dig = 6;
4562236b 4412 break;
cd4b356f
AD
4413 case CHIP_KAVERI:
4414 adev->mode_info.num_crtc = 4;
4415 adev->mode_info.num_hpd = 6;
4416 adev->mode_info.num_dig = 7;
cd4b356f
AD
4417 break;
4418 case CHIP_KABINI:
4419 case CHIP_MULLINS:
4420 adev->mode_info.num_crtc = 2;
4421 adev->mode_info.num_hpd = 6;
4422 adev->mode_info.num_dig = 6;
cd4b356f 4423 break;
4562236b
HW
4424 case CHIP_FIJI:
4425 case CHIP_TONGA:
4426 adev->mode_info.num_crtc = 6;
4427 adev->mode_info.num_hpd = 6;
4428 adev->mode_info.num_dig = 7;
4562236b
HW
4429 break;
4430 case CHIP_CARRIZO:
4431 adev->mode_info.num_crtc = 3;
4432 adev->mode_info.num_hpd = 6;
4433 adev->mode_info.num_dig = 9;
4562236b
HW
4434 break;
4435 case CHIP_STONEY:
4436 adev->mode_info.num_crtc = 2;
4437 adev->mode_info.num_hpd = 6;
4438 adev->mode_info.num_dig = 9;
4562236b
HW
4439 break;
4440 case CHIP_POLARIS11:
b264d345 4441 case CHIP_POLARIS12:
4562236b
HW
4442 adev->mode_info.num_crtc = 5;
4443 adev->mode_info.num_hpd = 5;
4444 adev->mode_info.num_dig = 5;
4562236b
HW
4445 break;
4446 case CHIP_POLARIS10:
7737de91 4447 case CHIP_VEGAM:
4562236b
HW
4448 adev->mode_info.num_crtc = 6;
4449 adev->mode_info.num_hpd = 6;
4450 adev->mode_info.num_dig = 6;
4562236b 4451 break;
2c8ad2d5 4452 case CHIP_VEGA10:
2325ff30 4453 case CHIP_VEGA12:
1fe6bf2f 4454 case CHIP_VEGA20:
2c8ad2d5
AD
4455 adev->mode_info.num_crtc = 6;
4456 adev->mode_info.num_hpd = 6;
4457 adev->mode_info.num_dig = 6;
4458 break;
4562236b 4459 default:
c08182f2 4460#if defined(CONFIG_DRM_AMD_DC_DCN)
1d789535 4461 switch (adev->ip_versions[DCE_HWIP][0]) {
c08182f2
AD
4462 case IP_VERSION(2, 0, 2):
4463 case IP_VERSION(3, 0, 0):
4464 adev->mode_info.num_crtc = 6;
4465 adev->mode_info.num_hpd = 6;
4466 adev->mode_info.num_dig = 6;
4467 break;
4468 case IP_VERSION(2, 0, 0):
4469 case IP_VERSION(3, 0, 2):
4470 adev->mode_info.num_crtc = 5;
4471 adev->mode_info.num_hpd = 5;
4472 adev->mode_info.num_dig = 5;
4473 break;
4474 case IP_VERSION(2, 0, 3):
4475 case IP_VERSION(3, 0, 3):
4476 adev->mode_info.num_crtc = 2;
4477 adev->mode_info.num_hpd = 2;
4478 adev->mode_info.num_dig = 2;
4479 break;
559f591d
AD
4480 case IP_VERSION(1, 0, 0):
4481 case IP_VERSION(1, 0, 1):
c08182f2
AD
4482 case IP_VERSION(3, 0, 1):
4483 case IP_VERSION(2, 1, 0):
4484 case IP_VERSION(3, 1, 2):
4485 case IP_VERSION(3, 1, 3):
4486 adev->mode_info.num_crtc = 4;
4487 adev->mode_info.num_hpd = 4;
4488 adev->mode_info.num_dig = 4;
4489 break;
4490 default:
2cbc6f42 4491 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
1d789535 4492 adev->ip_versions[DCE_HWIP][0]);
2cbc6f42 4493 return -EINVAL;
c08182f2
AD
4494 }
4495#endif
2cbc6f42 4496 break;
4562236b
HW
4497 }
4498
c8dd5715
MD
4499 amdgpu_dm_set_irq_funcs(adev);
4500
39cc5be2
AD
4501 if (adev->mode_info.funcs == NULL)
4502 adev->mode_info.funcs = &dm_display_funcs;
4503
1f6010a9
DF
4504 /*
4505 * Note: Do NOT change adev->audio_endpt_rreg and
4562236b 4506 * adev->audio_endpt_wreg because they are initialised in
1f6010a9
DF
4507 * amdgpu_device_init()
4508 */
4562236b
HW
4509#if defined(CONFIG_DEBUG_KERNEL_DC)
4510 device_create_file(
4a580877 4511 adev_to_drm(adev)->dev,
4562236b
HW
4512 &dev_attr_s3_debug);
4513#endif
4514
4515 return 0;
4516}
4517
9b690ef3 4518static bool modeset_required(struct drm_crtc_state *crtc_state,
0971c40e
HW
4519 struct dc_stream_state *new_stream,
4520 struct dc_stream_state *old_stream)
9b690ef3 4521{
2afda735 4522 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4523}
4524
4525static bool modereset_required(struct drm_crtc_state *crtc_state)
4526{
2afda735 4527 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
e7b07cee
HW
4528}
4529
7578ecda 4530static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
e7b07cee
HW
4531{
4532 drm_encoder_cleanup(encoder);
4533 kfree(encoder);
4534}
4535
4536static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4537 .destroy = amdgpu_dm_encoder_destroy,
4538};
4539
e7b07cee 4540
6300b3bd
MK
4541static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4542 struct drm_framebuffer *fb,
4543 int *min_downscale, int *max_upscale)
4544{
4545 struct amdgpu_device *adev = drm_to_adev(dev);
4546 struct dc *dc = adev->dm.dc;
4547 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4548 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4549
4550 switch (fb->format->format) {
4551 case DRM_FORMAT_P010:
4552 case DRM_FORMAT_NV12:
4553 case DRM_FORMAT_NV21:
4554 *max_upscale = plane_cap->max_upscale_factor.nv12;
4555 *min_downscale = plane_cap->max_downscale_factor.nv12;
4556 break;
4557
4558 case DRM_FORMAT_XRGB16161616F:
4559 case DRM_FORMAT_ARGB16161616F:
4560 case DRM_FORMAT_XBGR16161616F:
4561 case DRM_FORMAT_ABGR16161616F:
4562 *max_upscale = plane_cap->max_upscale_factor.fp16;
4563 *min_downscale = plane_cap->max_downscale_factor.fp16;
4564 break;
4565
4566 default:
4567 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4568 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4569 break;
4570 }
4571
4572 /*
4573 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4574 * scaling factor of 1.0 == 1000 units.
4575 */
4576 if (*max_upscale == 1)
4577 *max_upscale = 1000;
4578
4579 if (*min_downscale == 1)
4580 *min_downscale = 1000;
4581}
4582
4583
4375d625
S
4584static int fill_dc_scaling_info(struct amdgpu_device *adev,
4585 const struct drm_plane_state *state,
695af5f9 4586 struct dc_scaling_info *scaling_info)
e7b07cee 4587{
6300b3bd 4588 int scale_w, scale_h, min_downscale, max_upscale;
e7b07cee 4589
695af5f9 4590 memset(scaling_info, 0, sizeof(*scaling_info));
e7b07cee 4591
695af5f9
NK
4592 /* Source is fixed 16.16 but we ignore mantissa for now... */
4593 scaling_info->src_rect.x = state->src_x >> 16;
4594 scaling_info->src_rect.y = state->src_y >> 16;
e7b07cee 4595
d89f6048
HW
4596 /*
4597 * For reasons we don't (yet) fully understand a non-zero
4598 * src_y coordinate into an NV12 buffer can cause a
4375d625
S
4599 * system hang on DCN1x.
4600 * To avoid hangs (and maybe be overly cautious)
d89f6048
HW
4601 * let's reject both non-zero src_x and src_y.
4602 *
4603 * We currently know of only one use-case to reproduce a
4604 * scenario with non-zero src_x and src_y for NV12, which
4605 * is to gesture the YouTube Android app into full screen
4606 * on ChromeOS.
4607 */
4375d625
S
4608 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4609 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4610 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4611 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
d89f6048
HW
4612 return -EINVAL;
4613
695af5f9
NK
4614 scaling_info->src_rect.width = state->src_w >> 16;
4615 if (scaling_info->src_rect.width == 0)
4616 return -EINVAL;
4617
4618 scaling_info->src_rect.height = state->src_h >> 16;
4619 if (scaling_info->src_rect.height == 0)
4620 return -EINVAL;
4621
4622 scaling_info->dst_rect.x = state->crtc_x;
4623 scaling_info->dst_rect.y = state->crtc_y;
e7b07cee
HW
4624
4625 if (state->crtc_w == 0)
695af5f9 4626 return -EINVAL;
e7b07cee 4627
695af5f9 4628 scaling_info->dst_rect.width = state->crtc_w;
e7b07cee
HW
4629
4630 if (state->crtc_h == 0)
695af5f9 4631 return -EINVAL;
e7b07cee 4632
695af5f9 4633 scaling_info->dst_rect.height = state->crtc_h;
e7b07cee 4634
695af5f9
NK
4635 /* DRM doesn't specify clipping on destination output. */
4636 scaling_info->clip_rect = scaling_info->dst_rect;
e7b07cee 4637
6300b3bd
MK
4638 /* Validate scaling per-format with DC plane caps */
4639 if (state->plane && state->plane->dev && state->fb) {
4640 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4641 &min_downscale, &max_upscale);
4642 } else {
4643 min_downscale = 250;
4644 max_upscale = 16000;
4645 }
4646
6491f0c0
NK
4647 scale_w = scaling_info->dst_rect.width * 1000 /
4648 scaling_info->src_rect.width;
e7b07cee 4649
6300b3bd 4650 if (scale_w < min_downscale || scale_w > max_upscale)
6491f0c0
NK
4651 return -EINVAL;
4652
4653 scale_h = scaling_info->dst_rect.height * 1000 /
4654 scaling_info->src_rect.height;
4655
6300b3bd 4656 if (scale_h < min_downscale || scale_h > max_upscale)
6491f0c0
NK
4657 return -EINVAL;
4658
695af5f9
NK
4659 /*
4660 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4661 * assume reasonable defaults based on the format.
4662 */
e7b07cee 4663
695af5f9 4664 return 0;
4562236b 4665}
695af5f9 4666
a3241991
BN
4667static void
4668fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4669 uint64_t tiling_flags)
e7b07cee 4670{
a3241991
BN
4671 /* Fill GFX8 params */
4672 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4673 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
707477b0 4674
a3241991
BN
4675 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4676 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4677 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4678 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4679 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
b830ebc9 4680
a3241991
BN
4681 /* XXX fix me for VI */
4682 tiling_info->gfx8.num_banks = num_banks;
4683 tiling_info->gfx8.array_mode =
4684 DC_ARRAY_2D_TILED_THIN1;
4685 tiling_info->gfx8.tile_split = tile_split;
4686 tiling_info->gfx8.bank_width = bankw;
4687 tiling_info->gfx8.bank_height = bankh;
4688 tiling_info->gfx8.tile_aspect = mtaspect;
4689 tiling_info->gfx8.tile_mode =
4690 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4691 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4692 == DC_ARRAY_1D_TILED_THIN1) {
4693 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
e7b07cee
HW
4694 }
4695
a3241991
BN
4696 tiling_info->gfx8.pipe_config =
4697 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
e7b07cee
HW
4698}
4699
a3241991
BN
4700static void
4701fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4702 union dc_tiling_info *tiling_info)
4703{
4704 tiling_info->gfx9.num_pipes =
4705 adev->gfx.config.gb_addr_config_fields.num_pipes;
4706 tiling_info->gfx9.num_banks =
4707 adev->gfx.config.gb_addr_config_fields.num_banks;
4708 tiling_info->gfx9.pipe_interleave =
4709 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4710 tiling_info->gfx9.num_shader_engines =
4711 adev->gfx.config.gb_addr_config_fields.num_se;
4712 tiling_info->gfx9.max_compressed_frags =
4713 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4714 tiling_info->gfx9.num_rb_per_se =
4715 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4716 tiling_info->gfx9.shaderEnable = 1;
1d789535 4717 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
a3241991 4718 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
7df7e505
NK
4719}
4720
695af5f9 4721static int
a3241991
BN
4722validate_dcc(struct amdgpu_device *adev,
4723 const enum surface_pixel_format format,
4724 const enum dc_rotation_angle rotation,
4725 const union dc_tiling_info *tiling_info,
4726 const struct dc_plane_dcc_param *dcc,
4727 const struct dc_plane_address *address,
4728 const struct plane_size *plane_size)
7df7e505
NK
4729{
4730 struct dc *dc = adev->dm.dc;
8daa1218
NC
4731 struct dc_dcc_surface_param input;
4732 struct dc_surface_dcc_cap output;
7df7e505 4733
8daa1218
NC
4734 memset(&input, 0, sizeof(input));
4735 memset(&output, 0, sizeof(output));
4736
a3241991 4737 if (!dcc->enable)
87b7ebc2
RS
4738 return 0;
4739
a3241991
BN
4740 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4741 !dc->cap_funcs.get_dcc_compression_cap)
09e5665a 4742 return -EINVAL;
7df7e505 4743
695af5f9 4744 input.format = format;
12e2b2d4
DL
4745 input.surface_size.width = plane_size->surface_size.width;
4746 input.surface_size.height = plane_size->surface_size.height;
695af5f9 4747 input.swizzle_mode = tiling_info->gfx9.swizzle;
7df7e505 4748
695af5f9 4749 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
7df7e505 4750 input.scan = SCAN_DIRECTION_HORIZONTAL;
695af5f9 4751 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
7df7e505
NK
4752 input.scan = SCAN_DIRECTION_VERTICAL;
4753
4754 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
09e5665a 4755 return -EINVAL;
7df7e505
NK
4756
4757 if (!output.capable)
09e5665a 4758 return -EINVAL;
7df7e505 4759
a3241991
BN
4760 if (dcc->independent_64b_blks == 0 &&
4761 output.grph.rgb.independent_64b_blks != 0)
09e5665a 4762 return -EINVAL;
7df7e505 4763
a3241991
BN
4764 return 0;
4765}
4766
37384b3f
BN
4767static bool
4768modifier_has_dcc(uint64_t modifier)
4769{
4770 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4771}
4772
4773static unsigned
4774modifier_gfx9_swizzle_mode(uint64_t modifier)
4775{
4776 if (modifier == DRM_FORMAT_MOD_LINEAR)
4777 return 0;
4778
4779 return AMD_FMT_MOD_GET(TILE, modifier);
4780}
4781
dfbbfe3c
BN
4782static const struct drm_format_info *
4783amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4784{
816853f9 4785 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
dfbbfe3c
BN
4786}
4787
37384b3f
BN
4788static void
4789fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4790 union dc_tiling_info *tiling_info,
4791 uint64_t modifier)
4792{
4793 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4794 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4795 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4796 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4797
4798 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4799
4800 if (!IS_AMD_FMT_MOD(modifier))
4801 return;
4802
4803 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4804 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4805
4806 if (adev->family >= AMDGPU_FAMILY_NV) {
4807 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4808 } else {
4809 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4810
4811 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4812 }
4813}
4814
faa37f54
BN
4815enum dm_micro_swizzle {
4816 MICRO_SWIZZLE_Z = 0,
4817 MICRO_SWIZZLE_S = 1,
4818 MICRO_SWIZZLE_D = 2,
4819 MICRO_SWIZZLE_R = 3
4820};
4821
4822static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4823 uint32_t format,
4824 uint64_t modifier)
4825{
4826 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4827 const struct drm_format_info *info = drm_format_info(format);
fe180178 4828 int i;
faa37f54
BN
4829
4830 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4831
4832 if (!info)
4833 return false;
4834
4835 /*
fe180178
QZ
4836 * We always have to allow these modifiers:
4837 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4838 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
faa37f54 4839 */
fe180178
QZ
4840 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4841 modifier == DRM_FORMAT_MOD_INVALID) {
faa37f54 4842 return true;
fe180178 4843 }
faa37f54 4844
fe180178
QZ
4845 /* Check that the modifier is on the list of the plane's supported modifiers. */
4846 for (i = 0; i < plane->modifier_count; i++) {
4847 if (modifier == plane->modifiers[i])
4848 break;
4849 }
4850 if (i == plane->modifier_count)
faa37f54
BN
4851 return false;
4852
4853 /*
4854 * For D swizzle the canonical modifier depends on the bpp, so check
4855 * it here.
4856 */
4857 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4858 adev->family >= AMDGPU_FAMILY_NV) {
4859 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4860 return false;
4861 }
4862
4863 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4864 info->cpp[0] < 8)
4865 return false;
4866
4867 if (modifier_has_dcc(modifier)) {
4868 /* Per radeonsi comments 16/64 bpp are more complicated. */
4869 if (info->cpp[0] != 4)
4870 return false;
951796f2
SS
4871 /* We support multi-planar formats, but not when combined with
4872 * additional DCC metadata planes. */
4873 if (info->num_planes > 1)
4874 return false;
faa37f54
BN
4875 }
4876
4877 return true;
4878}
4879
4880static void
4881add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4882{
4883 if (!*mods)
4884 return;
4885
4886 if (*cap - *size < 1) {
4887 uint64_t new_cap = *cap * 2;
4888 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4889
4890 if (!new_mods) {
4891 kfree(*mods);
4892 *mods = NULL;
4893 return;
4894 }
4895
4896 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4897 kfree(*mods);
4898 *mods = new_mods;
4899 *cap = new_cap;
4900 }
4901
4902 (*mods)[*size] = mod;
4903 *size += 1;
4904}
4905
4906static void
4907add_gfx9_modifiers(const struct amdgpu_device *adev,
4908 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4909{
4910 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4911 int pipe_xor_bits = min(8, pipes +
4912 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4913 int bank_xor_bits = min(8 - pipe_xor_bits,
4914 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4915 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4916 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4917
4918
4919 if (adev->family == AMDGPU_FAMILY_RV) {
4920 /* Raven2 and later */
4921 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4922
4923 /*
4924 * No _D DCC swizzles yet because we only allow 32bpp, which
4925 * doesn't support _D on DCN
4926 */
4927
4928 if (has_constant_encode) {
4929 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4930 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4931 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4932 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4933 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4934 AMD_FMT_MOD_SET(DCC, 1) |
4935 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4936 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4937 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4938 }
4939
4940 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4941 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4942 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4943 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4944 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4945 AMD_FMT_MOD_SET(DCC, 1) |
4946 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4947 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4948 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4949
4950 if (has_constant_encode) {
4951 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4952 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4953 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4954 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4955 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4956 AMD_FMT_MOD_SET(DCC, 1) |
4957 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4958 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4959 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4960
4961 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4962 AMD_FMT_MOD_SET(RB, rb) |
4963 AMD_FMT_MOD_SET(PIPE, pipes));
4964 }
4965
4966 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4967 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4968 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4969 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4970 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4971 AMD_FMT_MOD_SET(DCC, 1) |
4972 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4973 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4974 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4975 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4976 AMD_FMT_MOD_SET(RB, rb) |
4977 AMD_FMT_MOD_SET(PIPE, pipes));
4978 }
4979
4980 /*
4981 * Only supported for 64bpp on Raven, will be filtered on format in
4982 * dm_plane_format_mod_supported.
4983 */
4984 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4985 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4986 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4987 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4988 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4989
4990 if (adev->family == AMDGPU_FAMILY_RV) {
4991 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4992 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4993 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4994 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4995 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4996 }
4997
4998 /*
4999 * Only supported for 64bpp on Raven, will be filtered on format in
5000 * dm_plane_format_mod_supported.
5001 */
5002 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5003 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5004 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5005
5006 if (adev->family == AMDGPU_FAMILY_RV) {
5007 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5008 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5009 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5010 }
5011}
5012
5013static void
5014add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5015 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5016{
5017 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5018
5019 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5020 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5021 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5022 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5023 AMD_FMT_MOD_SET(DCC, 1) |
5024 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5025 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5026 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5027
5028 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5029 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5030 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5031 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5032 AMD_FMT_MOD_SET(DCC, 1) |
5033 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5034 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5035 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5036 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5037
5038 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5039 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5040 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5041 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5042
5043 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5044 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5045 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5046 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5047
5048
5049 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5050 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5051 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5052 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5053
5054 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5055 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5056 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5057}
5058
5059static void
5060add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5061 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5062{
5063 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5064 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5065
5066 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5067 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5068 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5069 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5070 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5071 AMD_FMT_MOD_SET(DCC, 1) |
5072 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5073 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5074 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5075 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5076
7f6ab50a
JA
5077 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5078 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5079 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5080 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5081 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5082 AMD_FMT_MOD_SET(DCC, 1) |
5083 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5084 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5085 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5086
faa37f54
BN
5087 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5088 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5089 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5090 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5091 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5092 AMD_FMT_MOD_SET(DCC, 1) |
5093 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5094 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5095 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5096 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
2b608182 5097 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
faa37f54 5098
7f6ab50a
JA
5099 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5100 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5101 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5102 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5103 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5104 AMD_FMT_MOD_SET(DCC, 1) |
5105 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5106 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5107 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5108 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5109
faa37f54
BN
5110 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5111 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5112 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5113 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5114 AMD_FMT_MOD_SET(PACKERS, pkrs));
5115
5116 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5117 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5118 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5119 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5120 AMD_FMT_MOD_SET(PACKERS, pkrs));
5121
5122 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5123 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5124 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5125 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5126
5127 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5128 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5129 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5130}
5131
5132static int
5133get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5134{
5135 uint64_t size = 0, capacity = 128;
5136 *mods = NULL;
5137
5138 /* We have not hooked up any pre-GFX9 modifiers. */
5139 if (adev->family < AMDGPU_FAMILY_AI)
5140 return 0;
5141
5142 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5143
5144 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5145 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5146 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5147 return *mods ? 0 : -ENOMEM;
5148 }
5149
5150 switch (adev->family) {
5151 case AMDGPU_FAMILY_AI:
5152 case AMDGPU_FAMILY_RV:
5153 add_gfx9_modifiers(adev, mods, &size, &capacity);
5154 break;
5155 case AMDGPU_FAMILY_NV:
5156 case AMDGPU_FAMILY_VGH:
1ebcaebd 5157 case AMDGPU_FAMILY_YC:
1d789535 5158 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
faa37f54
BN
5159 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5160 else
5161 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5162 break;
5163 }
5164
5165 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5166
5167 /* INVALID marks the end of the list. */
5168 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5169
5170 if (!*mods)
5171 return -ENOMEM;
5172
5173 return 0;
5174}
5175
37384b3f
BN
5176static int
5177fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5178 const struct amdgpu_framebuffer *afb,
5179 const enum surface_pixel_format format,
5180 const enum dc_rotation_angle rotation,
5181 const struct plane_size *plane_size,
5182 union dc_tiling_info *tiling_info,
5183 struct dc_plane_dcc_param *dcc,
5184 struct dc_plane_address *address,
5185 const bool force_disable_dcc)
5186{
5187 const uint64_t modifier = afb->base.modifier;
2be7f77f 5188 int ret = 0;
37384b3f
BN
5189
5190 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5191 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5192
5193 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5194 uint64_t dcc_address = afb->address + afb->base.offsets[1];
3d360154 5195 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
a86396c3 5196 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
37384b3f
BN
5197
5198 dcc->enable = 1;
5199 dcc->meta_pitch = afb->base.pitches[1];
3d360154 5200 dcc->independent_64b_blks = independent_64b_blks;
a86396c3
JA
5201 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5202 if (independent_64b_blks && independent_128b_blks)
f2e7d856 5203 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
a86396c3
JA
5204 else if (independent_128b_blks)
5205 dcc->dcc_ind_blk = hubp_ind_block_128b;
5206 else if (independent_64b_blks && !independent_128b_blks)
f2e7d856 5207 dcc->dcc_ind_blk = hubp_ind_block_64b;
a86396c3
JA
5208 else
5209 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5210 } else {
5211 if (independent_64b_blks)
5212 dcc->dcc_ind_blk = hubp_ind_block_64b;
5213 else
5214 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5215 }
37384b3f
BN
5216
5217 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5218 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5219 }
5220
5221 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5222 if (ret)
2be7f77f 5223 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
7df7e505 5224
2be7f77f 5225 return ret;
09e5665a
NK
5226}
5227
5228static int
320932bf 5229fill_plane_buffer_attributes(struct amdgpu_device *adev,
09e5665a 5230 const struct amdgpu_framebuffer *afb,
695af5f9
NK
5231 const enum surface_pixel_format format,
5232 const enum dc_rotation_angle rotation,
5233 const uint64_t tiling_flags,
09e5665a 5234 union dc_tiling_info *tiling_info,
12e2b2d4 5235 struct plane_size *plane_size,
09e5665a 5236 struct dc_plane_dcc_param *dcc,
87b7ebc2 5237 struct dc_plane_address *address,
5888f07a 5238 bool tmz_surface,
87b7ebc2 5239 bool force_disable_dcc)
09e5665a 5240{
320932bf 5241 const struct drm_framebuffer *fb = &afb->base;
09e5665a
NK
5242 int ret;
5243
5244 memset(tiling_info, 0, sizeof(*tiling_info));
320932bf 5245 memset(plane_size, 0, sizeof(*plane_size));
09e5665a 5246 memset(dcc, 0, sizeof(*dcc));
e0634e8d
NK
5247 memset(address, 0, sizeof(*address));
5248
5888f07a
HW
5249 address->tmz_surface = tmz_surface;
5250
695af5f9 5251 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
be7b9b32
BN
5252 uint64_t addr = afb->address + fb->offsets[0];
5253
12e2b2d4
DL
5254 plane_size->surface_size.x = 0;
5255 plane_size->surface_size.y = 0;
5256 plane_size->surface_size.width = fb->width;
5257 plane_size->surface_size.height = fb->height;
5258 plane_size->surface_pitch =
320932bf
NK
5259 fb->pitches[0] / fb->format->cpp[0];
5260
e0634e8d 5261 address->type = PLN_ADDR_TYPE_GRAPHICS;
be7b9b32
BN
5262 address->grph.addr.low_part = lower_32_bits(addr);
5263 address->grph.addr.high_part = upper_32_bits(addr);
1894478a 5264 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
be7b9b32 5265 uint64_t luma_addr = afb->address + fb->offsets[0];
1791e54f 5266 uint64_t chroma_addr = afb->address + fb->offsets[1];
e0634e8d 5267
12e2b2d4
DL
5268 plane_size->surface_size.x = 0;
5269 plane_size->surface_size.y = 0;
5270 plane_size->surface_size.width = fb->width;
5271 plane_size->surface_size.height = fb->height;
5272 plane_size->surface_pitch =
320932bf
NK
5273 fb->pitches[0] / fb->format->cpp[0];
5274
12e2b2d4
DL
5275 plane_size->chroma_size.x = 0;
5276 plane_size->chroma_size.y = 0;
320932bf 5277 /* TODO: set these based on surface format */
12e2b2d4
DL
5278 plane_size->chroma_size.width = fb->width / 2;
5279 plane_size->chroma_size.height = fb->height / 2;
320932bf 5280
12e2b2d4 5281 plane_size->chroma_pitch =
320932bf
NK
5282 fb->pitches[1] / fb->format->cpp[1];
5283
e0634e8d
NK
5284 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5285 address->video_progressive.luma_addr.low_part =
be7b9b32 5286 lower_32_bits(luma_addr);
e0634e8d 5287 address->video_progressive.luma_addr.high_part =
be7b9b32 5288 upper_32_bits(luma_addr);
e0634e8d
NK
5289 address->video_progressive.chroma_addr.low_part =
5290 lower_32_bits(chroma_addr);
5291 address->video_progressive.chroma_addr.high_part =
5292 upper_32_bits(chroma_addr);
5293 }
09e5665a 5294
a3241991 5295 if (adev->family >= AMDGPU_FAMILY_AI) {
9a33e881
BN
5296 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5297 rotation, plane_size,
5298 tiling_info, dcc,
5299 address,
5300 force_disable_dcc);
09e5665a
NK
5301 if (ret)
5302 return ret;
a3241991
BN
5303 } else {
5304 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
09e5665a
NK
5305 }
5306
5307 return 0;
7df7e505
NK
5308}
5309
d74004b6 5310static void
695af5f9 5311fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
d74004b6
NK
5312 bool *per_pixel_alpha, bool *global_alpha,
5313 int *global_alpha_value)
5314{
5315 *per_pixel_alpha = false;
5316 *global_alpha = false;
5317 *global_alpha_value = 0xff;
5318
5319 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5320 return;
5321
5322 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5323 static const uint32_t alpha_formats[] = {
5324 DRM_FORMAT_ARGB8888,
5325 DRM_FORMAT_RGBA8888,
5326 DRM_FORMAT_ABGR8888,
5327 };
5328 uint32_t format = plane_state->fb->format->format;
5329 unsigned int i;
5330
5331 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5332 if (format == alpha_formats[i]) {
5333 *per_pixel_alpha = true;
5334 break;
5335 }
5336 }
5337 }
5338
5339 if (plane_state->alpha < 0xffff) {
5340 *global_alpha = true;
5341 *global_alpha_value = plane_state->alpha >> 8;
5342 }
5343}
5344
004fefa3
NK
5345static int
5346fill_plane_color_attributes(const struct drm_plane_state *plane_state,
695af5f9 5347 const enum surface_pixel_format format,
004fefa3
NK
5348 enum dc_color_space *color_space)
5349{
5350 bool full_range;
5351
5352 *color_space = COLOR_SPACE_SRGB;
5353
5354 /* DRM color properties only affect non-RGB formats. */
695af5f9 5355 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
004fefa3
NK
5356 return 0;
5357
5358 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5359
5360 switch (plane_state->color_encoding) {
5361 case DRM_COLOR_YCBCR_BT601:
5362 if (full_range)
5363 *color_space = COLOR_SPACE_YCBCR601;
5364 else
5365 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5366 break;
5367
5368 case DRM_COLOR_YCBCR_BT709:
5369 if (full_range)
5370 *color_space = COLOR_SPACE_YCBCR709;
5371 else
5372 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5373 break;
5374
5375 case DRM_COLOR_YCBCR_BT2020:
5376 if (full_range)
5377 *color_space = COLOR_SPACE_2020_YCBCR;
5378 else
5379 return -EINVAL;
5380 break;
5381
5382 default:
5383 return -EINVAL;
5384 }
5385
5386 return 0;
5387}
5388
695af5f9
NK
5389static int
5390fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5391 const struct drm_plane_state *plane_state,
5392 const uint64_t tiling_flags,
5393 struct dc_plane_info *plane_info,
87b7ebc2 5394 struct dc_plane_address *address,
5888f07a 5395 bool tmz_surface,
87b7ebc2 5396 bool force_disable_dcc)
695af5f9
NK
5397{
5398 const struct drm_framebuffer *fb = plane_state->fb;
5399 const struct amdgpu_framebuffer *afb =
5400 to_amdgpu_framebuffer(plane_state->fb);
695af5f9
NK
5401 int ret;
5402
5403 memset(plane_info, 0, sizeof(*plane_info));
5404
5405 switch (fb->format->format) {
5406 case DRM_FORMAT_C8:
5407 plane_info->format =
5408 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5409 break;
5410 case DRM_FORMAT_RGB565:
5411 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5412 break;
5413 case DRM_FORMAT_XRGB8888:
5414 case DRM_FORMAT_ARGB8888:
5415 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5416 break;
5417 case DRM_FORMAT_XRGB2101010:
5418 case DRM_FORMAT_ARGB2101010:
5419 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5420 break;
5421 case DRM_FORMAT_XBGR2101010:
5422 case DRM_FORMAT_ABGR2101010:
5423 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5424 break;
5425 case DRM_FORMAT_XBGR8888:
5426 case DRM_FORMAT_ABGR8888:
5427 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5428 break;
5429 case DRM_FORMAT_NV21:
5430 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5431 break;
5432 case DRM_FORMAT_NV12:
5433 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5434 break;
cbec6477
SW
5435 case DRM_FORMAT_P010:
5436 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5437 break;
492548dc
SW
5438 case DRM_FORMAT_XRGB16161616F:
5439 case DRM_FORMAT_ARGB16161616F:
5440 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5441 break;
2a5195dc
MK
5442 case DRM_FORMAT_XBGR16161616F:
5443 case DRM_FORMAT_ABGR16161616F:
5444 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5445 break;
58020403
MK
5446 case DRM_FORMAT_XRGB16161616:
5447 case DRM_FORMAT_ARGB16161616:
5448 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5449 break;
5450 case DRM_FORMAT_XBGR16161616:
5451 case DRM_FORMAT_ABGR16161616:
5452 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5453 break;
695af5f9
NK
5454 default:
5455 DRM_ERROR(
92f1d09c
SA
5456 "Unsupported screen format %p4cc\n",
5457 &fb->format->format);
695af5f9
NK
5458 return -EINVAL;
5459 }
5460
5461 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5462 case DRM_MODE_ROTATE_0:
5463 plane_info->rotation = ROTATION_ANGLE_0;
5464 break;
5465 case DRM_MODE_ROTATE_90:
5466 plane_info->rotation = ROTATION_ANGLE_90;
5467 break;
5468 case DRM_MODE_ROTATE_180:
5469 plane_info->rotation = ROTATION_ANGLE_180;
5470 break;
5471 case DRM_MODE_ROTATE_270:
5472 plane_info->rotation = ROTATION_ANGLE_270;
5473 break;
5474 default:
5475 plane_info->rotation = ROTATION_ANGLE_0;
5476 break;
5477 }
5478
5479 plane_info->visible = true;
5480 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5481
6d83a32d
MS
5482 plane_info->layer_index = 0;
5483
695af5f9
NK
5484 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5485 &plane_info->color_space);
5486 if (ret)
5487 return ret;
5488
5489 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5490 plane_info->rotation, tiling_flags,
5491 &plane_info->tiling_info,
5492 &plane_info->plane_size,
5888f07a 5493 &plane_info->dcc, address, tmz_surface,
87b7ebc2 5494 force_disable_dcc);
695af5f9
NK
5495 if (ret)
5496 return ret;
5497
5498 fill_blending_from_plane_state(
5499 plane_state, &plane_info->per_pixel_alpha,
5500 &plane_info->global_alpha, &plane_info->global_alpha_value);
5501
5502 return 0;
5503}
5504
5505static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5506 struct dc_plane_state *dc_plane_state,
5507 struct drm_plane_state *plane_state,
5508 struct drm_crtc_state *crtc_state)
e7b07cee 5509{
cf020d49 5510 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6eed95b0 5511 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
695af5f9
NK
5512 struct dc_scaling_info scaling_info;
5513 struct dc_plane_info plane_info;
695af5f9 5514 int ret;
87b7ebc2 5515 bool force_disable_dcc = false;
e7b07cee 5516
4375d625 5517 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
695af5f9
NK
5518 if (ret)
5519 return ret;
e7b07cee 5520
695af5f9
NK
5521 dc_plane_state->src_rect = scaling_info.src_rect;
5522 dc_plane_state->dst_rect = scaling_info.dst_rect;
5523 dc_plane_state->clip_rect = scaling_info.clip_rect;
5524 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
e7b07cee 5525
87b7ebc2 5526 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
707477b0 5527 ret = fill_dc_plane_info_and_addr(adev, plane_state,
6eed95b0 5528 afb->tiling_flags,
695af5f9 5529 &plane_info,
87b7ebc2 5530 &dc_plane_state->address,
6eed95b0 5531 afb->tmz_surface,
87b7ebc2 5532 force_disable_dcc);
004fefa3
NK
5533 if (ret)
5534 return ret;
5535
695af5f9
NK
5536 dc_plane_state->format = plane_info.format;
5537 dc_plane_state->color_space = plane_info.color_space;
5538 dc_plane_state->format = plane_info.format;
5539 dc_plane_state->plane_size = plane_info.plane_size;
5540 dc_plane_state->rotation = plane_info.rotation;
5541 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5542 dc_plane_state->stereo_format = plane_info.stereo_format;
5543 dc_plane_state->tiling_info = plane_info.tiling_info;
5544 dc_plane_state->visible = plane_info.visible;
5545 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5546 dc_plane_state->global_alpha = plane_info.global_alpha;
5547 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5548 dc_plane_state->dcc = plane_info.dcc;
6d83a32d 5549 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
7afa0033 5550 dc_plane_state->flip_int_enabled = true;
695af5f9 5551
e277adc5
LSL
5552 /*
5553 * Always set input transfer function, since plane state is refreshed
5554 * every time.
5555 */
cf020d49
NK
5556 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5557 if (ret)
5558 return ret;
e7b07cee 5559
cf020d49 5560 return 0;
e7b07cee
HW
5561}
5562
3ee6b26b
AD
5563static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5564 const struct dm_connector_state *dm_state,
5565 struct dc_stream_state *stream)
e7b07cee
HW
5566{
5567 enum amdgpu_rmx_type rmx_type;
5568
5569 struct rect src = { 0 }; /* viewport in composition space*/
5570 struct rect dst = { 0 }; /* stream addressable area */
5571
5572 /* no mode. nothing to be done */
5573 if (!mode)
5574 return;
5575
5576 /* Full screen scaling by default */
5577 src.width = mode->hdisplay;
5578 src.height = mode->vdisplay;
5579 dst.width = stream->timing.h_addressable;
5580 dst.height = stream->timing.v_addressable;
5581
f4791779
HW
5582 if (dm_state) {
5583 rmx_type = dm_state->scaling;
5584 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5585 if (src.width * dst.height <
5586 src.height * dst.width) {
5587 /* height needs less upscaling/more downscaling */
5588 dst.width = src.width *
5589 dst.height / src.height;
5590 } else {
5591 /* width needs less upscaling/more downscaling */
5592 dst.height = src.height *
5593 dst.width / src.width;
5594 }
5595 } else if (rmx_type == RMX_CENTER) {
5596 dst = src;
e7b07cee 5597 }
e7b07cee 5598
f4791779
HW
5599 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5600 dst.y = (stream->timing.v_addressable - dst.height) / 2;
e7b07cee 5601
f4791779
HW
5602 if (dm_state->underscan_enable) {
5603 dst.x += dm_state->underscan_hborder / 2;
5604 dst.y += dm_state->underscan_vborder / 2;
5605 dst.width -= dm_state->underscan_hborder;
5606 dst.height -= dm_state->underscan_vborder;
5607 }
e7b07cee
HW
5608 }
5609
5610 stream->src = src;
5611 stream->dst = dst;
5612
4711c033
LT
5613 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5614 dst.x, dst.y, dst.width, dst.height);
e7b07cee
HW
5615
5616}
5617
3ee6b26b 5618static enum dc_color_depth
42ba01fc 5619convert_color_depth_from_display_info(const struct drm_connector *connector,
cbd14ae7 5620 bool is_y420, int requested_bpc)
e7b07cee 5621{
1bc22f20 5622 uint8_t bpc;
01c22997 5623
1bc22f20
SW
5624 if (is_y420) {
5625 bpc = 8;
5626
5627 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5628 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5629 bpc = 16;
5630 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5631 bpc = 12;
5632 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5633 bpc = 10;
5634 } else {
5635 bpc = (uint8_t)connector->display_info.bpc;
5636 /* Assume 8 bpc by default if no bpc is specified. */
5637 bpc = bpc ? bpc : 8;
5638 }
e7b07cee 5639
cbd14ae7 5640 if (requested_bpc > 0) {
01c22997
NK
5641 /*
5642 * Cap display bpc based on the user requested value.
5643 *
5644 * The value for state->max_bpc may not correctly updated
5645 * depending on when the connector gets added to the state
5646 * or if this was called outside of atomic check, so it
5647 * can't be used directly.
5648 */
cbd14ae7 5649 bpc = min_t(u8, bpc, requested_bpc);
01c22997 5650
1825fd34
NK
5651 /* Round down to the nearest even number. */
5652 bpc = bpc - (bpc & 1);
5653 }
07e3a1cf 5654
e7b07cee
HW
5655 switch (bpc) {
5656 case 0:
1f6010a9
DF
5657 /*
5658 * Temporary Work around, DRM doesn't parse color depth for
e7b07cee
HW
5659 * EDID revision before 1.4
5660 * TODO: Fix edid parsing
5661 */
5662 return COLOR_DEPTH_888;
5663 case 6:
5664 return COLOR_DEPTH_666;
5665 case 8:
5666 return COLOR_DEPTH_888;
5667 case 10:
5668 return COLOR_DEPTH_101010;
5669 case 12:
5670 return COLOR_DEPTH_121212;
5671 case 14:
5672 return COLOR_DEPTH_141414;
5673 case 16:
5674 return COLOR_DEPTH_161616;
5675 default:
5676 return COLOR_DEPTH_UNDEFINED;
5677 }
5678}
5679
3ee6b26b
AD
5680static enum dc_aspect_ratio
5681get_aspect_ratio(const struct drm_display_mode *mode_in)
e7b07cee 5682{
e11d4147
LSL
5683 /* 1-1 mapping, since both enums follow the HDMI spec. */
5684 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
e7b07cee
HW
5685}
5686
3ee6b26b
AD
5687static enum dc_color_space
5688get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
e7b07cee
HW
5689{
5690 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5691
5692 switch (dc_crtc_timing->pixel_encoding) {
5693 case PIXEL_ENCODING_YCBCR422:
5694 case PIXEL_ENCODING_YCBCR444:
5695 case PIXEL_ENCODING_YCBCR420:
5696 {
5697 /*
5698 * 27030khz is the separation point between HDTV and SDTV
5699 * according to HDMI spec, we use YCbCr709 and YCbCr601
5700 * respectively
5701 */
380604e2 5702 if (dc_crtc_timing->pix_clk_100hz > 270300) {
e7b07cee
HW
5703 if (dc_crtc_timing->flags.Y_ONLY)
5704 color_space =
5705 COLOR_SPACE_YCBCR709_LIMITED;
5706 else
5707 color_space = COLOR_SPACE_YCBCR709;
5708 } else {
5709 if (dc_crtc_timing->flags.Y_ONLY)
5710 color_space =
5711 COLOR_SPACE_YCBCR601_LIMITED;
5712 else
5713 color_space = COLOR_SPACE_YCBCR601;
5714 }
5715
5716 }
5717 break;
5718 case PIXEL_ENCODING_RGB:
5719 color_space = COLOR_SPACE_SRGB;
5720 break;
5721
5722 default:
5723 WARN_ON(1);
5724 break;
5725 }
5726
5727 return color_space;
5728}
5729
ea117312
TA
5730static bool adjust_colour_depth_from_display_info(
5731 struct dc_crtc_timing *timing_out,
5732 const struct drm_display_info *info)
400443e8 5733{
ea117312 5734 enum dc_color_depth depth = timing_out->display_color_depth;
400443e8 5735 int normalized_clk;
400443e8 5736 do {
380604e2 5737 normalized_clk = timing_out->pix_clk_100hz / 10;
400443e8
ML
5738 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5739 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5740 normalized_clk /= 2;
5741 /* Adjusting pix clock following on HDMI spec based on colour depth */
ea117312
TA
5742 switch (depth) {
5743 case COLOR_DEPTH_888:
5744 break;
400443e8
ML
5745 case COLOR_DEPTH_101010:
5746 normalized_clk = (normalized_clk * 30) / 24;
5747 break;
5748 case COLOR_DEPTH_121212:
5749 normalized_clk = (normalized_clk * 36) / 24;
5750 break;
5751 case COLOR_DEPTH_161616:
5752 normalized_clk = (normalized_clk * 48) / 24;
5753 break;
5754 default:
ea117312
TA
5755 /* The above depths are the only ones valid for HDMI. */
5756 return false;
400443e8 5757 }
ea117312
TA
5758 if (normalized_clk <= info->max_tmds_clock) {
5759 timing_out->display_color_depth = depth;
5760 return true;
5761 }
5762 } while (--depth > COLOR_DEPTH_666);
5763 return false;
400443e8 5764}
e7b07cee 5765
42ba01fc
NK
5766static void fill_stream_properties_from_drm_display_mode(
5767 struct dc_stream_state *stream,
5768 const struct drm_display_mode *mode_in,
5769 const struct drm_connector *connector,
5770 const struct drm_connector_state *connector_state,
cbd14ae7
SW
5771 const struct dc_stream_state *old_stream,
5772 int requested_bpc)
e7b07cee
HW
5773{
5774 struct dc_crtc_timing *timing_out = &stream->timing;
fe61a2f1 5775 const struct drm_display_info *info = &connector->display_info;
d4252eee 5776 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1cb1d477
WL
5777 struct hdmi_vendor_infoframe hv_frame;
5778 struct hdmi_avi_infoframe avi_frame;
e7b07cee 5779
acf83f86
WL
5780 memset(&hv_frame, 0, sizeof(hv_frame));
5781 memset(&avi_frame, 0, sizeof(avi_frame));
5782
e7b07cee
HW
5783 timing_out->h_border_left = 0;
5784 timing_out->h_border_right = 0;
5785 timing_out->v_border_top = 0;
5786 timing_out->v_border_bottom = 0;
5787 /* TODO: un-hardcode */
fe61a2f1 5788 if (drm_mode_is_420_only(info, mode_in)
ceb3dbb4 5789 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
fe61a2f1 5790 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
d4252eee
SW
5791 else if (drm_mode_is_420_also(info, mode_in)
5792 && aconnector->force_yuv420_output)
5793 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
fe61a2f1 5794 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
ceb3dbb4 5795 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
e7b07cee
HW
5796 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5797 else
5798 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5799
5800 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5801 timing_out->display_color_depth = convert_color_depth_from_display_info(
cbd14ae7
SW
5802 connector,
5803 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5804 requested_bpc);
e7b07cee
HW
5805 timing_out->scan_type = SCANNING_TYPE_NODATA;
5806 timing_out->hdmi_vic = 0;
b333730d
BL
5807
5808 if(old_stream) {
5809 timing_out->vic = old_stream->timing.vic;
5810 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5811 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5812 } else {
5813 timing_out->vic = drm_match_cea_mode(mode_in);
5814 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5815 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5816 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5817 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5818 }
e7b07cee 5819
1cb1d477
WL
5820 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5821 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5822 timing_out->vic = avi_frame.video_code;
5823 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5824 timing_out->hdmi_vic = hv_frame.vic;
5825 }
5826
fe8858bb
NC
5827 if (is_freesync_video_mode(mode_in, aconnector)) {
5828 timing_out->h_addressable = mode_in->hdisplay;
5829 timing_out->h_total = mode_in->htotal;
5830 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5831 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5832 timing_out->v_total = mode_in->vtotal;
5833 timing_out->v_addressable = mode_in->vdisplay;
5834 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5835 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5836 timing_out->pix_clk_100hz = mode_in->clock * 10;
5837 } else {
5838 timing_out->h_addressable = mode_in->crtc_hdisplay;
5839 timing_out->h_total = mode_in->crtc_htotal;
5840 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5841 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5842 timing_out->v_total = mode_in->crtc_vtotal;
5843 timing_out->v_addressable = mode_in->crtc_vdisplay;
5844 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5845 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5846 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5847 }
a85ba005 5848
e7b07cee 5849 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
e7b07cee
HW
5850
5851 stream->output_color_space = get_output_color_space(timing_out);
5852
e43a432c
AK
5853 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5854 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
ea117312
TA
5855 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5856 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5857 drm_mode_is_420_also(info, mode_in) &&
5858 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5859 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5860 adjust_colour_depth_from_display_info(timing_out, info);
5861 }
5862 }
e7b07cee
HW
5863}
5864
3ee6b26b
AD
5865static void fill_audio_info(struct audio_info *audio_info,
5866 const struct drm_connector *drm_connector,
5867 const struct dc_sink *dc_sink)
e7b07cee
HW
5868{
5869 int i = 0;
5870 int cea_revision = 0;
5871 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5872
5873 audio_info->manufacture_id = edid_caps->manufacturer_id;
5874 audio_info->product_id = edid_caps->product_id;
5875
5876 cea_revision = drm_connector->display_info.cea_rev;
5877
090afc1e 5878 strscpy(audio_info->display_name,
d2b2562c 5879 edid_caps->display_name,
090afc1e 5880 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
e7b07cee 5881
b830ebc9 5882 if (cea_revision >= 3) {
e7b07cee
HW
5883 audio_info->mode_count = edid_caps->audio_mode_count;
5884
5885 for (i = 0; i < audio_info->mode_count; ++i) {
5886 audio_info->modes[i].format_code =
5887 (enum audio_format_code)
5888 (edid_caps->audio_modes[i].format_code);
5889 audio_info->modes[i].channel_count =
5890 edid_caps->audio_modes[i].channel_count;
5891 audio_info->modes[i].sample_rates.all =
5892 edid_caps->audio_modes[i].sample_rate;
5893 audio_info->modes[i].sample_size =
5894 edid_caps->audio_modes[i].sample_size;
5895 }
5896 }
5897
5898 audio_info->flags.all = edid_caps->speaker_flags;
5899
5900 /* TODO: We only check for the progressive mode, check for interlace mode too */
b830ebc9 5901 if (drm_connector->latency_present[0]) {
e7b07cee
HW
5902 audio_info->video_latency = drm_connector->video_latency[0];
5903 audio_info->audio_latency = drm_connector->audio_latency[0];
5904 }
5905
5906 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5907
5908}
5909
3ee6b26b
AD
5910static void
5911copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5912 struct drm_display_mode *dst_mode)
e7b07cee
HW
5913{
5914 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5915 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5916 dst_mode->crtc_clock = src_mode->crtc_clock;
5917 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5918 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
b830ebc9 5919 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
e7b07cee
HW
5920 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5921 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5922 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5923 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5924 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5925 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5926 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5927 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5928}
5929
3ee6b26b
AD
5930static void
5931decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5932 const struct drm_display_mode *native_mode,
5933 bool scale_enabled)
e7b07cee
HW
5934{
5935 if (scale_enabled) {
5936 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5937 } else if (native_mode->clock == drm_mode->clock &&
5938 native_mode->htotal == drm_mode->htotal &&
5939 native_mode->vtotal == drm_mode->vtotal) {
5940 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5941 } else {
5942 /* no scaling nor amdgpu inserted, no need to patch */
5943 }
5944}
5945
aed15309
ML
5946static struct dc_sink *
5947create_fake_sink(struct amdgpu_dm_connector *aconnector)
2e0ac3d6 5948{
2e0ac3d6 5949 struct dc_sink_init_data sink_init_data = { 0 };
aed15309 5950 struct dc_sink *sink = NULL;
2e0ac3d6
HW
5951 sink_init_data.link = aconnector->dc_link;
5952 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5953
5954 sink = dc_sink_create(&sink_init_data);
423788c7 5955 if (!sink) {
2e0ac3d6 5956 DRM_ERROR("Failed to create sink!\n");
aed15309 5957 return NULL;
423788c7 5958 }
2e0ac3d6 5959 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
423788c7 5960
aed15309 5961 return sink;
2e0ac3d6
HW
5962}
5963
fa2123db
ML
5964static void set_multisync_trigger_params(
5965 struct dc_stream_state *stream)
5966{
ec372186
ML
5967 struct dc_stream_state *master = NULL;
5968
fa2123db 5969 if (stream->triggered_crtc_reset.enabled) {
ec372186
ML
5970 master = stream->triggered_crtc_reset.event_source;
5971 stream->triggered_crtc_reset.event =
5972 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5973 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5974 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
fa2123db
ML
5975 }
5976}
5977
5978static void set_master_stream(struct dc_stream_state *stream_set[],
5979 int stream_count)
5980{
5981 int j, highest_rfr = 0, master_stream = 0;
5982
5983 for (j = 0; j < stream_count; j++) {
5984 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5985 int refresh_rate = 0;
5986
380604e2 5987 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
fa2123db
ML
5988 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5989 if (refresh_rate > highest_rfr) {
5990 highest_rfr = refresh_rate;
5991 master_stream = j;
5992 }
5993 }
5994 }
5995 for (j = 0; j < stream_count; j++) {
03736f4c 5996 if (stream_set[j])
fa2123db
ML
5997 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5998 }
5999}
6000
6001static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6002{
6003 int i = 0;
ec372186 6004 struct dc_stream_state *stream;
fa2123db
ML
6005
6006 if (context->stream_count < 2)
6007 return;
6008 for (i = 0; i < context->stream_count ; i++) {
6009 if (!context->streams[i])
6010 continue;
1f6010a9
DF
6011 /*
6012 * TODO: add a function to read AMD VSDB bits and set
fa2123db 6013 * crtc_sync_master.multi_sync_enabled flag
1f6010a9 6014 * For now it's set to false
fa2123db 6015 */
fa2123db 6016 }
ec372186 6017
fa2123db 6018 set_master_stream(context->streams, context->stream_count);
ec372186
ML
6019
6020 for (i = 0; i < context->stream_count ; i++) {
6021 stream = context->streams[i];
6022
6023 if (!stream)
6024 continue;
6025
6026 set_multisync_trigger_params(stream);
6027 }
fa2123db
ML
6028}
6029
ea2be5c0 6030#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6031static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6032 struct dc_sink *sink, struct dc_stream_state *stream,
6033 struct dsc_dec_dpcd_caps *dsc_caps)
6034{
6035 stream->timing.flags.DSC = 0;
6036
6037 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
998b7ad2
FZ
6038 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6039 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6040 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6041 dsc_caps);
998b7ad2
FZ
6042 }
6043}
6044
6045static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6046 struct dc_sink *sink, struct dc_stream_state *stream,
6047 struct dsc_dec_dpcd_caps *dsc_caps)
6048{
6049 struct drm_connector *drm_connector = &aconnector->base;
6050 uint32_t link_bandwidth_kbps;
f1c1a982 6051 uint32_t max_dsc_target_bpp_limit_override = 0;
998b7ad2
FZ
6052
6053 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6054 dc_link_get_link_cap(aconnector->dc_link));
f1c1a982
RL
6055
6056 if (stream->link && stream->link->local_sink)
6057 max_dsc_target_bpp_limit_override =
6058 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6059
998b7ad2
FZ
6060 /* Set DSC policy according to dsc_clock_en */
6061 dc_dsc_policy_set_enable_dsc_when_not_needed(
6062 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6063
6064 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6065
6066 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6067 dsc_caps,
6068 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
f1c1a982 6069 max_dsc_target_bpp_limit_override,
998b7ad2
FZ
6070 link_bandwidth_kbps,
6071 &stream->timing,
6072 &stream->timing.dsc_cfg)) {
6073 stream->timing.flags.DSC = 1;
6074 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6075 }
6076 }
6077
6078 /* Overwrite the stream flag if DSC is enabled through debugfs */
6079 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6080 stream->timing.flags.DSC = 1;
6081
6082 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6083 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6084
6085 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6086 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6087
6088 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6089 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
998b7ad2 6090}
433e5dec 6091#endif /* CONFIG_DRM_AMD_DC_DCN */
998b7ad2 6092
5fd953a3
RS
6093/**
6094 * DOC: FreeSync Video
6095 *
6096 * When a userspace application wants to play a video, the content follows a
6097 * standard format definition that usually specifies the FPS for that format.
6098 * The below list illustrates some video format and the expected FPS,
6099 * respectively:
6100 *
6101 * - TV/NTSC (23.976 FPS)
6102 * - Cinema (24 FPS)
6103 * - TV/PAL (25 FPS)
6104 * - TV/NTSC (29.97 FPS)
6105 * - TV/NTSC (30 FPS)
6106 * - Cinema HFR (48 FPS)
6107 * - TV/PAL (50 FPS)
6108 * - Commonly used (60 FPS)
12cdff6b 6109 * - Multiples of 24 (48,72,96,120 FPS)
5fd953a3
RS
6110 *
6111 * The list of standards video format is not huge and can be added to the
6112 * connector modeset list beforehand. With that, userspace can leverage
6113 * FreeSync to extends the front porch in order to attain the target refresh
6114 * rate. Such a switch will happen seamlessly, without screen blanking or
6115 * reprogramming of the output in any other way. If the userspace requests a
6116 * modesetting change compatible with FreeSync modes that only differ in the
6117 * refresh rate, DC will skip the full update and avoid blink during the
6118 * transition. For example, the video player can change the modesetting from
6119 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6120 * causing any display blink. This same concept can be applied to a mode
6121 * setting change.
6122 */
a85ba005
NC
6123static struct drm_display_mode *
6124get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6125 bool use_probed_modes)
6126{
6127 struct drm_display_mode *m, *m_pref = NULL;
6128 u16 current_refresh, highest_refresh;
6129 struct list_head *list_head = use_probed_modes ?
6130 &aconnector->base.probed_modes :
6131 &aconnector->base.modes;
6132
6133 if (aconnector->freesync_vid_base.clock != 0)
6134 return &aconnector->freesync_vid_base;
6135
6136 /* Find the preferred mode */
6137 list_for_each_entry (m, list_head, head) {
6138 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6139 m_pref = m;
6140 break;
6141 }
6142 }
6143
6144 if (!m_pref) {
6145 /* Probably an EDID with no preferred mode. Fallback to first entry */
6146 m_pref = list_first_entry_or_null(
6147 &aconnector->base.modes, struct drm_display_mode, head);
6148 if (!m_pref) {
6149 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6150 return NULL;
6151 }
6152 }
6153
6154 highest_refresh = drm_mode_vrefresh(m_pref);
6155
6156 /*
6157 * Find the mode with highest refresh rate with same resolution.
6158 * For some monitors, preferred mode is not the mode with highest
6159 * supported refresh rate.
6160 */
6161 list_for_each_entry (m, list_head, head) {
6162 current_refresh = drm_mode_vrefresh(m);
6163
6164 if (m->hdisplay == m_pref->hdisplay &&
6165 m->vdisplay == m_pref->vdisplay &&
6166 highest_refresh < current_refresh) {
6167 highest_refresh = current_refresh;
6168 m_pref = m;
6169 }
6170 }
6171
6172 aconnector->freesync_vid_base = *m_pref;
6173 return m_pref;
6174}
6175
fe8858bb 6176static bool is_freesync_video_mode(const struct drm_display_mode *mode,
a85ba005
NC
6177 struct amdgpu_dm_connector *aconnector)
6178{
6179 struct drm_display_mode *high_mode;
6180 int timing_diff;
6181
6182 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6183 if (!high_mode || !mode)
6184 return false;
6185
6186 timing_diff = high_mode->vtotal - mode->vtotal;
6187
6188 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6189 high_mode->hdisplay != mode->hdisplay ||
6190 high_mode->vdisplay != mode->vdisplay ||
6191 high_mode->hsync_start != mode->hsync_start ||
6192 high_mode->hsync_end != mode->hsync_end ||
6193 high_mode->htotal != mode->htotal ||
6194 high_mode->hskew != mode->hskew ||
6195 high_mode->vscan != mode->vscan ||
6196 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6197 high_mode->vsync_end - mode->vsync_end != timing_diff)
6198 return false;
6199 else
6200 return true;
6201}
6202
3ee6b26b
AD
6203static struct dc_stream_state *
6204create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6205 const struct drm_display_mode *drm_mode,
b333730d 6206 const struct dm_connector_state *dm_state,
cbd14ae7
SW
6207 const struct dc_stream_state *old_stream,
6208 int requested_bpc)
e7b07cee
HW
6209{
6210 struct drm_display_mode *preferred_mode = NULL;
391ef035 6211 struct drm_connector *drm_connector;
42ba01fc
NK
6212 const struct drm_connector_state *con_state =
6213 dm_state ? &dm_state->base : NULL;
0971c40e 6214 struct dc_stream_state *stream = NULL;
e7b07cee 6215 struct drm_display_mode mode = *drm_mode;
a85ba005
NC
6216 struct drm_display_mode saved_mode;
6217 struct drm_display_mode *freesync_mode = NULL;
e7b07cee 6218 bool native_mode_found = false;
b0781603
NK
6219 bool recalculate_timing = false;
6220 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
b333730d 6221 int mode_refresh;
58124bf8 6222 int preferred_refresh = 0;
defeb878 6223#if defined(CONFIG_DRM_AMD_DC_DCN)
df2f1015 6224 struct dsc_dec_dpcd_caps dsc_caps;
7c431455 6225#endif
aed15309 6226 struct dc_sink *sink = NULL;
a85ba005
NC
6227
6228 memset(&saved_mode, 0, sizeof(saved_mode));
6229
b830ebc9 6230 if (aconnector == NULL) {
e7b07cee 6231 DRM_ERROR("aconnector is NULL!\n");
64245fa7 6232 return stream;
e7b07cee
HW
6233 }
6234
e7b07cee 6235 drm_connector = &aconnector->base;
2e0ac3d6 6236
f4ac176e 6237 if (!aconnector->dc_sink) {
e3fa5c4c
JFZ
6238 sink = create_fake_sink(aconnector);
6239 if (!sink)
6240 return stream;
aed15309
ML
6241 } else {
6242 sink = aconnector->dc_sink;
dcd5fb82 6243 dc_sink_retain(sink);
f4ac176e 6244 }
2e0ac3d6 6245
aed15309 6246 stream = dc_create_stream_for_sink(sink);
4562236b 6247
b830ebc9 6248 if (stream == NULL) {
e7b07cee 6249 DRM_ERROR("Failed to create stream for sink!\n");
aed15309 6250 goto finish;
e7b07cee
HW
6251 }
6252
ceb3dbb4
JL
6253 stream->dm_stream_context = aconnector;
6254
4a36fcba
WL
6255 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6256 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6257
e7b07cee
HW
6258 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6259 /* Search for preferred mode */
6260 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6261 native_mode_found = true;
6262 break;
6263 }
6264 }
6265 if (!native_mode_found)
6266 preferred_mode = list_first_entry_or_null(
6267 &aconnector->base.modes,
6268 struct drm_display_mode,
6269 head);
6270
b333730d
BL
6271 mode_refresh = drm_mode_vrefresh(&mode);
6272
b830ebc9 6273 if (preferred_mode == NULL) {
1f6010a9
DF
6274 /*
6275 * This may not be an error, the use case is when we have no
e7b07cee
HW
6276 * usermode calls to reset and set mode upon hotplug. In this
6277 * case, we call set mode ourselves to restore the previous mode
6278 * and the modelist may not be filled in in time.
6279 */
f1ad2f5e 6280 DRM_DEBUG_DRIVER("No preferred mode found\n");
e7b07cee 6281 } else {
b0781603 6282 recalculate_timing = amdgpu_freesync_vid_mode &&
a85ba005
NC
6283 is_freesync_video_mode(&mode, aconnector);
6284 if (recalculate_timing) {
6285 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6286 saved_mode = mode;
6287 mode = *freesync_mode;
6288 } else {
6289 decide_crtc_timing_for_drm_display_mode(
b0781603 6290 &mode, preferred_mode, scale);
a85ba005 6291
b0781603
NK
6292 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6293 }
e7b07cee
HW
6294 }
6295
a85ba005
NC
6296 if (recalculate_timing)
6297 drm_mode_set_crtcinfo(&saved_mode, 0);
fe8858bb 6298 else if (!dm_state)
f783577c
JFZ
6299 drm_mode_set_crtcinfo(&mode, 0);
6300
a85ba005 6301 /*
b333730d
BL
6302 * If scaling is enabled and refresh rate didn't change
6303 * we copy the vic and polarities of the old timings
6304 */
b0781603 6305 if (!scale || mode_refresh != preferred_refresh)
a85ba005
NC
6306 fill_stream_properties_from_drm_display_mode(
6307 stream, &mode, &aconnector->base, con_state, NULL,
6308 requested_bpc);
b333730d 6309 else
a85ba005
NC
6310 fill_stream_properties_from_drm_display_mode(
6311 stream, &mode, &aconnector->base, con_state, old_stream,
6312 requested_bpc);
b333730d 6313
defeb878 6314#if defined(CONFIG_DRM_AMD_DC_DCN)
998b7ad2
FZ
6315 /* SST DSC determination policy */
6316 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6317 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6318 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
39a4eb85
WL
6319#endif
6320
e7b07cee
HW
6321 update_stream_scaling_settings(&mode, dm_state, stream);
6322
6323 fill_audio_info(
6324 &stream->audio_info,
6325 drm_connector,
aed15309 6326 sink);
e7b07cee 6327
ceb3dbb4 6328 update_stream_signal(stream, sink);
9182b4cb 6329
d832fc3b 6330 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
75f77aaf
WL
6331 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6332
8a488f5d
RL
6333 if (stream->link->psr_settings.psr_feature_enabled) {
6334 //
6335 // should decide stream support vsc sdp colorimetry capability
6336 // before building vsc info packet
6337 //
6338 stream->use_vsc_sdp_for_colorimetry = false;
6339 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6340 stream->use_vsc_sdp_for_colorimetry =
6341 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6342 } else {
6343 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6344 stream->use_vsc_sdp_for_colorimetry = true;
8c322309 6345 }
8a488f5d 6346 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
1a365683
RL
6347 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6348
8c322309 6349 }
aed15309 6350finish:
dcd5fb82 6351 dc_sink_release(sink);
9e3efe3e 6352
e7b07cee
HW
6353 return stream;
6354}
6355
7578ecda 6356static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
e7b07cee
HW
6357{
6358 drm_crtc_cleanup(crtc);
6359 kfree(crtc);
6360}
6361
6362static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3ee6b26b 6363 struct drm_crtc_state *state)
e7b07cee
HW
6364{
6365 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6366
6367 /* TODO Destroy dc_stream objects are stream object is flattened */
6368 if (cur->stream)
6369 dc_stream_release(cur->stream);
6370
6371
6372 __drm_atomic_helper_crtc_destroy_state(state);
6373
6374
6375 kfree(state);
6376}
6377
6378static void dm_crtc_reset_state(struct drm_crtc *crtc)
6379{
6380 struct dm_crtc_state *state;
6381
6382 if (crtc->state)
6383 dm_crtc_destroy_state(crtc, crtc->state);
6384
6385 state = kzalloc(sizeof(*state), GFP_KERNEL);
6386 if (WARN_ON(!state))
6387 return;
6388
1f8a52ec 6389 __drm_atomic_helper_crtc_reset(crtc, &state->base);
e7b07cee
HW
6390}
6391
6392static struct drm_crtc_state *
6393dm_crtc_duplicate_state(struct drm_crtc *crtc)
6394{
6395 struct dm_crtc_state *state, *cur;
6396
6397 cur = to_dm_crtc_state(crtc->state);
6398
6399 if (WARN_ON(!crtc->state))
6400 return NULL;
6401
2004f45e 6402 state = kzalloc(sizeof(*state), GFP_KERNEL);
2a55f096
ES
6403 if (!state)
6404 return NULL;
e7b07cee
HW
6405
6406 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6407
6408 if (cur->stream) {
6409 state->stream = cur->stream;
6410 dc_stream_retain(state->stream);
6411 }
6412
d6ef9b41 6413 state->active_planes = cur->active_planes;
98e6436d 6414 state->vrr_infopacket = cur->vrr_infopacket;
c1ee92f9 6415 state->abm_level = cur->abm_level;
bb47de73
NK
6416 state->vrr_supported = cur->vrr_supported;
6417 state->freesync_config = cur->freesync_config;
cf020d49
NK
6418 state->cm_has_degamma = cur->cm_has_degamma;
6419 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
035f5496 6420 state->force_dpms_off = cur->force_dpms_off;
e7b07cee
HW
6421 /* TODO Duplicate dc_stream after objects are stream object is flattened */
6422
6423 return &state->base;
6424}
6425
86bc2219 6426#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
e69231c4 6427static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
86bc2219
WL
6428{
6429 crtc_debugfs_init(crtc);
6430
6431 return 0;
6432}
6433#endif
6434
d2574c33
MK
6435static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6436{
6437 enum dc_irq_source irq_source;
6438 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6439 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33
MK
6440 int rc;
6441
6442 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6443
6444 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6445
4711c033
LT
6446 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6447 acrtc->crtc_id, enable ? "en" : "dis", rc);
d2574c33
MK
6448 return rc;
6449}
589d2739
HW
6450
6451static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6452{
6453 enum dc_irq_source irq_source;
6454 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1348969a 6455 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
d2574c33 6456 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
ea3b4242 6457#if defined(CONFIG_DRM_AMD_DC_DCN)
71338cb4 6458 struct amdgpu_display_manager *dm = &adev->dm;
09a5df6c 6459 struct vblank_control_work *work;
ea3b4242 6460#endif
d2574c33
MK
6461 int rc = 0;
6462
6463 if (enable) {
6464 /* vblank irq on -> Only need vupdate irq in vrr mode */
6465 if (amdgpu_dm_vrr_active(acrtc_state))
6466 rc = dm_set_vupdate_irq(crtc, true);
6467 } else {
6468 /* vblank irq off -> vupdate irq off */
6469 rc = dm_set_vupdate_irq(crtc, false);
6470 }
6471
6472 if (rc)
6473 return rc;
589d2739
HW
6474
6475 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
71338cb4
BL
6476
6477 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6478 return -EBUSY;
6479
98ab5f35
BL
6480 if (amdgpu_in_reset(adev))
6481 return 0;
6482
4928b480 6483#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
6484 if (dm->vblank_control_workqueue) {
6485 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6486 if (!work)
6487 return -ENOMEM;
09a5df6c 6488
06dd1888
NK
6489 INIT_WORK(&work->work, vblank_control_worker);
6490 work->dm = dm;
6491 work->acrtc = acrtc;
6492 work->enable = enable;
09a5df6c 6493
06dd1888
NK
6494 if (acrtc_state->stream) {
6495 dc_stream_retain(acrtc_state->stream);
6496 work->stream = acrtc_state->stream;
6497 }
58aa1c50 6498
06dd1888
NK
6499 queue_work(dm->vblank_control_workqueue, &work->work);
6500 }
4928b480 6501#endif
71338cb4 6502
71338cb4 6503 return 0;
589d2739
HW
6504}
6505
6506static int dm_enable_vblank(struct drm_crtc *crtc)
6507{
6508 return dm_set_vblank(crtc, true);
6509}
6510
6511static void dm_disable_vblank(struct drm_crtc *crtc)
6512{
6513 dm_set_vblank(crtc, false);
6514}
6515
e7b07cee
HW
6516/* Implemented only the options currently availible for the driver */
6517static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6518 .reset = dm_crtc_reset_state,
6519 .destroy = amdgpu_dm_crtc_destroy,
e7b07cee
HW
6520 .set_config = drm_atomic_helper_set_config,
6521 .page_flip = drm_atomic_helper_page_flip,
6522 .atomic_duplicate_state = dm_crtc_duplicate_state,
6523 .atomic_destroy_state = dm_crtc_destroy_state,
31aec354 6524 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3b3b8448 6525 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
8fb843d1 6526 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
e3eff4b5 6527 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
589d2739
HW
6528 .enable_vblank = dm_enable_vblank,
6529 .disable_vblank = dm_disable_vblank,
e3eff4b5 6530 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
86bc2219
WL
6531#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6532 .late_register = amdgpu_dm_crtc_late_register,
6533#endif
e7b07cee
HW
6534};
6535
6536static enum drm_connector_status
6537amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6538{
6539 bool connected;
c84dec2f 6540 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6541
1f6010a9
DF
6542 /*
6543 * Notes:
e7b07cee
HW
6544 * 1. This interface is NOT called in context of HPD irq.
6545 * 2. This interface *is called* in context of user-mode ioctl. Which
1f6010a9
DF
6546 * makes it a bad place for *any* MST-related activity.
6547 */
e7b07cee 6548
8580d60b
HW
6549 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6550 !aconnector->fake_enable)
e7b07cee
HW
6551 connected = (aconnector->dc_sink != NULL);
6552 else
6553 connected = (aconnector->base.force == DRM_FORCE_ON);
6554
0f877894
OV
6555 update_subconnector_property(aconnector);
6556
e7b07cee
HW
6557 return (connected ? connector_status_connected :
6558 connector_status_disconnected);
6559}
6560
3ee6b26b
AD
6561int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6562 struct drm_connector_state *connector_state,
6563 struct drm_property *property,
6564 uint64_t val)
e7b07cee
HW
6565{
6566 struct drm_device *dev = connector->dev;
1348969a 6567 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6568 struct dm_connector_state *dm_old_state =
6569 to_dm_connector_state(connector->state);
6570 struct dm_connector_state *dm_new_state =
6571 to_dm_connector_state(connector_state);
6572
6573 int ret = -EINVAL;
6574
6575 if (property == dev->mode_config.scaling_mode_property) {
6576 enum amdgpu_rmx_type rmx_type;
6577
6578 switch (val) {
6579 case DRM_MODE_SCALE_CENTER:
6580 rmx_type = RMX_CENTER;
6581 break;
6582 case DRM_MODE_SCALE_ASPECT:
6583 rmx_type = RMX_ASPECT;
6584 break;
6585 case DRM_MODE_SCALE_FULLSCREEN:
6586 rmx_type = RMX_FULL;
6587 break;
6588 case DRM_MODE_SCALE_NONE:
6589 default:
6590 rmx_type = RMX_OFF;
6591 break;
6592 }
6593
6594 if (dm_old_state->scaling == rmx_type)
6595 return 0;
6596
6597 dm_new_state->scaling = rmx_type;
6598 ret = 0;
6599 } else if (property == adev->mode_info.underscan_hborder_property) {
6600 dm_new_state->underscan_hborder = val;
6601 ret = 0;
6602 } else if (property == adev->mode_info.underscan_vborder_property) {
6603 dm_new_state->underscan_vborder = val;
6604 ret = 0;
6605 } else if (property == adev->mode_info.underscan_property) {
6606 dm_new_state->underscan_enable = val;
6607 ret = 0;
c1ee92f9
DF
6608 } else if (property == adev->mode_info.abm_level_property) {
6609 dm_new_state->abm_level = val;
6610 ret = 0;
e7b07cee
HW
6611 }
6612
6613 return ret;
6614}
6615
3ee6b26b
AD
6616int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6617 const struct drm_connector_state *state,
6618 struct drm_property *property,
6619 uint64_t *val)
e7b07cee
HW
6620{
6621 struct drm_device *dev = connector->dev;
1348969a 6622 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
6623 struct dm_connector_state *dm_state =
6624 to_dm_connector_state(state);
6625 int ret = -EINVAL;
6626
6627 if (property == dev->mode_config.scaling_mode_property) {
6628 switch (dm_state->scaling) {
6629 case RMX_CENTER:
6630 *val = DRM_MODE_SCALE_CENTER;
6631 break;
6632 case RMX_ASPECT:
6633 *val = DRM_MODE_SCALE_ASPECT;
6634 break;
6635 case RMX_FULL:
6636 *val = DRM_MODE_SCALE_FULLSCREEN;
6637 break;
6638 case RMX_OFF:
6639 default:
6640 *val = DRM_MODE_SCALE_NONE;
6641 break;
6642 }
6643 ret = 0;
6644 } else if (property == adev->mode_info.underscan_hborder_property) {
6645 *val = dm_state->underscan_hborder;
6646 ret = 0;
6647 } else if (property == adev->mode_info.underscan_vborder_property) {
6648 *val = dm_state->underscan_vborder;
6649 ret = 0;
6650 } else if (property == adev->mode_info.underscan_property) {
6651 *val = dm_state->underscan_enable;
6652 ret = 0;
c1ee92f9
DF
6653 } else if (property == adev->mode_info.abm_level_property) {
6654 *val = dm_state->abm_level;
6655 ret = 0;
e7b07cee 6656 }
c1ee92f9 6657
e7b07cee
HW
6658 return ret;
6659}
6660
526c654a
ED
6661static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6662{
6663 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6664
6665 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6666}
6667
7578ecda 6668static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
e7b07cee 6669{
c84dec2f 6670 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee 6671 const struct dc_link *link = aconnector->dc_link;
1348969a 6672 struct amdgpu_device *adev = drm_to_adev(connector->dev);
e7b07cee 6673 struct amdgpu_display_manager *dm = &adev->dm;
7fd13bae 6674 int i;
ada8ce15 6675
5dff80bd
AG
6676 /*
6677 * Call only if mst_mgr was iniitalized before since it's not done
6678 * for all connector types.
6679 */
6680 if (aconnector->mst_mgr.dev)
6681 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6682
e7b07cee
HW
6683#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6684 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
7fd13bae
AD
6685 for (i = 0; i < dm->num_of_edps; i++) {
6686 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6687 backlight_device_unregister(dm->backlight_dev[i]);
6688 dm->backlight_dev[i] = NULL;
6689 }
e7b07cee
HW
6690 }
6691#endif
dcd5fb82
MF
6692
6693 if (aconnector->dc_em_sink)
6694 dc_sink_release(aconnector->dc_em_sink);
6695 aconnector->dc_em_sink = NULL;
6696 if (aconnector->dc_sink)
6697 dc_sink_release(aconnector->dc_sink);
6698 aconnector->dc_sink = NULL;
6699
e86e8947 6700 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
e7b07cee
HW
6701 drm_connector_unregister(connector);
6702 drm_connector_cleanup(connector);
526c654a
ED
6703 if (aconnector->i2c) {
6704 i2c_del_adapter(&aconnector->i2c->base);
6705 kfree(aconnector->i2c);
6706 }
7daec99f 6707 kfree(aconnector->dm_dp_aux.aux.name);
526c654a 6708
e7b07cee
HW
6709 kfree(connector);
6710}
6711
6712void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6713{
6714 struct dm_connector_state *state =
6715 to_dm_connector_state(connector->state);
6716
df099b9b
LSL
6717 if (connector->state)
6718 __drm_atomic_helper_connector_destroy_state(connector->state);
6719
e7b07cee
HW
6720 kfree(state);
6721
6722 state = kzalloc(sizeof(*state), GFP_KERNEL);
6723
6724 if (state) {
6725 state->scaling = RMX_OFF;
6726 state->underscan_enable = false;
6727 state->underscan_hborder = 0;
6728 state->underscan_vborder = 0;
01933ba4 6729 state->base.max_requested_bpc = 8;
3261e013
ML
6730 state->vcpi_slots = 0;
6731 state->pbn = 0;
c3e50f89
NK
6732 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6733 state->abm_level = amdgpu_dm_abm_level;
6734
df099b9b 6735 __drm_atomic_helper_connector_reset(connector, &state->base);
e7b07cee
HW
6736 }
6737}
6738
3ee6b26b
AD
6739struct drm_connector_state *
6740amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
e7b07cee
HW
6741{
6742 struct dm_connector_state *state =
6743 to_dm_connector_state(connector->state);
6744
6745 struct dm_connector_state *new_state =
6746 kmemdup(state, sizeof(*state), GFP_KERNEL);
6747
98e6436d
AK
6748 if (!new_state)
6749 return NULL;
e7b07cee 6750
98e6436d
AK
6751 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6752
6753 new_state->freesync_capable = state->freesync_capable;
c1ee92f9 6754 new_state->abm_level = state->abm_level;
922454c2
NK
6755 new_state->scaling = state->scaling;
6756 new_state->underscan_enable = state->underscan_enable;
6757 new_state->underscan_hborder = state->underscan_hborder;
6758 new_state->underscan_vborder = state->underscan_vborder;
3261e013
ML
6759 new_state->vcpi_slots = state->vcpi_slots;
6760 new_state->pbn = state->pbn;
98e6436d 6761 return &new_state->base;
e7b07cee
HW
6762}
6763
14f04fa4
AD
6764static int
6765amdgpu_dm_connector_late_register(struct drm_connector *connector)
6766{
6767 struct amdgpu_dm_connector *amdgpu_dm_connector =
6768 to_amdgpu_dm_connector(connector);
00a8037e 6769 int r;
14f04fa4 6770
00a8037e
AD
6771 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6772 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6773 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6774 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6775 if (r)
6776 return r;
6777 }
6778
6779#if defined(CONFIG_DEBUG_FS)
14f04fa4
AD
6780 connector_debugfs_init(amdgpu_dm_connector);
6781#endif
6782
6783 return 0;
6784}
6785
e7b07cee
HW
6786static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6787 .reset = amdgpu_dm_connector_funcs_reset,
6788 .detect = amdgpu_dm_connector_detect,
6789 .fill_modes = drm_helper_probe_single_connector_modes,
6790 .destroy = amdgpu_dm_connector_destroy,
6791 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6792 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6793 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
526c654a 6794 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
14f04fa4 6795 .late_register = amdgpu_dm_connector_late_register,
526c654a 6796 .early_unregister = amdgpu_dm_connector_unregister
e7b07cee
HW
6797};
6798
e7b07cee
HW
6799static int get_modes(struct drm_connector *connector)
6800{
6801 return amdgpu_dm_connector_get_modes(connector);
6802}
6803
c84dec2f 6804static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6805{
6806 struct dc_sink_init_data init_params = {
6807 .link = aconnector->dc_link,
6808 .sink_signal = SIGNAL_TYPE_VIRTUAL
6809 };
70e8ffc5 6810 struct edid *edid;
e7b07cee 6811
a89ff457 6812 if (!aconnector->base.edid_blob_ptr) {
e7b07cee
HW
6813 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6814 aconnector->base.name);
6815
6816 aconnector->base.force = DRM_FORCE_OFF;
6817 aconnector->base.override_edid = false;
6818 return;
6819 }
6820
70e8ffc5
HW
6821 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6822
e7b07cee
HW
6823 aconnector->edid = edid;
6824
6825 aconnector->dc_em_sink = dc_link_add_remote_sink(
6826 aconnector->dc_link,
6827 (uint8_t *)edid,
6828 (edid->extensions + 1) * EDID_LENGTH,
6829 &init_params);
6830
dcd5fb82 6831 if (aconnector->base.force == DRM_FORCE_ON) {
e7b07cee
HW
6832 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6833 aconnector->dc_link->local_sink :
6834 aconnector->dc_em_sink;
dcd5fb82
MF
6835 dc_sink_retain(aconnector->dc_sink);
6836 }
e7b07cee
HW
6837}
6838
c84dec2f 6839static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
e7b07cee
HW
6840{
6841 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6842
1f6010a9
DF
6843 /*
6844 * In case of headless boot with force on for DP managed connector
e7b07cee
HW
6845 * Those settings have to be != 0 to get initial modeset
6846 */
6847 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6848 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6849 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6850 }
6851
6852
6853 aconnector->base.override_edid = true;
6854 create_eml_sink(aconnector);
6855}
6856
cbd14ae7
SW
6857static struct dc_stream_state *
6858create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6859 const struct drm_display_mode *drm_mode,
6860 const struct dm_connector_state *dm_state,
6861 const struct dc_stream_state *old_stream)
6862{
6863 struct drm_connector *connector = &aconnector->base;
1348969a 6864 struct amdgpu_device *adev = drm_to_adev(connector->dev);
cbd14ae7 6865 struct dc_stream_state *stream;
4b7da34b
SW
6866 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6867 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
cbd14ae7
SW
6868 enum dc_status dc_result = DC_OK;
6869
6870 do {
6871 stream = create_stream_for_sink(aconnector, drm_mode,
6872 dm_state, old_stream,
6873 requested_bpc);
6874 if (stream == NULL) {
6875 DRM_ERROR("Failed to create stream for sink!\n");
6876 break;
6877 }
6878
6879 dc_result = dc_validate_stream(adev->dm.dc, stream);
6880
6881 if (dc_result != DC_OK) {
74a16675 6882 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
cbd14ae7
SW
6883 drm_mode->hdisplay,
6884 drm_mode->vdisplay,
6885 drm_mode->clock,
74a16675
RS
6886 dc_result,
6887 dc_status_to_str(dc_result));
cbd14ae7
SW
6888
6889 dc_stream_release(stream);
6890 stream = NULL;
6891 requested_bpc -= 2; /* lower bpc to retry validation */
6892 }
6893
6894 } while (stream == NULL && requested_bpc >= 6);
6895
68eb3ae3
WS
6896 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6897 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6898
6899 aconnector->force_yuv420_output = true;
6900 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6901 dm_state, old_stream);
6902 aconnector->force_yuv420_output = false;
6903 }
6904
cbd14ae7
SW
6905 return stream;
6906}
6907
ba9ca088 6908enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3ee6b26b 6909 struct drm_display_mode *mode)
e7b07cee
HW
6910{
6911 int result = MODE_ERROR;
6912 struct dc_sink *dc_sink;
e7b07cee 6913 /* TODO: Unhardcode stream count */
0971c40e 6914 struct dc_stream_state *stream;
c84dec2f 6915 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
6916
6917 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6918 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6919 return result;
6920
1f6010a9
DF
6921 /*
6922 * Only run this the first time mode_valid is called to initilialize
e7b07cee
HW
6923 * EDID mgmt
6924 */
6925 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6926 !aconnector->dc_em_sink)
6927 handle_edid_mgmt(aconnector);
6928
c84dec2f 6929 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
e7b07cee 6930
ad975f44
VL
6931 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6932 aconnector->base.force != DRM_FORCE_ON) {
e7b07cee
HW
6933 DRM_ERROR("dc_sink is NULL!\n");
6934 goto fail;
6935 }
6936
cbd14ae7
SW
6937 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6938 if (stream) {
6939 dc_stream_release(stream);
e7b07cee 6940 result = MODE_OK;
cbd14ae7 6941 }
e7b07cee
HW
6942
6943fail:
6944 /* TODO: error handling*/
6945 return result;
6946}
6947
88694af9
NK
6948static int fill_hdr_info_packet(const struct drm_connector_state *state,
6949 struct dc_info_packet *out)
6950{
6951 struct hdmi_drm_infoframe frame;
6952 unsigned char buf[30]; /* 26 + 4 */
6953 ssize_t len;
6954 int ret, i;
6955
6956 memset(out, 0, sizeof(*out));
6957
6958 if (!state->hdr_output_metadata)
6959 return 0;
6960
6961 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6962 if (ret)
6963 return ret;
6964
6965 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6966 if (len < 0)
6967 return (int)len;
6968
6969 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6970 if (len != 30)
6971 return -EINVAL;
6972
6973 /* Prepare the infopacket for DC. */
6974 switch (state->connector->connector_type) {
6975 case DRM_MODE_CONNECTOR_HDMIA:
6976 out->hb0 = 0x87; /* type */
6977 out->hb1 = 0x01; /* version */
6978 out->hb2 = 0x1A; /* length */
6979 out->sb[0] = buf[3]; /* checksum */
6980 i = 1;
6981 break;
6982
6983 case DRM_MODE_CONNECTOR_DisplayPort:
6984 case DRM_MODE_CONNECTOR_eDP:
6985 out->hb0 = 0x00; /* sdp id, zero */
6986 out->hb1 = 0x87; /* type */
6987 out->hb2 = 0x1D; /* payload len - 1 */
6988 out->hb3 = (0x13 << 2); /* sdp version */
6989 out->sb[0] = 0x01; /* version */
6990 out->sb[1] = 0x1A; /* length */
6991 i = 2;
6992 break;
6993
6994 default:
6995 return -EINVAL;
6996 }
6997
6998 memcpy(&out->sb[i], &buf[4], 26);
6999 out->valid = true;
7000
7001 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7002 sizeof(out->sb), false);
7003
7004 return 0;
7005}
7006
88694af9
NK
7007static int
7008amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
51e857af 7009 struct drm_atomic_state *state)
88694af9 7010{
51e857af
SP
7011 struct drm_connector_state *new_con_state =
7012 drm_atomic_get_new_connector_state(state, conn);
88694af9
NK
7013 struct drm_connector_state *old_con_state =
7014 drm_atomic_get_old_connector_state(state, conn);
7015 struct drm_crtc *crtc = new_con_state->crtc;
7016 struct drm_crtc_state *new_crtc_state;
7017 int ret;
7018
e8a98235
RS
7019 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7020
88694af9
NK
7021 if (!crtc)
7022 return 0;
7023
72921cdf 7024 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
88694af9
NK
7025 struct dc_info_packet hdr_infopacket;
7026
7027 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7028 if (ret)
7029 return ret;
7030
7031 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7032 if (IS_ERR(new_crtc_state))
7033 return PTR_ERR(new_crtc_state);
7034
7035 /*
7036 * DC considers the stream backends changed if the
7037 * static metadata changes. Forcing the modeset also
7038 * gives a simple way for userspace to switch from
b232d4ed
NK
7039 * 8bpc to 10bpc when setting the metadata to enter
7040 * or exit HDR.
7041 *
7042 * Changing the static metadata after it's been
7043 * set is permissible, however. So only force a
7044 * modeset if we're entering or exiting HDR.
88694af9 7045 */
b232d4ed
NK
7046 new_crtc_state->mode_changed =
7047 !old_con_state->hdr_output_metadata ||
7048 !new_con_state->hdr_output_metadata;
88694af9
NK
7049 }
7050
7051 return 0;
7052}
7053
e7b07cee
HW
7054static const struct drm_connector_helper_funcs
7055amdgpu_dm_connector_helper_funcs = {
7056 /*
1f6010a9 7057 * If hotplugging a second bigger display in FB Con mode, bigger resolution
b830ebc9 7058 * modes will be filtered by drm_mode_validate_size(), and those modes
1f6010a9 7059 * are missing after user start lightdm. So we need to renew modes list.
b830ebc9
HW
7060 * in get_modes call back, not just return the modes count
7061 */
e7b07cee
HW
7062 .get_modes = get_modes,
7063 .mode_valid = amdgpu_dm_connector_mode_valid,
88694af9 7064 .atomic_check = amdgpu_dm_connector_atomic_check,
e7b07cee
HW
7065};
7066
7067static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7068{
7069}
7070
d6ef9b41 7071static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
c14a005c
NK
7072{
7073 struct drm_atomic_state *state = new_crtc_state->state;
7074 struct drm_plane *plane;
7075 int num_active = 0;
7076
7077 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7078 struct drm_plane_state *new_plane_state;
7079
7080 /* Cursor planes are "fake". */
7081 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7082 continue;
7083
7084 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7085
7086 if (!new_plane_state) {
7087 /*
7088 * The plane is enable on the CRTC and hasn't changed
7089 * state. This means that it previously passed
7090 * validation and is therefore enabled.
7091 */
7092 num_active += 1;
7093 continue;
7094 }
7095
7096 /* We need a framebuffer to be considered enabled. */
7097 num_active += (new_plane_state->fb != NULL);
7098 }
7099
d6ef9b41
NK
7100 return num_active;
7101}
7102
8fe684e9
NK
7103static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7104 struct drm_crtc_state *new_crtc_state)
d6ef9b41
NK
7105{
7106 struct dm_crtc_state *dm_new_crtc_state =
7107 to_dm_crtc_state(new_crtc_state);
7108
7109 dm_new_crtc_state->active_planes = 0;
d6ef9b41
NK
7110
7111 if (!dm_new_crtc_state->stream)
7112 return;
7113
7114 dm_new_crtc_state->active_planes =
7115 count_crtc_active_planes(new_crtc_state);
c14a005c
NK
7116}
7117
3ee6b26b 7118static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
29b77ad7 7119 struct drm_atomic_state *state)
e7b07cee 7120{
29b77ad7
MR
7121 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7122 crtc);
1348969a 7123 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
e7b07cee 7124 struct dc *dc = adev->dm.dc;
29b77ad7 7125 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
e7b07cee
HW
7126 int ret = -EINVAL;
7127
5b8c5969 7128 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
e8a98235 7129
29b77ad7 7130 dm_update_crtc_active_planes(crtc, crtc_state);
d6ef9b41 7131
bcd74374
ND
7132 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7133 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
e7b07cee
HW
7134 return ret;
7135 }
7136
bc92c065 7137 /*
b836a274
MD
7138 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7139 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7140 * planes are disabled, which is not supported by the hardware. And there is legacy
7141 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
bc92c065 7142 */
29b77ad7 7143 if (crtc_state->enable &&
ea9522f5
SS
7144 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7145 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
c14a005c 7146 return -EINVAL;
ea9522f5 7147 }
c14a005c 7148
b836a274
MD
7149 /* In some use cases, like reset, no stream is attached */
7150 if (!dm_crtc_state->stream)
7151 return 0;
7152
62c933f9 7153 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
e7b07cee
HW
7154 return 0;
7155
ea9522f5 7156 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
e7b07cee
HW
7157 return ret;
7158}
7159
3ee6b26b
AD
7160static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7161 const struct drm_display_mode *mode,
7162 struct drm_display_mode *adjusted_mode)
e7b07cee
HW
7163{
7164 return true;
7165}
7166
7167static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7168 .disable = dm_crtc_helper_disable,
7169 .atomic_check = dm_crtc_helper_atomic_check,
ea702333
TZ
7170 .mode_fixup = dm_crtc_helper_mode_fixup,
7171 .get_scanout_position = amdgpu_crtc_get_scanout_position,
e7b07cee
HW
7172};
7173
7174static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7175{
7176
7177}
7178
3261e013
ML
7179static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
7180{
7181 switch (display_color_depth) {
7182 case COLOR_DEPTH_666:
7183 return 6;
7184 case COLOR_DEPTH_888:
7185 return 8;
7186 case COLOR_DEPTH_101010:
7187 return 10;
7188 case COLOR_DEPTH_121212:
7189 return 12;
7190 case COLOR_DEPTH_141414:
7191 return 14;
7192 case COLOR_DEPTH_161616:
7193 return 16;
7194 default:
7195 break;
7196 }
7197 return 0;
7198}
7199
3ee6b26b
AD
7200static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7201 struct drm_crtc_state *crtc_state,
7202 struct drm_connector_state *conn_state)
e7b07cee 7203{
3261e013
ML
7204 struct drm_atomic_state *state = crtc_state->state;
7205 struct drm_connector *connector = conn_state->connector;
7206 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7207 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7208 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7209 struct drm_dp_mst_topology_mgr *mst_mgr;
7210 struct drm_dp_mst_port *mst_port;
7211 enum dc_color_depth color_depth;
7212 int clock, bpp = 0;
1bc22f20 7213 bool is_y420 = false;
3261e013
ML
7214
7215 if (!aconnector->port || !aconnector->dc_sink)
7216 return 0;
7217
7218 mst_port = aconnector->port;
7219 mst_mgr = &aconnector->mst_port->mst_mgr;
7220
7221 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7222 return 0;
7223
7224 if (!state->duplicated) {
cbd14ae7 7225 int max_bpc = conn_state->max_requested_bpc;
1bc22f20
SW
7226 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7227 aconnector->force_yuv420_output;
cbd14ae7
SW
7228 color_depth = convert_color_depth_from_display_info(connector,
7229 is_y420,
7230 max_bpc);
3261e013
ML
7231 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7232 clock = adjusted_mode->clock;
dc48529f 7233 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
3261e013
ML
7234 }
7235 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7236 mst_mgr,
7237 mst_port,
1c6c1cb5 7238 dm_new_connector_state->pbn,
03ca9600 7239 dm_mst_get_pbn_divider(aconnector->dc_link));
3261e013
ML
7240 if (dm_new_connector_state->vcpi_slots < 0) {
7241 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7242 return dm_new_connector_state->vcpi_slots;
7243 }
e7b07cee
HW
7244 return 0;
7245}
7246
7247const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7248 .disable = dm_encoder_helper_disable,
7249 .atomic_check = dm_encoder_helper_atomic_check
7250};
7251
d9fe1a4c 7252#if defined(CONFIG_DRM_AMD_DC_DCN)
29b9ba74 7253static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6513104b
HW
7254 struct dc_state *dc_state,
7255 struct dsc_mst_fairness_vars *vars)
29b9ba74
ML
7256{
7257 struct dc_stream_state *stream = NULL;
7258 struct drm_connector *connector;
5760dcb9 7259 struct drm_connector_state *new_con_state;
29b9ba74
ML
7260 struct amdgpu_dm_connector *aconnector;
7261 struct dm_connector_state *dm_conn_state;
a550bb16
HW
7262 int i, j;
7263 int vcpi, pbn_div, pbn, slot_num = 0;
29b9ba74 7264
5760dcb9 7265 for_each_new_connector_in_state(state, connector, new_con_state, i) {
29b9ba74
ML
7266
7267 aconnector = to_amdgpu_dm_connector(connector);
7268
7269 if (!aconnector->port)
7270 continue;
7271
7272 if (!new_con_state || !new_con_state->crtc)
7273 continue;
7274
7275 dm_conn_state = to_dm_connector_state(new_con_state);
7276
7277 for (j = 0; j < dc_state->stream_count; j++) {
7278 stream = dc_state->streams[j];
7279 if (!stream)
7280 continue;
7281
7282 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7283 break;
7284
7285 stream = NULL;
7286 }
7287
7288 if (!stream)
7289 continue;
7290
29b9ba74 7291 pbn_div = dm_mst_get_pbn_divider(stream->link);
6513104b
HW
7292 /* pbn is calculated by compute_mst_dsc_configs_for_state*/
7293 for (j = 0; j < dc_state->stream_count; j++) {
7294 if (vars[j].aconnector == aconnector) {
7295 pbn = vars[j].pbn;
7296 break;
7297 }
7298 }
7299
a550bb16
HW
7300 if (j == dc_state->stream_count)
7301 continue;
7302
7303 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7304
7305 if (stream->timing.flags.DSC != 1) {
7306 dm_conn_state->pbn = pbn;
7307 dm_conn_state->vcpi_slots = slot_num;
7308
7309 drm_dp_mst_atomic_enable_dsc(state,
7310 aconnector->port,
7311 dm_conn_state->pbn,
7312 0,
7313 false);
7314 continue;
7315 }
7316
29b9ba74
ML
7317 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7318 aconnector->port,
7319 pbn, pbn_div,
7320 true);
7321 if (vcpi < 0)
7322 return vcpi;
7323
7324 dm_conn_state->pbn = pbn;
7325 dm_conn_state->vcpi_slots = vcpi;
7326 }
7327 return 0;
7328}
d9fe1a4c 7329#endif
29b9ba74 7330
e7b07cee
HW
7331static void dm_drm_plane_reset(struct drm_plane *plane)
7332{
7333 struct dm_plane_state *amdgpu_state = NULL;
7334
7335 if (plane->state)
7336 plane->funcs->atomic_destroy_state(plane, plane->state);
7337
7338 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
f922237d 7339 WARN_ON(amdgpu_state == NULL);
1f6010a9 7340
7ddaef96
NK
7341 if (amdgpu_state)
7342 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
e7b07cee
HW
7343}
7344
7345static struct drm_plane_state *
7346dm_drm_plane_duplicate_state(struct drm_plane *plane)
7347{
7348 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7349
7350 old_dm_plane_state = to_dm_plane_state(plane->state);
7351 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7352 if (!dm_plane_state)
7353 return NULL;
7354
7355 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7356
3be5262e
HW
7357 if (old_dm_plane_state->dc_state) {
7358 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7359 dc_plane_state_retain(dm_plane_state->dc_state);
e7b07cee
HW
7360 }
7361
7362 return &dm_plane_state->base;
7363}
7364
dfd84d90 7365static void dm_drm_plane_destroy_state(struct drm_plane *plane,
3ee6b26b 7366 struct drm_plane_state *state)
e7b07cee
HW
7367{
7368 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7369
3be5262e
HW
7370 if (dm_plane_state->dc_state)
7371 dc_plane_state_release(dm_plane_state->dc_state);
e7b07cee 7372
0627bbd3 7373 drm_atomic_helper_plane_destroy_state(plane, state);
e7b07cee
HW
7374}
7375
7376static const struct drm_plane_funcs dm_plane_funcs = {
7377 .update_plane = drm_atomic_helper_update_plane,
7378 .disable_plane = drm_atomic_helper_disable_plane,
02680efb 7379 .destroy = drm_primary_helper_destroy,
e7b07cee
HW
7380 .reset = dm_drm_plane_reset,
7381 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7382 .atomic_destroy_state = dm_drm_plane_destroy_state,
faa37f54 7383 .format_mod_supported = dm_plane_format_mod_supported,
e7b07cee
HW
7384};
7385
3ee6b26b
AD
7386static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7387 struct drm_plane_state *new_state)
e7b07cee
HW
7388{
7389 struct amdgpu_framebuffer *afb;
7390 struct drm_gem_object *obj;
5d43be0c 7391 struct amdgpu_device *adev;
e7b07cee 7392 struct amdgpu_bo *rbo;
e7b07cee 7393 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
0f257b09
CZ
7394 struct list_head list;
7395 struct ttm_validate_buffer tv;
7396 struct ww_acquire_ctx ticket;
5d43be0c
CK
7397 uint32_t domain;
7398 int r;
e7b07cee
HW
7399
7400 if (!new_state->fb) {
4711c033 7401 DRM_DEBUG_KMS("No FB bound\n");
e7b07cee
HW
7402 return 0;
7403 }
7404
7405 afb = to_amdgpu_framebuffer(new_state->fb);
e68d14dd 7406 obj = new_state->fb->obj[0];
e7b07cee 7407 rbo = gem_to_amdgpu_bo(obj);
5d43be0c 7408 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
0f257b09
CZ
7409 INIT_LIST_HEAD(&list);
7410
7411 tv.bo = &rbo->tbo;
7412 tv.num_shared = 1;
7413 list_add(&tv.head, &list);
7414
9165fb87 7415 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
0f257b09
CZ
7416 if (r) {
7417 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
e7b07cee 7418 return r;
0f257b09 7419 }
e7b07cee 7420
5d43be0c 7421 if (plane->type != DRM_PLANE_TYPE_CURSOR)
f2bd8a0e 7422 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5d43be0c
CK
7423 else
7424 domain = AMDGPU_GEM_DOMAIN_VRAM;
e7b07cee 7425
7b7c6c81 7426 r = amdgpu_bo_pin(rbo, domain);
e7b07cee 7427 if (unlikely(r != 0)) {
30b7c614
HW
7428 if (r != -ERESTARTSYS)
7429 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
0f257b09 7430 ttm_eu_backoff_reservation(&ticket, &list);
e7b07cee
HW
7431 return r;
7432 }
7433
bb812f1e
JZ
7434 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7435 if (unlikely(r != 0)) {
7436 amdgpu_bo_unpin(rbo);
0f257b09 7437 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7438 DRM_ERROR("%p bind failed\n", rbo);
e7b07cee
HW
7439 return r;
7440 }
7df7e505 7441
0f257b09 7442 ttm_eu_backoff_reservation(&ticket, &list);
bb812f1e 7443
7b7c6c81 7444 afb->address = amdgpu_bo_gpu_offset(rbo);
e7b07cee
HW
7445
7446 amdgpu_bo_ref(rbo);
7447
cf322b49
NK
7448 /**
7449 * We don't do surface updates on planes that have been newly created,
7450 * but we also don't have the afb->address during atomic check.
7451 *
7452 * Fill in buffer attributes depending on the address here, but only on
7453 * newly created planes since they're not being used by DC yet and this
7454 * won't modify global state.
7455 */
7456 dm_plane_state_old = to_dm_plane_state(plane->state);
7457 dm_plane_state_new = to_dm_plane_state(new_state);
7458
3be5262e 7459 if (dm_plane_state_new->dc_state &&
cf322b49
NK
7460 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7461 struct dc_plane_state *plane_state =
7462 dm_plane_state_new->dc_state;
7463 bool force_disable_dcc = !plane_state->dcc.enable;
e7b07cee 7464
320932bf 7465 fill_plane_buffer_attributes(
695af5f9 7466 adev, afb, plane_state->format, plane_state->rotation,
6eed95b0 7467 afb->tiling_flags,
cf322b49
NK
7468 &plane_state->tiling_info, &plane_state->plane_size,
7469 &plane_state->dcc, &plane_state->address,
6eed95b0 7470 afb->tmz_surface, force_disable_dcc);
e7b07cee
HW
7471 }
7472
e7b07cee
HW
7473 return 0;
7474}
7475
3ee6b26b
AD
7476static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7477 struct drm_plane_state *old_state)
e7b07cee
HW
7478{
7479 struct amdgpu_bo *rbo;
e7b07cee
HW
7480 int r;
7481
7482 if (!old_state->fb)
7483 return;
7484
e68d14dd 7485 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
e7b07cee
HW
7486 r = amdgpu_bo_reserve(rbo, false);
7487 if (unlikely(r)) {
7488 DRM_ERROR("failed to reserve rbo before unpin\n");
7489 return;
b830ebc9
HW
7490 }
7491
7492 amdgpu_bo_unpin(rbo);
7493 amdgpu_bo_unreserve(rbo);
7494 amdgpu_bo_unref(&rbo);
e7b07cee
HW
7495}
7496
8c44515b
AP
7497static int dm_plane_helper_check_state(struct drm_plane_state *state,
7498 struct drm_crtc_state *new_crtc_state)
7499{
6300b3bd
MK
7500 struct drm_framebuffer *fb = state->fb;
7501 int min_downscale, max_upscale;
7502 int min_scale = 0;
7503 int max_scale = INT_MAX;
7504
40d916a2 7505 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6300b3bd 7506 if (fb && state->crtc) {
40d916a2
NC
7507 /* Validate viewport to cover the case when only the position changes */
7508 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7509 int viewport_width = state->crtc_w;
7510 int viewport_height = state->crtc_h;
7511
7512 if (state->crtc_x < 0)
7513 viewport_width += state->crtc_x;
7514 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7515 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7516
7517 if (state->crtc_y < 0)
7518 viewport_height += state->crtc_y;
7519 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7520 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7521
4abdb72b
NC
7522 if (viewport_width < 0 || viewport_height < 0) {
7523 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7524 return -EINVAL;
7525 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7526 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
40d916a2 7527 return -EINVAL;
4abdb72b
NC
7528 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7529 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
40d916a2 7530 return -EINVAL;
4abdb72b
NC
7531 }
7532
40d916a2
NC
7533 }
7534
7535 /* Get min/max allowed scaling factors from plane caps. */
6300b3bd
MK
7536 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7537 &min_downscale, &max_upscale);
7538 /*
7539 * Convert to drm convention: 16.16 fixed point, instead of dc's
7540 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7541 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7542 */
7543 min_scale = (1000 << 16) / max_upscale;
7544 max_scale = (1000 << 16) / min_downscale;
7545 }
8c44515b 7546
8c44515b 7547 return drm_atomic_helper_check_plane_state(
6300b3bd 7548 state, new_crtc_state, min_scale, max_scale, true, true);
8c44515b
AP
7549}
7550
7578ecda 7551static int dm_plane_atomic_check(struct drm_plane *plane,
7c11b99a 7552 struct drm_atomic_state *state)
cbd19488 7553{
7c11b99a
MR
7554 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7555 plane);
1348969a 7556 struct amdgpu_device *adev = drm_to_adev(plane->dev);
cbd19488 7557 struct dc *dc = adev->dm.dc;
78171832 7558 struct dm_plane_state *dm_plane_state;
695af5f9 7559 struct dc_scaling_info scaling_info;
8c44515b 7560 struct drm_crtc_state *new_crtc_state;
695af5f9 7561 int ret;
78171832 7562
ba5c1649 7563 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
e8a98235 7564
ba5c1649 7565 dm_plane_state = to_dm_plane_state(new_plane_state);
cbd19488 7566
3be5262e 7567 if (!dm_plane_state->dc_state)
9a3329b1 7568 return 0;
cbd19488 7569
8c44515b 7570 new_crtc_state =
dec92020 7571 drm_atomic_get_new_crtc_state(state,
ba5c1649 7572 new_plane_state->crtc);
8c44515b
AP
7573 if (!new_crtc_state)
7574 return -EINVAL;
7575
ba5c1649 7576 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8c44515b
AP
7577 if (ret)
7578 return ret;
7579
4375d625 7580 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
695af5f9
NK
7581 if (ret)
7582 return ret;
a05bcff1 7583
62c933f9 7584 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
cbd19488
AG
7585 return 0;
7586
7587 return -EINVAL;
7588}
7589
674e78ac 7590static int dm_plane_atomic_async_check(struct drm_plane *plane,
5ddb0bd4 7591 struct drm_atomic_state *state)
674e78ac
NK
7592{
7593 /* Only support async updates on cursor planes. */
7594 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7595 return -EINVAL;
7596
7597 return 0;
7598}
7599
7600static void dm_plane_atomic_async_update(struct drm_plane *plane,
5ddb0bd4 7601 struct drm_atomic_state *state)
674e78ac 7602{
5ddb0bd4
MR
7603 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7604 plane);
674e78ac 7605 struct drm_plane_state *old_state =
5ddb0bd4 7606 drm_atomic_get_old_plane_state(state, plane);
674e78ac 7607
e8a98235
RS
7608 trace_amdgpu_dm_atomic_update_cursor(new_state);
7609
332af874 7610 swap(plane->state->fb, new_state->fb);
674e78ac
NK
7611
7612 plane->state->src_x = new_state->src_x;
7613 plane->state->src_y = new_state->src_y;
7614 plane->state->src_w = new_state->src_w;
7615 plane->state->src_h = new_state->src_h;
7616 plane->state->crtc_x = new_state->crtc_x;
7617 plane->state->crtc_y = new_state->crtc_y;
7618 plane->state->crtc_w = new_state->crtc_w;
7619 plane->state->crtc_h = new_state->crtc_h;
7620
7621 handle_cursor_update(plane, old_state);
7622}
7623
e7b07cee
HW
7624static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7625 .prepare_fb = dm_plane_helper_prepare_fb,
7626 .cleanup_fb = dm_plane_helper_cleanup_fb,
cbd19488 7627 .atomic_check = dm_plane_atomic_check,
674e78ac
NK
7628 .atomic_async_check = dm_plane_atomic_async_check,
7629 .atomic_async_update = dm_plane_atomic_async_update
e7b07cee
HW
7630};
7631
7632/*
7633 * TODO: these are currently initialized to rgb formats only.
7634 * For future use cases we should either initialize them dynamically based on
7635 * plane capabilities, or initialize this array to all formats, so internal drm
1f6010a9 7636 * check will succeed, and let DC implement proper check
e7b07cee 7637 */
d90371b0 7638static const uint32_t rgb_formats[] = {
e7b07cee
HW
7639 DRM_FORMAT_XRGB8888,
7640 DRM_FORMAT_ARGB8888,
7641 DRM_FORMAT_RGBA8888,
7642 DRM_FORMAT_XRGB2101010,
7643 DRM_FORMAT_XBGR2101010,
7644 DRM_FORMAT_ARGB2101010,
7645 DRM_FORMAT_ABGR2101010,
58020403
MK
7646 DRM_FORMAT_XRGB16161616,
7647 DRM_FORMAT_XBGR16161616,
7648 DRM_FORMAT_ARGB16161616,
7649 DRM_FORMAT_ABGR16161616,
bcd47f60
MR
7650 DRM_FORMAT_XBGR8888,
7651 DRM_FORMAT_ABGR8888,
46dd9ff7 7652 DRM_FORMAT_RGB565,
e7b07cee
HW
7653};
7654
0d579c7e
NK
7655static const uint32_t overlay_formats[] = {
7656 DRM_FORMAT_XRGB8888,
7657 DRM_FORMAT_ARGB8888,
7658 DRM_FORMAT_RGBA8888,
7659 DRM_FORMAT_XBGR8888,
7660 DRM_FORMAT_ABGR8888,
7267a1a9 7661 DRM_FORMAT_RGB565
e7b07cee
HW
7662};
7663
7664static const u32 cursor_formats[] = {
7665 DRM_FORMAT_ARGB8888
7666};
7667
37c6a93b
NK
7668static int get_plane_formats(const struct drm_plane *plane,
7669 const struct dc_plane_cap *plane_cap,
7670 uint32_t *formats, int max_formats)
e7b07cee 7671{
37c6a93b
NK
7672 int i, num_formats = 0;
7673
7674 /*
7675 * TODO: Query support for each group of formats directly from
7676 * DC plane caps. This will require adding more formats to the
7677 * caps list.
7678 */
e7b07cee 7679
f180b4bc 7680 switch (plane->type) {
e7b07cee 7681 case DRM_PLANE_TYPE_PRIMARY:
37c6a93b
NK
7682 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7683 if (num_formats >= max_formats)
7684 break;
7685
7686 formats[num_formats++] = rgb_formats[i];
7687 }
7688
ea36ad34 7689 if (plane_cap && plane_cap->pixel_format_support.nv12)
37c6a93b 7690 formats[num_formats++] = DRM_FORMAT_NV12;
cbec6477
SW
7691 if (plane_cap && plane_cap->pixel_format_support.p010)
7692 formats[num_formats++] = DRM_FORMAT_P010;
492548dc
SW
7693 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7694 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7695 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
2a5195dc
MK
7696 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7697 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
492548dc 7698 }
e7b07cee 7699 break;
37c6a93b 7700
e7b07cee 7701 case DRM_PLANE_TYPE_OVERLAY:
37c6a93b
NK
7702 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7703 if (num_formats >= max_formats)
7704 break;
7705
7706 formats[num_formats++] = overlay_formats[i];
7707 }
e7b07cee 7708 break;
37c6a93b 7709
e7b07cee 7710 case DRM_PLANE_TYPE_CURSOR:
37c6a93b
NK
7711 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7712 if (num_formats >= max_formats)
7713 break;
7714
7715 formats[num_formats++] = cursor_formats[i];
7716 }
e7b07cee
HW
7717 break;
7718 }
7719
37c6a93b
NK
7720 return num_formats;
7721}
7722
7723static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7724 struct drm_plane *plane,
7725 unsigned long possible_crtcs,
7726 const struct dc_plane_cap *plane_cap)
7727{
7728 uint32_t formats[32];
7729 int num_formats;
7730 int res = -EPERM;
ecc874a6 7731 unsigned int supported_rotations;
faa37f54 7732 uint64_t *modifiers = NULL;
37c6a93b
NK
7733
7734 num_formats = get_plane_formats(plane, plane_cap, formats,
7735 ARRAY_SIZE(formats));
7736
faa37f54
BN
7737 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7738 if (res)
7739 return res;
7740
4a580877 7741 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
37c6a93b 7742 &dm_plane_funcs, formats, num_formats,
faa37f54
BN
7743 modifiers, plane->type, NULL);
7744 kfree(modifiers);
37c6a93b
NK
7745 if (res)
7746 return res;
7747
cc1fec57
NK
7748 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7749 plane_cap && plane_cap->per_pixel_alpha) {
d74004b6
NK
7750 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7751 BIT(DRM_MODE_BLEND_PREMULTI);
7752
7753 drm_plane_create_alpha_property(plane);
7754 drm_plane_create_blend_mode_property(plane, blend_caps);
7755 }
7756
fc8e5230 7757 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
00755bb7
SW
7758 plane_cap &&
7759 (plane_cap->pixel_format_support.nv12 ||
7760 plane_cap->pixel_format_support.p010)) {
fc8e5230
NK
7761 /* This only affects YUV formats. */
7762 drm_plane_create_color_properties(
7763 plane,
7764 BIT(DRM_COLOR_YCBCR_BT601) |
00755bb7
SW
7765 BIT(DRM_COLOR_YCBCR_BT709) |
7766 BIT(DRM_COLOR_YCBCR_BT2020),
fc8e5230
NK
7767 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7768 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7769 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7770 }
7771
ecc874a6
PLG
7772 supported_rotations =
7773 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7774 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7775
1347385f
SS
7776 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7777 plane->type != DRM_PLANE_TYPE_CURSOR)
f784112f
MR
7778 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7779 supported_rotations);
ecc874a6 7780
f180b4bc 7781 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
e7b07cee 7782
96719c54 7783 /* Create (reset) the plane state */
f180b4bc
HW
7784 if (plane->funcs->reset)
7785 plane->funcs->reset(plane);
96719c54 7786
37c6a93b 7787 return 0;
e7b07cee
HW
7788}
7789
7578ecda
AD
7790static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7791 struct drm_plane *plane,
7792 uint32_t crtc_index)
e7b07cee
HW
7793{
7794 struct amdgpu_crtc *acrtc = NULL;
f180b4bc 7795 struct drm_plane *cursor_plane;
e7b07cee
HW
7796
7797 int res = -ENOMEM;
7798
7799 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7800 if (!cursor_plane)
7801 goto fail;
7802
f180b4bc 7803 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
cc1fec57 7804 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
e7b07cee
HW
7805
7806 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7807 if (!acrtc)
7808 goto fail;
7809
7810 res = drm_crtc_init_with_planes(
7811 dm->ddev,
7812 &acrtc->base,
7813 plane,
f180b4bc 7814 cursor_plane,
e7b07cee
HW
7815 &amdgpu_dm_crtc_funcs, NULL);
7816
7817 if (res)
7818 goto fail;
7819
7820 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7821
96719c54
HW
7822 /* Create (reset) the plane state */
7823 if (acrtc->base.funcs->reset)
7824 acrtc->base.funcs->reset(&acrtc->base);
7825
e7b07cee
HW
7826 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7827 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7828
7829 acrtc->crtc_id = crtc_index;
7830 acrtc->base.enabled = false;
c37e2d29 7831 acrtc->otg_inst = -1;
e7b07cee
HW
7832
7833 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
236d0e4f
LSL
7834 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7835 true, MAX_COLOR_LUT_ENTRIES);
086247a4 7836 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
e2881d6d 7837
e7b07cee
HW
7838 return 0;
7839
7840fail:
b830ebc9
HW
7841 kfree(acrtc);
7842 kfree(cursor_plane);
e7b07cee
HW
7843 return res;
7844}
7845
7846
7847static int to_drm_connector_type(enum signal_type st)
7848{
7849 switch (st) {
7850 case SIGNAL_TYPE_HDMI_TYPE_A:
7851 return DRM_MODE_CONNECTOR_HDMIA;
7852 case SIGNAL_TYPE_EDP:
7853 return DRM_MODE_CONNECTOR_eDP;
11c3ee48
AD
7854 case SIGNAL_TYPE_LVDS:
7855 return DRM_MODE_CONNECTOR_LVDS;
e7b07cee
HW
7856 case SIGNAL_TYPE_RGB:
7857 return DRM_MODE_CONNECTOR_VGA;
7858 case SIGNAL_TYPE_DISPLAY_PORT:
7859 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7860 return DRM_MODE_CONNECTOR_DisplayPort;
7861 case SIGNAL_TYPE_DVI_DUAL_LINK:
7862 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7863 return DRM_MODE_CONNECTOR_DVID;
7864 case SIGNAL_TYPE_VIRTUAL:
7865 return DRM_MODE_CONNECTOR_VIRTUAL;
7866
7867 default:
7868 return DRM_MODE_CONNECTOR_Unknown;
7869 }
7870}
7871
2b4c1c05
DV
7872static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7873{
62afb4ad
JRS
7874 struct drm_encoder *encoder;
7875
7876 /* There is only one encoder per connector */
7877 drm_connector_for_each_possible_encoder(connector, encoder)
7878 return encoder;
7879
7880 return NULL;
2b4c1c05
DV
7881}
7882
e7b07cee
HW
7883static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7884{
e7b07cee
HW
7885 struct drm_encoder *encoder;
7886 struct amdgpu_encoder *amdgpu_encoder;
7887
2b4c1c05 7888 encoder = amdgpu_dm_connector_to_encoder(connector);
e7b07cee
HW
7889
7890 if (encoder == NULL)
7891 return;
7892
7893 amdgpu_encoder = to_amdgpu_encoder(encoder);
7894
7895 amdgpu_encoder->native_mode.clock = 0;
7896
7897 if (!list_empty(&connector->probed_modes)) {
7898 struct drm_display_mode *preferred_mode = NULL;
b830ebc9 7899
e7b07cee 7900 list_for_each_entry(preferred_mode,
b830ebc9
HW
7901 &connector->probed_modes,
7902 head) {
7903 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7904 amdgpu_encoder->native_mode = *preferred_mode;
7905
e7b07cee
HW
7906 break;
7907 }
7908
7909 }
7910}
7911
3ee6b26b
AD
7912static struct drm_display_mode *
7913amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7914 char *name,
7915 int hdisplay, int vdisplay)
e7b07cee
HW
7916{
7917 struct drm_device *dev = encoder->dev;
7918 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7919 struct drm_display_mode *mode = NULL;
7920 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7921
7922 mode = drm_mode_duplicate(dev, native_mode);
7923
b830ebc9 7924 if (mode == NULL)
e7b07cee
HW
7925 return NULL;
7926
7927 mode->hdisplay = hdisplay;
7928 mode->vdisplay = vdisplay;
7929 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
090afc1e 7930 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
e7b07cee
HW
7931
7932 return mode;
7933
7934}
7935
7936static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3ee6b26b 7937 struct drm_connector *connector)
e7b07cee
HW
7938{
7939 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7940 struct drm_display_mode *mode = NULL;
7941 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
c84dec2f
HW
7942 struct amdgpu_dm_connector *amdgpu_dm_connector =
7943 to_amdgpu_dm_connector(connector);
e7b07cee
HW
7944 int i;
7945 int n;
7946 struct mode_size {
7947 char name[DRM_DISPLAY_MODE_LEN];
7948 int w;
7949 int h;
b830ebc9 7950 } common_modes[] = {
e7b07cee
HW
7951 { "640x480", 640, 480},
7952 { "800x600", 800, 600},
7953 { "1024x768", 1024, 768},
7954 { "1280x720", 1280, 720},
7955 { "1280x800", 1280, 800},
7956 {"1280x1024", 1280, 1024},
7957 { "1440x900", 1440, 900},
7958 {"1680x1050", 1680, 1050},
7959 {"1600x1200", 1600, 1200},
7960 {"1920x1080", 1920, 1080},
7961 {"1920x1200", 1920, 1200}
7962 };
7963
b830ebc9 7964 n = ARRAY_SIZE(common_modes);
e7b07cee
HW
7965
7966 for (i = 0; i < n; i++) {
7967 struct drm_display_mode *curmode = NULL;
7968 bool mode_existed = false;
7969
7970 if (common_modes[i].w > native_mode->hdisplay ||
b830ebc9
HW
7971 common_modes[i].h > native_mode->vdisplay ||
7972 (common_modes[i].w == native_mode->hdisplay &&
7973 common_modes[i].h == native_mode->vdisplay))
7974 continue;
e7b07cee
HW
7975
7976 list_for_each_entry(curmode, &connector->probed_modes, head) {
7977 if (common_modes[i].w == curmode->hdisplay &&
b830ebc9 7978 common_modes[i].h == curmode->vdisplay) {
e7b07cee
HW
7979 mode_existed = true;
7980 break;
7981 }
7982 }
7983
7984 if (mode_existed)
7985 continue;
7986
7987 mode = amdgpu_dm_create_common_mode(encoder,
7988 common_modes[i].name, common_modes[i].w,
7989 common_modes[i].h);
7990 drm_mode_probed_add(connector, mode);
c84dec2f 7991 amdgpu_dm_connector->num_modes++;
e7b07cee
HW
7992 }
7993}
7994
d77de788
SS
7995static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7996{
7997 struct drm_encoder *encoder;
7998 struct amdgpu_encoder *amdgpu_encoder;
7999 const struct drm_display_mode *native_mode;
8000
8001 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8002 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8003 return;
8004
8005 encoder = amdgpu_dm_connector_to_encoder(connector);
8006 if (!encoder)
8007 return;
8008
8009 amdgpu_encoder = to_amdgpu_encoder(encoder);
8010
8011 native_mode = &amdgpu_encoder->native_mode;
8012 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8013 return;
8014
8015 drm_connector_set_panel_orientation_with_quirk(connector,
8016 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8017 native_mode->hdisplay,
8018 native_mode->vdisplay);
8019}
8020
3ee6b26b
AD
8021static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8022 struct edid *edid)
e7b07cee 8023{
c84dec2f
HW
8024 struct amdgpu_dm_connector *amdgpu_dm_connector =
8025 to_amdgpu_dm_connector(connector);
e7b07cee
HW
8026
8027 if (edid) {
8028 /* empty probed_modes */
8029 INIT_LIST_HEAD(&connector->probed_modes);
c84dec2f 8030 amdgpu_dm_connector->num_modes =
e7b07cee
HW
8031 drm_add_edid_modes(connector, edid);
8032
f1e5e913
YMM
8033 /* sorting the probed modes before calling function
8034 * amdgpu_dm_get_native_mode() since EDID can have
8035 * more than one preferred mode. The modes that are
8036 * later in the probed mode list could be of higher
8037 * and preferred resolution. For example, 3840x2160
8038 * resolution in base EDID preferred timing and 4096x2160
8039 * preferred resolution in DID extension block later.
8040 */
8041 drm_mode_sort(&connector->probed_modes);
e7b07cee 8042 amdgpu_dm_get_native_mode(connector);
f9b4f20c
SW
8043
8044 /* Freesync capabilities are reset by calling
8045 * drm_add_edid_modes() and need to be
8046 * restored here.
8047 */
8048 amdgpu_dm_update_freesync_caps(connector, edid);
d77de788
SS
8049
8050 amdgpu_set_panel_orientation(connector);
a8d8d3dc 8051 } else {
c84dec2f 8052 amdgpu_dm_connector->num_modes = 0;
a8d8d3dc 8053 }
e7b07cee
HW
8054}
8055
a85ba005
NC
8056static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8057 struct drm_display_mode *mode)
8058{
8059 struct drm_display_mode *m;
8060
8061 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8062 if (drm_mode_equal(m, mode))
8063 return true;
8064 }
8065
8066 return false;
8067}
8068
8069static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8070{
8071 const struct drm_display_mode *m;
8072 struct drm_display_mode *new_mode;
8073 uint i;
8074 uint32_t new_modes_count = 0;
8075
8076 /* Standard FPS values
8077 *
12cdff6b
SC
8078 * 23.976 - TV/NTSC
8079 * 24 - Cinema
8080 * 25 - TV/PAL
8081 * 29.97 - TV/NTSC
8082 * 30 - TV/NTSC
8083 * 48 - Cinema HFR
8084 * 50 - TV/PAL
8085 * 60 - Commonly used
8086 * 48,72,96,120 - Multiples of 24
a85ba005 8087 */
9ce5ed6e
CIK
8088 static const uint32_t common_rates[] = {
8089 23976, 24000, 25000, 29970, 30000,
12cdff6b 8090 48000, 50000, 60000, 72000, 96000, 120000
9ce5ed6e 8091 };
a85ba005
NC
8092
8093 /*
8094 * Find mode with highest refresh rate with the same resolution
8095 * as the preferred mode. Some monitors report a preferred mode
8096 * with lower resolution than the highest refresh rate supported.
8097 */
8098
8099 m = get_highest_refresh_rate_mode(aconnector, true);
8100 if (!m)
8101 return 0;
8102
8103 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8104 uint64_t target_vtotal, target_vtotal_diff;
8105 uint64_t num, den;
8106
8107 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8108 continue;
8109
8110 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8111 common_rates[i] > aconnector->max_vfreq * 1000)
8112 continue;
8113
8114 num = (unsigned long long)m->clock * 1000 * 1000;
8115 den = common_rates[i] * (unsigned long long)m->htotal;
8116 target_vtotal = div_u64(num, den);
8117 target_vtotal_diff = target_vtotal - m->vtotal;
8118
8119 /* Check for illegal modes */
8120 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8121 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8122 m->vtotal + target_vtotal_diff < m->vsync_end)
8123 continue;
8124
8125 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8126 if (!new_mode)
8127 goto out;
8128
8129 new_mode->vtotal += (u16)target_vtotal_diff;
8130 new_mode->vsync_start += (u16)target_vtotal_diff;
8131 new_mode->vsync_end += (u16)target_vtotal_diff;
8132 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8133 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8134
8135 if (!is_duplicate_mode(aconnector, new_mode)) {
8136 drm_mode_probed_add(&aconnector->base, new_mode);
8137 new_modes_count += 1;
8138 } else
8139 drm_mode_destroy(aconnector->base.dev, new_mode);
8140 }
8141 out:
8142 return new_modes_count;
8143}
8144
8145static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8146 struct edid *edid)
8147{
8148 struct amdgpu_dm_connector *amdgpu_dm_connector =
8149 to_amdgpu_dm_connector(connector);
8150
8151 if (!(amdgpu_freesync_vid_mode && edid))
8152 return;
fe8858bb 8153
a85ba005
NC
8154 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8155 amdgpu_dm_connector->num_modes +=
8156 add_fs_modes(amdgpu_dm_connector);
8157}
8158
7578ecda 8159static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
e7b07cee 8160{
c84dec2f
HW
8161 struct amdgpu_dm_connector *amdgpu_dm_connector =
8162 to_amdgpu_dm_connector(connector);
e7b07cee 8163 struct drm_encoder *encoder;
c84dec2f 8164 struct edid *edid = amdgpu_dm_connector->edid;
e7b07cee 8165
2b4c1c05 8166 encoder = amdgpu_dm_connector_to_encoder(connector);
3e332d3a 8167
5c0e6840 8168 if (!drm_edid_is_valid(edid)) {
1b369d3c
ML
8169 amdgpu_dm_connector->num_modes =
8170 drm_add_modes_noedid(connector, 640, 480);
85ee15d6
ML
8171 } else {
8172 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8173 amdgpu_dm_connector_add_common_modes(encoder, connector);
a85ba005 8174 amdgpu_dm_connector_add_freesync_modes(connector, edid);
85ee15d6 8175 }
3e332d3a 8176 amdgpu_dm_fbc_init(connector);
5099114b 8177
c84dec2f 8178 return amdgpu_dm_connector->num_modes;
e7b07cee
HW
8179}
8180
3ee6b26b
AD
8181void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8182 struct amdgpu_dm_connector *aconnector,
8183 int connector_type,
8184 struct dc_link *link,
8185 int link_index)
e7b07cee 8186{
1348969a 8187 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
e7b07cee 8188
f04bee34
NK
8189 /*
8190 * Some of the properties below require access to state, like bpc.
8191 * Allocate some default initial connector state with our reset helper.
8192 */
8193 if (aconnector->base.funcs->reset)
8194 aconnector->base.funcs->reset(&aconnector->base);
8195
e7b07cee
HW
8196 aconnector->connector_id = link_index;
8197 aconnector->dc_link = link;
8198 aconnector->base.interlace_allowed = false;
8199 aconnector->base.doublescan_allowed = false;
8200 aconnector->base.stereo_allowed = false;
8201 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8202 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6ce8f316 8203 aconnector->audio_inst = -1;
e7b07cee
HW
8204 mutex_init(&aconnector->hpd_lock);
8205
1f6010a9
DF
8206 /*
8207 * configure support HPD hot plug connector_>polled default value is 0
b830ebc9
HW
8208 * which means HPD hot plug not supported
8209 */
e7b07cee
HW
8210 switch (connector_type) {
8211 case DRM_MODE_CONNECTOR_HDMIA:
8212 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
e7baae1c 8213 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8214 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
e7b07cee
HW
8215 break;
8216 case DRM_MODE_CONNECTOR_DisplayPort:
8217 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
f6e03f80
JS
8218 if (link->is_dig_mapping_flexible &&
8219 link->dc->res_pool->funcs->link_encs_assign) {
8220 link->link_enc =
8221 link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8222 if (!link->link_enc)
8223 link->link_enc =
8224 link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8225 }
8226
8227 if (link->link_enc)
8228 aconnector->base.ycbcr_420_allowed =
9ea59d5a 8229 link->link_enc->features.dp_ycbcr420_supported ? true : false;
e7b07cee
HW
8230 break;
8231 case DRM_MODE_CONNECTOR_DVID:
8232 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8233 break;
8234 default:
8235 break;
8236 }
8237
8238 drm_object_attach_property(&aconnector->base.base,
8239 dm->ddev->mode_config.scaling_mode_property,
8240 DRM_MODE_SCALE_NONE);
8241
8242 drm_object_attach_property(&aconnector->base.base,
8243 adev->mode_info.underscan_property,
8244 UNDERSCAN_OFF);
8245 drm_object_attach_property(&aconnector->base.base,
8246 adev->mode_info.underscan_hborder_property,
8247 0);
8248 drm_object_attach_property(&aconnector->base.base,
8249 adev->mode_info.underscan_vborder_property,
8250 0);
1825fd34 8251
8c61b31e
JFZ
8252 if (!aconnector->mst_port)
8253 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
1825fd34 8254
4a8ca46b
RL
8255 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8256 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8257 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
e7b07cee 8258
c1ee92f9 8259 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5cb32419 8260 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
c1ee92f9
DF
8261 drm_object_attach_property(&aconnector->base.base,
8262 adev->mode_info.abm_level_property, 0);
8263 }
bb47de73
NK
8264
8265 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7fad8da1
NK
8266 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8267 connector_type == DRM_MODE_CONNECTOR_eDP) {
e057b52c 8268 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
88694af9 8269
8c61b31e
JFZ
8270 if (!aconnector->mst_port)
8271 drm_connector_attach_vrr_capable_property(&aconnector->base);
8272
0c8620d6 8273#ifdef CONFIG_DRM_AMD_DC_HDCP
e22bb562 8274 if (adev->dm.hdcp_workqueue)
53e108aa 8275 drm_connector_attach_content_protection_property(&aconnector->base, true);
0c8620d6 8276#endif
bb47de73 8277 }
e7b07cee
HW
8278}
8279
7578ecda
AD
8280static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8281 struct i2c_msg *msgs, int num)
e7b07cee
HW
8282{
8283 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8284 struct ddc_service *ddc_service = i2c->ddc_service;
8285 struct i2c_command cmd;
8286 int i;
8287 int result = -EIO;
8288
b830ebc9 8289 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
e7b07cee
HW
8290
8291 if (!cmd.payloads)
8292 return result;
8293
8294 cmd.number_of_payloads = num;
8295 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8296 cmd.speed = 100;
8297
8298 for (i = 0; i < num; i++) {
8299 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8300 cmd.payloads[i].address = msgs[i].addr;
8301 cmd.payloads[i].length = msgs[i].len;
8302 cmd.payloads[i].data = msgs[i].buf;
8303 }
8304
c85e6e54
DF
8305 if (dc_submit_i2c(
8306 ddc_service->ctx->dc,
8307 ddc_service->ddc_pin->hw_info.ddc_channel,
e7b07cee
HW
8308 &cmd))
8309 result = num;
8310
8311 kfree(cmd.payloads);
8312 return result;
8313}
8314
7578ecda 8315static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
e7b07cee
HW
8316{
8317 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8318}
8319
8320static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8321 .master_xfer = amdgpu_dm_i2c_xfer,
8322 .functionality = amdgpu_dm_i2c_func,
8323};
8324
3ee6b26b
AD
8325static struct amdgpu_i2c_adapter *
8326create_i2c(struct ddc_service *ddc_service,
8327 int link_index,
8328 int *res)
e7b07cee
HW
8329{
8330 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8331 struct amdgpu_i2c_adapter *i2c;
8332
b830ebc9 8333 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
2a55f096
ES
8334 if (!i2c)
8335 return NULL;
e7b07cee
HW
8336 i2c->base.owner = THIS_MODULE;
8337 i2c->base.class = I2C_CLASS_DDC;
8338 i2c->base.dev.parent = &adev->pdev->dev;
8339 i2c->base.algo = &amdgpu_dm_i2c_algo;
b830ebc9 8340 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
e7b07cee
HW
8341 i2c_set_adapdata(&i2c->base, i2c);
8342 i2c->ddc_service = ddc_service;
f6e03f80
JS
8343 if (i2c->ddc_service->ddc_pin)
8344 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
e7b07cee
HW
8345
8346 return i2c;
8347}
8348
89fc8d4e 8349
1f6010a9
DF
8350/*
8351 * Note: this function assumes that dc_link_detect() was called for the
b830ebc9
HW
8352 * dc_link which will be represented by this aconnector.
8353 */
7578ecda
AD
8354static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8355 struct amdgpu_dm_connector *aconnector,
8356 uint32_t link_index,
8357 struct amdgpu_encoder *aencoder)
e7b07cee
HW
8358{
8359 int res = 0;
8360 int connector_type;
8361 struct dc *dc = dm->dc;
8362 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8363 struct amdgpu_i2c_adapter *i2c;
9a227d26
TSD
8364
8365 link->priv = aconnector;
e7b07cee 8366
f1ad2f5e 8367 DRM_DEBUG_DRIVER("%s()\n", __func__);
e7b07cee
HW
8368
8369 i2c = create_i2c(link->ddc, link->link_index, &res);
2a55f096
ES
8370 if (!i2c) {
8371 DRM_ERROR("Failed to create i2c adapter data\n");
8372 return -ENOMEM;
8373 }
8374
e7b07cee
HW
8375 aconnector->i2c = i2c;
8376 res = i2c_add_adapter(&i2c->base);
8377
8378 if (res) {
8379 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8380 goto out_free;
8381 }
8382
8383 connector_type = to_drm_connector_type(link->connector_signal);
8384
17165de2 8385 res = drm_connector_init_with_ddc(
e7b07cee
HW
8386 dm->ddev,
8387 &aconnector->base,
8388 &amdgpu_dm_connector_funcs,
17165de2
AP
8389 connector_type,
8390 &i2c->base);
e7b07cee
HW
8391
8392 if (res) {
8393 DRM_ERROR("connector_init failed\n");
8394 aconnector->connector_id = -1;
8395 goto out_free;
8396 }
8397
8398 drm_connector_helper_add(
8399 &aconnector->base,
8400 &amdgpu_dm_connector_helper_funcs);
8401
8402 amdgpu_dm_connector_init_helper(
8403 dm,
8404 aconnector,
8405 connector_type,
8406 link,
8407 link_index);
8408
cde4c44d 8409 drm_connector_attach_encoder(
e7b07cee
HW
8410 &aconnector->base, &aencoder->base);
8411
e7b07cee
HW
8412 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8413 || connector_type == DRM_MODE_CONNECTOR_eDP)
7daec99f 8414 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
e7b07cee 8415
e7b07cee
HW
8416out_free:
8417 if (res) {
8418 kfree(i2c);
8419 aconnector->i2c = NULL;
8420 }
8421 return res;
8422}
8423
8424int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8425{
8426 switch (adev->mode_info.num_crtc) {
8427 case 1:
8428 return 0x1;
8429 case 2:
8430 return 0x3;
8431 case 3:
8432 return 0x7;
8433 case 4:
8434 return 0xf;
8435 case 5:
8436 return 0x1f;
8437 case 6:
8438 default:
8439 return 0x3f;
8440 }
8441}
8442
7578ecda
AD
8443static int amdgpu_dm_encoder_init(struct drm_device *dev,
8444 struct amdgpu_encoder *aencoder,
8445 uint32_t link_index)
e7b07cee 8446{
1348969a 8447 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
8448
8449 int res = drm_encoder_init(dev,
8450 &aencoder->base,
8451 &amdgpu_dm_encoder_funcs,
8452 DRM_MODE_ENCODER_TMDS,
8453 NULL);
8454
8455 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8456
8457 if (!res)
8458 aencoder->encoder_id = link_index;
8459 else
8460 aencoder->encoder_id = -1;
8461
8462 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8463
8464 return res;
8465}
8466
3ee6b26b
AD
8467static void manage_dm_interrupts(struct amdgpu_device *adev,
8468 struct amdgpu_crtc *acrtc,
8469 bool enable)
e7b07cee
HW
8470{
8471 /*
8fe684e9
NK
8472 * We have no guarantee that the frontend index maps to the same
8473 * backend index - some even map to more than one.
8474 *
8475 * TODO: Use a different interrupt or check DC itself for the mapping.
e7b07cee
HW
8476 */
8477 int irq_type =
734dd01d 8478 amdgpu_display_crtc_idx_to_irq_type(
e7b07cee
HW
8479 adev,
8480 acrtc->crtc_id);
8481
8482 if (enable) {
8483 drm_crtc_vblank_on(&acrtc->base);
8484 amdgpu_irq_get(
8485 adev,
8486 &adev->pageflip_irq,
8487 irq_type);
86bc2219
WL
8488#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8489 amdgpu_irq_get(
8490 adev,
8491 &adev->vline0_irq,
8492 irq_type);
8493#endif
e7b07cee 8494 } else {
86bc2219
WL
8495#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8496 amdgpu_irq_put(
8497 adev,
8498 &adev->vline0_irq,
8499 irq_type);
8500#endif
e7b07cee
HW
8501 amdgpu_irq_put(
8502 adev,
8503 &adev->pageflip_irq,
8504 irq_type);
8505 drm_crtc_vblank_off(&acrtc->base);
8506 }
8507}
8508
8fe684e9
NK
8509static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8510 struct amdgpu_crtc *acrtc)
8511{
8512 int irq_type =
8513 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8514
8515 /**
8516 * This reads the current state for the IRQ and force reapplies
8517 * the setting to hardware.
8518 */
8519 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8520}
8521
3ee6b26b
AD
8522static bool
8523is_scaling_state_different(const struct dm_connector_state *dm_state,
8524 const struct dm_connector_state *old_dm_state)
e7b07cee
HW
8525{
8526 if (dm_state->scaling != old_dm_state->scaling)
8527 return true;
8528 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8529 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8530 return true;
8531 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8532 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8533 return true;
b830ebc9
HW
8534 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8535 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8536 return true;
e7b07cee
HW
8537 return false;
8538}
8539
0c8620d6
BL
8540#ifdef CONFIG_DRM_AMD_DC_HDCP
8541static bool is_content_protection_different(struct drm_connector_state *state,
8542 const struct drm_connector_state *old_state,
8543 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8544{
8545 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
97f6c917 8546 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
0c8620d6 8547
31c0ed90 8548 /* Handle: Type0/1 change */
53e108aa
BL
8549 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8550 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8551 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8552 return true;
8553 }
8554
31c0ed90
BL
8555 /* CP is being re enabled, ignore this
8556 *
8557 * Handles: ENABLED -> DESIRED
8558 */
0c8620d6
BL
8559 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8560 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8561 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8562 return false;
8563 }
8564
31c0ed90
BL
8565 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8566 *
8567 * Handles: UNDESIRED -> ENABLED
8568 */
0c8620d6
BL
8569 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8570 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8571 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8572
0d9a947b
QZ
8573 /* Stream removed and re-enabled
8574 *
8575 * Can sometimes overlap with the HPD case,
8576 * thus set update_hdcp to false to avoid
8577 * setting HDCP multiple times.
8578 *
8579 * Handles: DESIRED -> DESIRED (Special case)
8580 */
8581 if (!(old_state->crtc && old_state->crtc->enabled) &&
8582 state->crtc && state->crtc->enabled &&
8583 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8584 dm_con_state->update_hdcp = false;
8585 return true;
8586 }
8587
8588 /* Hot-plug, headless s3, dpms
8589 *
8590 * Only start HDCP if the display is connected/enabled.
8591 * update_hdcp flag will be set to false until the next
8592 * HPD comes in.
31c0ed90
BL
8593 *
8594 * Handles: DESIRED -> DESIRED (Special case)
0c8620d6 8595 */
97f6c917
BL
8596 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8597 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8598 dm_con_state->update_hdcp = false;
0c8620d6 8599 return true;
97f6c917 8600 }
0c8620d6 8601
31c0ed90
BL
8602 /*
8603 * Handles: UNDESIRED -> UNDESIRED
8604 * DESIRED -> DESIRED
8605 * ENABLED -> ENABLED
8606 */
0c8620d6
BL
8607 if (old_state->content_protection == state->content_protection)
8608 return false;
8609
31c0ed90
BL
8610 /*
8611 * Handles: UNDESIRED -> DESIRED
8612 * DESIRED -> UNDESIRED
8613 * ENABLED -> UNDESIRED
8614 */
97f6c917 8615 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
0c8620d6
BL
8616 return true;
8617
31c0ed90
BL
8618 /*
8619 * Handles: DESIRED -> ENABLED
8620 */
0c8620d6
BL
8621 return false;
8622}
8623
0c8620d6 8624#endif
3ee6b26b
AD
8625static void remove_stream(struct amdgpu_device *adev,
8626 struct amdgpu_crtc *acrtc,
8627 struct dc_stream_state *stream)
e7b07cee
HW
8628{
8629 /* this is the update mode case */
e7b07cee
HW
8630
8631 acrtc->otg_inst = -1;
8632 acrtc->enabled = false;
8633}
8634
7578ecda
AD
8635static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8636 struct dc_cursor_position *position)
2a8f6ccb 8637{
f4c2cc43 8638 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2a8f6ccb
HW
8639 int x, y;
8640 int xorigin = 0, yorigin = 0;
8641
e371e19c 8642 if (!crtc || !plane->state->fb)
2a8f6ccb 8643 return 0;
2a8f6ccb
HW
8644
8645 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8646 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8647 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8648 __func__,
8649 plane->state->crtc_w,
8650 plane->state->crtc_h);
8651 return -EINVAL;
8652 }
8653
8654 x = plane->state->crtc_x;
8655 y = plane->state->crtc_y;
c14a005c 8656
e371e19c
NK
8657 if (x <= -amdgpu_crtc->max_cursor_width ||
8658 y <= -amdgpu_crtc->max_cursor_height)
8659 return 0;
8660
2a8f6ccb
HW
8661 if (x < 0) {
8662 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8663 x = 0;
8664 }
8665 if (y < 0) {
8666 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8667 y = 0;
8668 }
8669 position->enable = true;
d243b6ff 8670 position->translate_by_source = true;
2a8f6ccb
HW
8671 position->x = x;
8672 position->y = y;
8673 position->x_hotspot = xorigin;
8674 position->y_hotspot = yorigin;
8675
8676 return 0;
8677}
8678
3ee6b26b
AD
8679static void handle_cursor_update(struct drm_plane *plane,
8680 struct drm_plane_state *old_plane_state)
e7b07cee 8681{
1348969a 8682 struct amdgpu_device *adev = drm_to_adev(plane->dev);
2a8f6ccb
HW
8683 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8684 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8685 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8686 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8687 uint64_t address = afb ? afb->address : 0;
6a30a929 8688 struct dc_cursor_position position = {0};
2a8f6ccb
HW
8689 struct dc_cursor_attributes attributes;
8690 int ret;
8691
e7b07cee
HW
8692 if (!plane->state->fb && !old_plane_state->fb)
8693 return;
8694
cb2318b7 8695 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
4711c033
LT
8696 __func__,
8697 amdgpu_crtc->crtc_id,
8698 plane->state->crtc_w,
8699 plane->state->crtc_h);
2a8f6ccb
HW
8700
8701 ret = get_cursor_position(plane, crtc, &position);
8702 if (ret)
8703 return;
8704
8705 if (!position.enable) {
8706 /* turn off cursor */
674e78ac
NK
8707 if (crtc_state && crtc_state->stream) {
8708 mutex_lock(&adev->dm.dc_lock);
2a8f6ccb
HW
8709 dc_stream_set_cursor_position(crtc_state->stream,
8710 &position);
674e78ac
NK
8711 mutex_unlock(&adev->dm.dc_lock);
8712 }
2a8f6ccb 8713 return;
e7b07cee 8714 }
e7b07cee 8715
2a8f6ccb
HW
8716 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8717 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8718
c1cefe11 8719 memset(&attributes, 0, sizeof(attributes));
2a8f6ccb
HW
8720 attributes.address.high_part = upper_32_bits(address);
8721 attributes.address.low_part = lower_32_bits(address);
8722 attributes.width = plane->state->crtc_w;
8723 attributes.height = plane->state->crtc_h;
8724 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8725 attributes.rotation_angle = 0;
8726 attributes.attribute_flags.value = 0;
8727
03a66367 8728 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
2a8f6ccb 8729
886daac9 8730 if (crtc_state->stream) {
674e78ac 8731 mutex_lock(&adev->dm.dc_lock);
886daac9
JZ
8732 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8733 &attributes))
8734 DRM_ERROR("DC failed to set cursor attributes\n");
2a8f6ccb 8735
2a8f6ccb
HW
8736 if (!dc_stream_set_cursor_position(crtc_state->stream,
8737 &position))
8738 DRM_ERROR("DC failed to set cursor position\n");
674e78ac 8739 mutex_unlock(&adev->dm.dc_lock);
886daac9 8740 }
2a8f6ccb 8741}
e7b07cee
HW
8742
8743static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8744{
8745
8746 assert_spin_locked(&acrtc->base.dev->event_lock);
8747 WARN_ON(acrtc->event);
8748
8749 acrtc->event = acrtc->base.state->event;
8750
8751 /* Set the flip status */
8752 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8753
8754 /* Mark this event as consumed */
8755 acrtc->base.state->event = NULL;
8756
cb2318b7
VL
8757 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8758 acrtc->crtc_id);
e7b07cee
HW
8759}
8760
bb47de73
NK
8761static void update_freesync_state_on_stream(
8762 struct amdgpu_display_manager *dm,
8763 struct dm_crtc_state *new_crtc_state,
180db303
NK
8764 struct dc_stream_state *new_stream,
8765 struct dc_plane_state *surface,
8766 u32 flip_timestamp_in_us)
bb47de73 8767{
09aef2c4 8768 struct mod_vrr_params vrr_params;
bb47de73 8769 struct dc_info_packet vrr_infopacket = {0};
09aef2c4 8770 struct amdgpu_device *adev = dm->adev;
585d450c 8771 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8772 unsigned long flags;
4cda3243 8773 bool pack_sdp_v1_3 = false;
bb47de73
NK
8774
8775 if (!new_stream)
8776 return;
8777
8778 /*
8779 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8780 * For now it's sufficient to just guard against these conditions.
8781 */
8782
8783 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8784 return;
8785
4a580877 8786 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8787 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8788
180db303
NK
8789 if (surface) {
8790 mod_freesync_handle_preflip(
8791 dm->freesync_module,
8792 surface,
8793 new_stream,
8794 flip_timestamp_in_us,
8795 &vrr_params);
09aef2c4
MK
8796
8797 if (adev->family < AMDGPU_FAMILY_AI &&
8798 amdgpu_dm_vrr_active(new_crtc_state)) {
8799 mod_freesync_handle_v_update(dm->freesync_module,
8800 new_stream, &vrr_params);
e63e2491
EB
8801
8802 /* Need to call this before the frame ends. */
8803 dc_stream_adjust_vmin_vmax(dm->dc,
8804 new_crtc_state->stream,
8805 &vrr_params.adjust);
09aef2c4 8806 }
180db303 8807 }
bb47de73
NK
8808
8809 mod_freesync_build_vrr_infopacket(
8810 dm->freesync_module,
8811 new_stream,
180db303 8812 &vrr_params,
ecd0136b
HT
8813 PACKET_TYPE_VRR,
8814 TRANSFER_FUNC_UNKNOWN,
4cda3243
MT
8815 &vrr_infopacket,
8816 pack_sdp_v1_3);
bb47de73 8817
8a48b44c 8818 new_crtc_state->freesync_timing_changed |=
585d450c 8819 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
180db303
NK
8820 &vrr_params.adjust,
8821 sizeof(vrr_params.adjust)) != 0);
bb47de73 8822
8a48b44c 8823 new_crtc_state->freesync_vrr_info_changed |=
bb47de73
NK
8824 (memcmp(&new_crtc_state->vrr_infopacket,
8825 &vrr_infopacket,
8826 sizeof(vrr_infopacket)) != 0);
8827
585d450c 8828 acrtc->dm_irq_params.vrr_params = vrr_params;
bb47de73
NK
8829 new_crtc_state->vrr_infopacket = vrr_infopacket;
8830
585d450c 8831 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
bb47de73
NK
8832 new_stream->vrr_infopacket = vrr_infopacket;
8833
8834 if (new_crtc_state->freesync_vrr_info_changed)
8835 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8836 new_crtc_state->base.crtc->base.id,
8837 (int)new_crtc_state->base.vrr_enabled,
180db303 8838 (int)vrr_params.state);
09aef2c4 8839
4a580877 8840 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
bb47de73
NK
8841}
8842
585d450c 8843static void update_stream_irq_parameters(
e854194c
MK
8844 struct amdgpu_display_manager *dm,
8845 struct dm_crtc_state *new_crtc_state)
8846{
8847 struct dc_stream_state *new_stream = new_crtc_state->stream;
09aef2c4 8848 struct mod_vrr_params vrr_params;
e854194c 8849 struct mod_freesync_config config = new_crtc_state->freesync_config;
09aef2c4 8850 struct amdgpu_device *adev = dm->adev;
585d450c 8851 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
09aef2c4 8852 unsigned long flags;
e854194c
MK
8853
8854 if (!new_stream)
8855 return;
8856
8857 /*
8858 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8859 * For now it's sufficient to just guard against these conditions.
8860 */
8861 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8862 return;
8863
4a580877 8864 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
585d450c 8865 vrr_params = acrtc->dm_irq_params.vrr_params;
09aef2c4 8866
e854194c
MK
8867 if (new_crtc_state->vrr_supported &&
8868 config.min_refresh_in_uhz &&
8869 config.max_refresh_in_uhz) {
a85ba005
NC
8870 /*
8871 * if freesync compatible mode was set, config.state will be set
8872 * in atomic check
8873 */
8874 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8875 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8876 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8877 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8878 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8879 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8880 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8881 } else {
8882 config.state = new_crtc_state->base.vrr_enabled ?
8883 VRR_STATE_ACTIVE_VARIABLE :
8884 VRR_STATE_INACTIVE;
8885 }
e854194c
MK
8886 } else {
8887 config.state = VRR_STATE_UNSUPPORTED;
8888 }
8889
8890 mod_freesync_build_vrr_params(dm->freesync_module,
8891 new_stream,
8892 &config, &vrr_params);
8893
8894 new_crtc_state->freesync_timing_changed |=
585d450c
AP
8895 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8896 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
e854194c 8897
585d450c
AP
8898 new_crtc_state->freesync_config = config;
8899 /* Copy state for access from DM IRQ handler */
8900 acrtc->dm_irq_params.freesync_config = config;
8901 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8902 acrtc->dm_irq_params.vrr_params = vrr_params;
4a580877 8903 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e854194c
MK
8904}
8905
66b0c973
MK
8906static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8907 struct dm_crtc_state *new_state)
8908{
8909 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8910 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8911
8912 if (!old_vrr_active && new_vrr_active) {
8913 /* Transition VRR inactive -> active:
8914 * While VRR is active, we must not disable vblank irq, as a
8915 * reenable after disable would compute bogus vblank/pflip
8916 * timestamps if it likely happened inside display front-porch.
d2574c33
MK
8917 *
8918 * We also need vupdate irq for the actual core vblank handling
8919 * at end of vblank.
66b0c973 8920 */
d2574c33 8921 dm_set_vupdate_irq(new_state->base.crtc, true);
66b0c973
MK
8922 drm_crtc_vblank_get(new_state->base.crtc);
8923 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8924 __func__, new_state->base.crtc->base.id);
8925 } else if (old_vrr_active && !new_vrr_active) {
8926 /* Transition VRR active -> inactive:
8927 * Allow vblank irq disable again for fixed refresh rate.
8928 */
d2574c33 8929 dm_set_vupdate_irq(new_state->base.crtc, false);
66b0c973
MK
8930 drm_crtc_vblank_put(new_state->base.crtc);
8931 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8932 __func__, new_state->base.crtc->base.id);
8933 }
8934}
8935
8ad27806
NK
8936static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8937{
8938 struct drm_plane *plane;
5760dcb9 8939 struct drm_plane_state *old_plane_state;
8ad27806
NK
8940 int i;
8941
8942 /*
8943 * TODO: Make this per-stream so we don't issue redundant updates for
8944 * commits with multiple streams.
8945 */
5760dcb9 8946 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8ad27806
NK
8947 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8948 handle_cursor_update(plane, old_plane_state);
8949}
8950
3be5262e 8951static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
eb3dc897 8952 struct dc_state *dc_state,
3ee6b26b
AD
8953 struct drm_device *dev,
8954 struct amdgpu_display_manager *dm,
8955 struct drm_crtc *pcrtc,
420cd472 8956 bool wait_for_vblank)
e7b07cee 8957{
efc8278e 8958 uint32_t i;
8a48b44c 8959 uint64_t timestamp_ns;
e7b07cee 8960 struct drm_plane *plane;
0bc9706d 8961 struct drm_plane_state *old_plane_state, *new_plane_state;
e7b07cee 8962 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
0bc9706d
LSL
8963 struct drm_crtc_state *new_pcrtc_state =
8964 drm_atomic_get_new_crtc_state(state, pcrtc);
8965 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
44d09c6a
HW
8966 struct dm_crtc_state *dm_old_crtc_state =
8967 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
74aa7bd4 8968 int planes_count = 0, vpos, hpos;
570c91d5 8969 long r;
e7b07cee 8970 unsigned long flags;
8a48b44c 8971 struct amdgpu_bo *abo;
fdd1fe57
MK
8972 uint32_t target_vblank, last_flip_vblank;
8973 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
74aa7bd4 8974 bool pflip_present = false;
bc7f670e
DF
8975 struct {
8976 struct dc_surface_update surface_updates[MAX_SURFACES];
8977 struct dc_plane_info plane_infos[MAX_SURFACES];
8978 struct dc_scaling_info scaling_infos[MAX_SURFACES];
74aa7bd4 8979 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
bc7f670e 8980 struct dc_stream_update stream_update;
74aa7bd4 8981 } *bundle;
bc7f670e 8982
74aa7bd4 8983 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8a48b44c 8984
74aa7bd4
DF
8985 if (!bundle) {
8986 dm_error("Failed to allocate update bundle\n");
4b510503
NK
8987 goto cleanup;
8988 }
e7b07cee 8989
8ad27806
NK
8990 /*
8991 * Disable the cursor first if we're disabling all the planes.
8992 * It'll remain on the screen after the planes are re-enabled
8993 * if we don't.
8994 */
8995 if (acrtc_state->active_planes == 0)
8996 amdgpu_dm_commit_cursors(state);
8997
e7b07cee 8998 /* update planes when needed */
efc8278e 8999 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
0bc9706d 9000 struct drm_crtc *crtc = new_plane_state->crtc;
f5ba60fe 9001 struct drm_crtc_state *new_crtc_state;
0bc9706d 9002 struct drm_framebuffer *fb = new_plane_state->fb;
6eed95b0 9003 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
34bafd27 9004 bool plane_needs_flip;
c7af5f77 9005 struct dc_plane_state *dc_plane;
54d76575 9006 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
e7b07cee 9007
80c218d5
NK
9008 /* Cursor plane is handled after stream updates */
9009 if (plane->type == DRM_PLANE_TYPE_CURSOR)
e7b07cee 9010 continue;
e7b07cee 9011
f5ba60fe
DD
9012 if (!fb || !crtc || pcrtc != crtc)
9013 continue;
9014
9015 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9016 if (!new_crtc_state->active)
e7b07cee
HW
9017 continue;
9018
bc7f670e 9019 dc_plane = dm_new_plane_state->dc_state;
e7b07cee 9020
74aa7bd4 9021 bundle->surface_updates[planes_count].surface = dc_plane;
bc7f670e 9022 if (new_pcrtc_state->color_mgmt_changed) {
74aa7bd4
DF
9023 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9024 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
44efb784 9025 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
bc7f670e 9026 }
8a48b44c 9027
4375d625 9028 fill_dc_scaling_info(dm->adev, new_plane_state,
695af5f9 9029 &bundle->scaling_infos[planes_count]);
8a48b44c 9030
695af5f9
NK
9031 bundle->surface_updates[planes_count].scaling_info =
9032 &bundle->scaling_infos[planes_count];
8a48b44c 9033
f5031000 9034 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8a48b44c 9035
f5031000 9036 pflip_present = pflip_present || plane_needs_flip;
8a48b44c 9037
f5031000
DF
9038 if (!plane_needs_flip) {
9039 planes_count += 1;
9040 continue;
9041 }
8a48b44c 9042
2fac0f53
CK
9043 abo = gem_to_amdgpu_bo(fb->obj[0]);
9044
f8308898
AG
9045 /*
9046 * Wait for all fences on this FB. Do limited wait to avoid
9047 * deadlock during GPU reset when this fence will not signal
9048 * but we hold reservation lock for the BO.
9049 */
d3fae3b3
CK
9050 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9051 msecs_to_jiffies(5000));
f8308898 9052 if (unlikely(r <= 0))
ed8a5fb2 9053 DRM_ERROR("Waiting for fences timed out!");
2fac0f53 9054
695af5f9 9055 fill_dc_plane_info_and_addr(
8ce5d842 9056 dm->adev, new_plane_state,
6eed95b0 9057 afb->tiling_flags,
695af5f9 9058 &bundle->plane_infos[planes_count],
87b7ebc2 9059 &bundle->flip_addrs[planes_count].address,
6eed95b0 9060 afb->tmz_surface, false);
87b7ebc2 9061
4711c033 9062 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
87b7ebc2
RS
9063 new_plane_state->plane->index,
9064 bundle->plane_infos[planes_count].dcc.enable);
695af5f9
NK
9065
9066 bundle->surface_updates[planes_count].plane_info =
9067 &bundle->plane_infos[planes_count];
8a48b44c 9068
caff0e66
NK
9069 /*
9070 * Only allow immediate flips for fast updates that don't
9071 * change FB pitch, DCC state, rotation or mirroing.
9072 */
f5031000 9073 bundle->flip_addrs[planes_count].flip_immediate =
4d85f45c 9074 crtc->state->async_flip &&
caff0e66 9075 acrtc_state->update_type == UPDATE_TYPE_FAST;
8a48b44c 9076
f5031000
DF
9077 timestamp_ns = ktime_get_ns();
9078 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9079 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9080 bundle->surface_updates[planes_count].surface = dc_plane;
8a48b44c 9081
f5031000
DF
9082 if (!bundle->surface_updates[planes_count].surface) {
9083 DRM_ERROR("No surface for CRTC: id=%d\n",
9084 acrtc_attach->crtc_id);
9085 continue;
bc7f670e
DF
9086 }
9087
f5031000
DF
9088 if (plane == pcrtc->primary)
9089 update_freesync_state_on_stream(
9090 dm,
9091 acrtc_state,
9092 acrtc_state->stream,
9093 dc_plane,
9094 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
bc7f670e 9095
4711c033 9096 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
f5031000
DF
9097 __func__,
9098 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9099 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
bc7f670e
DF
9100
9101 planes_count += 1;
9102
8a48b44c
DF
9103 }
9104
74aa7bd4 9105 if (pflip_present) {
634092b1
MK
9106 if (!vrr_active) {
9107 /* Use old throttling in non-vrr fixed refresh rate mode
9108 * to keep flip scheduling based on target vblank counts
9109 * working in a backwards compatible way, e.g., for
9110 * clients using the GLX_OML_sync_control extension or
9111 * DRI3/Present extension with defined target_msc.
9112 */
e3eff4b5 9113 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
634092b1
MK
9114 }
9115 else {
9116 /* For variable refresh rate mode only:
9117 * Get vblank of last completed flip to avoid > 1 vrr
9118 * flips per video frame by use of throttling, but allow
9119 * flip programming anywhere in the possibly large
9120 * variable vrr vblank interval for fine-grained flip
9121 * timing control and more opportunity to avoid stutter
9122 * on late submission of flips.
9123 */
9124 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5d1c59c4 9125 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
634092b1
MK
9126 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9127 }
9128
fdd1fe57 9129 target_vblank = last_flip_vblank + wait_for_vblank;
8a48b44c
DF
9130
9131 /*
9132 * Wait until we're out of the vertical blank period before the one
9133 * targeted by the flip
9134 */
9135 while ((acrtc_attach->enabled &&
9136 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9137 0, &vpos, &hpos, NULL,
9138 NULL, &pcrtc->hwmode)
9139 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9140 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9141 (int)(target_vblank -
e3eff4b5 9142 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8a48b44c
DF
9143 usleep_range(1000, 1100);
9144 }
9145
8fe684e9
NK
9146 /**
9147 * Prepare the flip event for the pageflip interrupt to handle.
9148 *
9149 * This only works in the case where we've already turned on the
9150 * appropriate hardware blocks (eg. HUBP) so in the transition case
9151 * from 0 -> n planes we have to skip a hardware generated event
9152 * and rely on sending it from software.
9153 */
9154 if (acrtc_attach->base.state->event &&
035f5496
AP
9155 acrtc_state->active_planes > 0 &&
9156 !acrtc_state->force_dpms_off) {
8a48b44c
DF
9157 drm_crtc_vblank_get(pcrtc);
9158
9159 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9160
9161 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9162 prepare_flip_isr(acrtc_attach);
9163
9164 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9165 }
9166
9167 if (acrtc_state->stream) {
8a48b44c 9168 if (acrtc_state->freesync_vrr_info_changed)
74aa7bd4 9169 bundle->stream_update.vrr_infopacket =
8a48b44c 9170 &acrtc_state->stream->vrr_infopacket;
e7b07cee 9171 }
e7b07cee
HW
9172 }
9173
bc92c065 9174 /* Update the planes if changed or disable if we don't have any. */
ed9656fb
ES
9175 if ((planes_count || acrtc_state->active_planes == 0) &&
9176 acrtc_state->stream) {
96160687 9177#if defined(CONFIG_DRM_AMD_DC_DCN)
58aa1c50
NK
9178 /*
9179 * If PSR or idle optimizations are enabled then flush out
9180 * any pending work before hardware programming.
9181 */
06dd1888
NK
9182 if (dm->vblank_control_workqueue)
9183 flush_workqueue(dm->vblank_control_workqueue);
96160687 9184#endif
58aa1c50 9185
b6e881c9 9186 bundle->stream_update.stream = acrtc_state->stream;
bc7f670e 9187 if (new_pcrtc_state->mode_changed) {
74aa7bd4
DF
9188 bundle->stream_update.src = acrtc_state->stream->src;
9189 bundle->stream_update.dst = acrtc_state->stream->dst;
e7b07cee
HW
9190 }
9191
cf020d49
NK
9192 if (new_pcrtc_state->color_mgmt_changed) {
9193 /*
9194 * TODO: This isn't fully correct since we've actually
9195 * already modified the stream in place.
9196 */
9197 bundle->stream_update.gamut_remap =
9198 &acrtc_state->stream->gamut_remap_matrix;
9199 bundle->stream_update.output_csc_transform =
9200 &acrtc_state->stream->csc_color_matrix;
9201 bundle->stream_update.out_transfer_func =
9202 acrtc_state->stream->out_transfer_func;
9203 }
bc7f670e 9204
8a48b44c 9205 acrtc_state->stream->abm_level = acrtc_state->abm_level;
bc7f670e 9206 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
74aa7bd4 9207 bundle->stream_update.abm_level = &acrtc_state->abm_level;
44d09c6a 9208
e63e2491
EB
9209 /*
9210 * If FreeSync state on the stream has changed then we need to
9211 * re-adjust the min/max bounds now that DC doesn't handle this
9212 * as part of commit.
9213 */
a85ba005 9214 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
e63e2491
EB
9215 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9216 dc_stream_adjust_vmin_vmax(
9217 dm->dc, acrtc_state->stream,
585d450c 9218 &acrtc_attach->dm_irq_params.vrr_params.adjust);
e63e2491
EB
9219 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9220 }
bc7f670e 9221 mutex_lock(&dm->dc_lock);
8c322309 9222 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
d1ebfdd8 9223 acrtc_state->stream->link->psr_settings.psr_allow_active)
8c322309
RL
9224 amdgpu_dm_psr_disable(acrtc_state->stream);
9225
bc7f670e 9226 dc_commit_updates_for_stream(dm->dc,
74aa7bd4 9227 bundle->surface_updates,
bc7f670e
DF
9228 planes_count,
9229 acrtc_state->stream,
efc8278e
AJ
9230 &bundle->stream_update,
9231 dc_state);
8c322309 9232
8fe684e9
NK
9233 /**
9234 * Enable or disable the interrupts on the backend.
9235 *
9236 * Most pipes are put into power gating when unused.
9237 *
9238 * When power gating is enabled on a pipe we lose the
9239 * interrupt enablement state when power gating is disabled.
9240 *
9241 * So we need to update the IRQ control state in hardware
9242 * whenever the pipe turns on (since it could be previously
9243 * power gated) or off (since some pipes can't be power gated
9244 * on some ASICs).
9245 */
9246 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
1348969a
LT
9247 dm_update_pflip_irq_state(drm_to_adev(dev),
9248 acrtc_attach);
8fe684e9 9249
8c322309 9250 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
1cfbbdde 9251 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
d1ebfdd8 9252 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8c322309 9253 amdgpu_dm_link_setup_psr(acrtc_state->stream);
58aa1c50
NK
9254
9255 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9256 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9257 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9258 struct amdgpu_dm_connector *aconn =
9259 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
1a365683
RL
9260
9261 if (aconn->psr_skip_count > 0)
9262 aconn->psr_skip_count--;
58aa1c50
NK
9263
9264 /* Allow PSR when skip count is 0. */
9265 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9266 } else {
9267 acrtc_attach->dm_irq_params.allow_psr_entry = false;
8c322309
RL
9268 }
9269
bc7f670e 9270 mutex_unlock(&dm->dc_lock);
e7b07cee 9271 }
4b510503 9272
8ad27806
NK
9273 /*
9274 * Update cursor state *after* programming all the planes.
9275 * This avoids redundant programming in the case where we're going
9276 * to be disabling a single plane - those pipes are being disabled.
9277 */
9278 if (acrtc_state->active_planes)
9279 amdgpu_dm_commit_cursors(state);
80c218d5 9280
4b510503 9281cleanup:
74aa7bd4 9282 kfree(bundle);
e7b07cee
HW
9283}
9284
6ce8f316
NK
9285static void amdgpu_dm_commit_audio(struct drm_device *dev,
9286 struct drm_atomic_state *state)
9287{
1348969a 9288 struct amdgpu_device *adev = drm_to_adev(dev);
6ce8f316
NK
9289 struct amdgpu_dm_connector *aconnector;
9290 struct drm_connector *connector;
9291 struct drm_connector_state *old_con_state, *new_con_state;
9292 struct drm_crtc_state *new_crtc_state;
9293 struct dm_crtc_state *new_dm_crtc_state;
9294 const struct dc_stream_status *status;
9295 int i, inst;
9296
9297 /* Notify device removals. */
9298 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9299 if (old_con_state->crtc != new_con_state->crtc) {
9300 /* CRTC changes require notification. */
9301 goto notify;
9302 }
9303
9304 if (!new_con_state->crtc)
9305 continue;
9306
9307 new_crtc_state = drm_atomic_get_new_crtc_state(
9308 state, new_con_state->crtc);
9309
9310 if (!new_crtc_state)
9311 continue;
9312
9313 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9314 continue;
9315
9316 notify:
9317 aconnector = to_amdgpu_dm_connector(connector);
9318
9319 mutex_lock(&adev->dm.audio_lock);
9320 inst = aconnector->audio_inst;
9321 aconnector->audio_inst = -1;
9322 mutex_unlock(&adev->dm.audio_lock);
9323
9324 amdgpu_dm_audio_eld_notify(adev, inst);
9325 }
9326
9327 /* Notify audio device additions. */
9328 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9329 if (!new_con_state->crtc)
9330 continue;
9331
9332 new_crtc_state = drm_atomic_get_new_crtc_state(
9333 state, new_con_state->crtc);
9334
9335 if (!new_crtc_state)
9336 continue;
9337
9338 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9339 continue;
9340
9341 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9342 if (!new_dm_crtc_state->stream)
9343 continue;
9344
9345 status = dc_stream_get_status(new_dm_crtc_state->stream);
9346 if (!status)
9347 continue;
9348
9349 aconnector = to_amdgpu_dm_connector(connector);
9350
9351 mutex_lock(&adev->dm.audio_lock);
9352 inst = status->audio_inst;
9353 aconnector->audio_inst = inst;
9354 mutex_unlock(&adev->dm.audio_lock);
9355
9356 amdgpu_dm_audio_eld_notify(adev, inst);
9357 }
9358}
9359
1f6010a9 9360/*
27b3f4fc
LSL
9361 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9362 * @crtc_state: the DRM CRTC state
9363 * @stream_state: the DC stream state.
9364 *
9365 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9366 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9367 */
9368static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9369 struct dc_stream_state *stream_state)
9370{
b9952f93 9371 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
27b3f4fc 9372}
e7b07cee 9373
b8592b48
LL
9374/**
9375 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9376 * @state: The atomic state to commit
9377 *
9378 * This will tell DC to commit the constructed DC state from atomic_check,
9379 * programming the hardware. Any failures here implies a hardware failure, since
9380 * atomic check should have filtered anything non-kosher.
9381 */
7578ecda 9382static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
e7b07cee
HW
9383{
9384 struct drm_device *dev = state->dev;
1348969a 9385 struct amdgpu_device *adev = drm_to_adev(dev);
e7b07cee
HW
9386 struct amdgpu_display_manager *dm = &adev->dm;
9387 struct dm_atomic_state *dm_state;
eb3dc897 9388 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
e7b07cee 9389 uint32_t i, j;
5cc6dcbd 9390 struct drm_crtc *crtc;
0bc9706d 9391 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
e7b07cee
HW
9392 unsigned long flags;
9393 bool wait_for_vblank = true;
9394 struct drm_connector *connector;
c2cea706 9395 struct drm_connector_state *old_con_state, *new_con_state;
54d76575 9396 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
fe2a1965 9397 int crtc_disable_count = 0;
6ee90e88 9398 bool mode_set_reset_required = false;
e7b07cee 9399
e8a98235
RS
9400 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9401
e7b07cee
HW
9402 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9403
eb3dc897
NK
9404 dm_state = dm_atomic_get_new_state(state);
9405 if (dm_state && dm_state->context) {
9406 dc_state = dm_state->context;
9407 } else {
9408 /* No state changes, retain current state. */
813d20dc 9409 dc_state_temp = dc_create_state(dm->dc);
eb3dc897
NK
9410 ASSERT(dc_state_temp);
9411 dc_state = dc_state_temp;
9412 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9413 }
e7b07cee 9414
6d90a208
AP
9415 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9416 new_crtc_state, i) {
9417 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9418
9419 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9420
9421 if (old_crtc_state->active &&
9422 (!new_crtc_state->active ||
9423 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9424 manage_dm_interrupts(adev, acrtc, false);
9425 dc_stream_release(dm_old_crtc_state->stream);
9426 }
9427 }
9428
8976f73b
RS
9429 drm_atomic_helper_calc_timestamping_constants(state);
9430
e7b07cee 9431 /* update changed items */
0bc9706d 9432 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
e7b07cee 9433 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9434
54d76575
LSL
9435 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9436 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
e7b07cee 9437
4711c033 9438 DRM_DEBUG_ATOMIC(
e7b07cee
HW
9439 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9440 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9441 "connectors_changed:%d\n",
9442 acrtc->crtc_id,
0bc9706d
LSL
9443 new_crtc_state->enable,
9444 new_crtc_state->active,
9445 new_crtc_state->planes_changed,
9446 new_crtc_state->mode_changed,
9447 new_crtc_state->active_changed,
9448 new_crtc_state->connectors_changed);
e7b07cee 9449
5c68c652
VL
9450 /* Disable cursor if disabling crtc */
9451 if (old_crtc_state->active && !new_crtc_state->active) {
9452 struct dc_cursor_position position;
9453
9454 memset(&position, 0, sizeof(position));
9455 mutex_lock(&dm->dc_lock);
9456 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9457 mutex_unlock(&dm->dc_lock);
9458 }
9459
27b3f4fc
LSL
9460 /* Copy all transient state flags into dc state */
9461 if (dm_new_crtc_state->stream) {
9462 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9463 dm_new_crtc_state->stream);
9464 }
9465
e7b07cee
HW
9466 /* handles headless hotplug case, updating new_state and
9467 * aconnector as needed
9468 */
9469
54d76575 9470 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
e7b07cee 9471
4711c033 9472 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9473
54d76575 9474 if (!dm_new_crtc_state->stream) {
e7b07cee 9475 /*
b830ebc9
HW
9476 * this could happen because of issues with
9477 * userspace notifications delivery.
9478 * In this case userspace tries to set mode on
1f6010a9
DF
9479 * display which is disconnected in fact.
9480 * dc_sink is NULL in this case on aconnector.
b830ebc9
HW
9481 * We expect reset mode will come soon.
9482 *
9483 * This can also happen when unplug is done
9484 * during resume sequence ended
9485 *
9486 * In this case, we want to pretend we still
9487 * have a sink to keep the pipe running so that
9488 * hw state is consistent with the sw state
9489 */
f1ad2f5e 9490 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
e7b07cee
HW
9491 __func__, acrtc->base.base.id);
9492 continue;
9493 }
9494
54d76575
LSL
9495 if (dm_old_crtc_state->stream)
9496 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
e7b07cee 9497
97028037
LP
9498 pm_runtime_get_noresume(dev->dev);
9499
e7b07cee 9500 acrtc->enabled = true;
0bc9706d
LSL
9501 acrtc->hw_mode = new_crtc_state->mode;
9502 crtc->hwmode = new_crtc_state->mode;
6ee90e88 9503 mode_set_reset_required = true;
0bc9706d 9504 } else if (modereset_required(new_crtc_state)) {
4711c033 9505 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
e7b07cee 9506 /* i.e. reset mode */
6ee90e88 9507 if (dm_old_crtc_state->stream)
54d76575 9508 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
a85ba005 9509
6ee90e88 9510 mode_set_reset_required = true;
e7b07cee
HW
9511 }
9512 } /* for_each_crtc_in_state() */
9513
eb3dc897 9514 if (dc_state) {
6ee90e88 9515 /* if there mode set or reset, disable eDP PSR */
58aa1c50 9516 if (mode_set_reset_required) {
96160687 9517#if defined(CONFIG_DRM_AMD_DC_DCN)
06dd1888
NK
9518 if (dm->vblank_control_workqueue)
9519 flush_workqueue(dm->vblank_control_workqueue);
96160687 9520#endif
6ee90e88 9521 amdgpu_dm_psr_disable_all(dm);
58aa1c50 9522 }
6ee90e88 9523
eb3dc897 9524 dm_enable_per_frame_crtc_master_sync(dc_state);
674e78ac 9525 mutex_lock(&dm->dc_lock);
eb3dc897 9526 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5af50b0b
BR
9527#if defined(CONFIG_DRM_AMD_DC_DCN)
9528 /* Allow idle optimization when vblank count is 0 for display off */
9529 if (dm->active_vblank_irq_count == 0)
9530 dc_allow_idle_optimizations(dm->dc,true);
9531#endif
674e78ac 9532 mutex_unlock(&dm->dc_lock);
fa2123db 9533 }
fe8858bb 9534
0bc9706d 9535 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9536 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
b830ebc9 9537
54d76575 9538 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9539
54d76575 9540 if (dm_new_crtc_state->stream != NULL) {
e7b07cee 9541 const struct dc_stream_status *status =
54d76575 9542 dc_stream_get_status(dm_new_crtc_state->stream);
e7b07cee 9543
eb3dc897 9544 if (!status)
09f609c3
LL
9545 status = dc_stream_get_status_from_state(dc_state,
9546 dm_new_crtc_state->stream);
e7b07cee 9547 if (!status)
54d76575 9548 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
e7b07cee
HW
9549 else
9550 acrtc->otg_inst = status->primary_otg_inst;
9551 }
9552 }
0c8620d6
BL
9553#ifdef CONFIG_DRM_AMD_DC_HDCP
9554 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9555 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9556 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9557 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9558
9559 new_crtc_state = NULL;
9560
9561 if (acrtc)
9562 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9563
9564 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9565
9566 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9567 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9568 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9569 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
97f6c917 9570 dm_new_con_state->update_hdcp = true;
0c8620d6
BL
9571 continue;
9572 }
9573
9574 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
b1abe558
BL
9575 hdcp_update_display(
9576 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
23eb4191 9577 new_con_state->hdcp_content_type,
0e86d3d4 9578 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
0c8620d6
BL
9579 }
9580#endif
e7b07cee 9581
02d6a6fc 9582 /* Handle connector state changes */
c2cea706 9583 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
9584 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9585 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9586 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
efc8278e 9587 struct dc_surface_update dummy_updates[MAX_SURFACES];
19afd799 9588 struct dc_stream_update stream_update;
b232d4ed 9589 struct dc_info_packet hdr_packet;
e7b07cee 9590 struct dc_stream_status *status = NULL;
b232d4ed 9591 bool abm_changed, hdr_changed, scaling_changed;
e7b07cee 9592
efc8278e 9593 memset(&dummy_updates, 0, sizeof(dummy_updates));
19afd799
NC
9594 memset(&stream_update, 0, sizeof(stream_update));
9595
44d09c6a 9596 if (acrtc) {
0bc9706d 9597 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
44d09c6a
HW
9598 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9599 }
0bc9706d 9600
e7b07cee 9601 /* Skip any modesets/resets */
0bc9706d 9602 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
e7b07cee
HW
9603 continue;
9604
54d76575 9605 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c1ee92f9
DF
9606 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9607
b232d4ed
NK
9608 scaling_changed = is_scaling_state_different(dm_new_con_state,
9609 dm_old_con_state);
9610
9611 abm_changed = dm_new_crtc_state->abm_level !=
9612 dm_old_crtc_state->abm_level;
9613
9614 hdr_changed =
72921cdf 9615 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
b232d4ed
NK
9616
9617 if (!scaling_changed && !abm_changed && !hdr_changed)
c1ee92f9 9618 continue;
e7b07cee 9619
b6e881c9 9620 stream_update.stream = dm_new_crtc_state->stream;
b232d4ed 9621 if (scaling_changed) {
02d6a6fc 9622 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
b6e881c9 9623 dm_new_con_state, dm_new_crtc_state->stream);
e7b07cee 9624
02d6a6fc
DF
9625 stream_update.src = dm_new_crtc_state->stream->src;
9626 stream_update.dst = dm_new_crtc_state->stream->dst;
9627 }
9628
b232d4ed 9629 if (abm_changed) {
02d6a6fc
DF
9630 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9631
9632 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9633 }
70e8ffc5 9634
b232d4ed
NK
9635 if (hdr_changed) {
9636 fill_hdr_info_packet(new_con_state, &hdr_packet);
9637 stream_update.hdr_static_metadata = &hdr_packet;
9638 }
9639
54d76575 9640 status = dc_stream_get_status(dm_new_crtc_state->stream);
57738ae4
ND
9641
9642 if (WARN_ON(!status))
9643 continue;
9644
3be5262e 9645 WARN_ON(!status->plane_count);
e7b07cee 9646
02d6a6fc
DF
9647 /*
9648 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9649 * Here we create an empty update on each plane.
9650 * To fix this, DC should permit updating only stream properties.
9651 */
9652 for (j = 0; j < status->plane_count; j++)
efc8278e 9653 dummy_updates[j].surface = status->plane_states[0];
02d6a6fc
DF
9654
9655
9656 mutex_lock(&dm->dc_lock);
9657 dc_commit_updates_for_stream(dm->dc,
efc8278e 9658 dummy_updates,
02d6a6fc
DF
9659 status->plane_count,
9660 dm_new_crtc_state->stream,
efc8278e
AJ
9661 &stream_update,
9662 dc_state);
02d6a6fc 9663 mutex_unlock(&dm->dc_lock);
e7b07cee
HW
9664 }
9665
b5e83f6f 9666 /* Count number of newly disabled CRTCs for dropping PM refs later. */
e1fc2dca 9667 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
057be086 9668 new_crtc_state, i) {
fe2a1965
LP
9669 if (old_crtc_state->active && !new_crtc_state->active)
9670 crtc_disable_count++;
9671
54d76575 9672 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e1fc2dca 9673 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
66b0c973 9674
585d450c
AP
9675 /* For freesync config update on crtc state and params for irq */
9676 update_stream_irq_parameters(dm, dm_new_crtc_state);
057be086 9677
66b0c973
MK
9678 /* Handle vrr on->off / off->on transitions */
9679 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9680 dm_new_crtc_state);
e7b07cee
HW
9681 }
9682
8fe684e9
NK
9683 /**
9684 * Enable interrupts for CRTCs that are newly enabled or went through
9685 * a modeset. It was intentionally deferred until after the front end
9686 * state was modified to wait until the OTG was on and so the IRQ
9687 * handlers didn't access stale or invalid state.
9688 */
9689 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9690 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8e7b6fee 9691#ifdef CONFIG_DEBUG_FS
86bc2219 9692 bool configure_crc = false;
8e7b6fee 9693 enum amdgpu_dm_pipe_crc_source cur_crc_src;
d98af272
WL
9694#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9695 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9696#endif
9697 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9698 cur_crc_src = acrtc->dm_irq_params.crc_src;
9699 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8e7b6fee 9700#endif
585d450c
AP
9701 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9702
8fe684e9
NK
9703 if (new_crtc_state->active &&
9704 (!old_crtc_state->active ||
9705 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
585d450c
AP
9706 dc_stream_retain(dm_new_crtc_state->stream);
9707 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8fe684e9 9708 manage_dm_interrupts(adev, acrtc, true);
e2881d6d 9709
24eb9374 9710#ifdef CONFIG_DEBUG_FS
8fe684e9
NK
9711 /**
9712 * Frontend may have changed so reapply the CRC capture
9713 * settings for the stream.
9714 */
9715 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
c920888c 9716
8e7b6fee 9717 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
86bc2219
WL
9718 configure_crc = true;
9719#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
d98af272
WL
9720 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9721 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9722 acrtc->dm_irq_params.crc_window.update_win = true;
9723 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9724 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9725 crc_rd_wrk->crtc = crtc;
9726 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9727 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9728 }
86bc2219 9729#endif
e2881d6d 9730 }
c920888c 9731
86bc2219 9732 if (configure_crc)
bbc49fc0
WL
9733 if (amdgpu_dm_crtc_configure_crc_source(
9734 crtc, dm_new_crtc_state, cur_crc_src))
9735 DRM_DEBUG_DRIVER("Failed to configure crc source");
24eb9374 9736#endif
8fe684e9
NK
9737 }
9738 }
e7b07cee 9739
420cd472 9740 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
4d85f45c 9741 if (new_crtc_state->async_flip)
420cd472
DF
9742 wait_for_vblank = false;
9743
e7b07cee 9744 /* update planes when needed per crtc*/
5cc6dcbd 9745 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
54d76575 9746 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
e7b07cee 9747
54d76575 9748 if (dm_new_crtc_state->stream)
eb3dc897 9749 amdgpu_dm_commit_planes(state, dc_state, dev,
420cd472 9750 dm, crtc, wait_for_vblank);
e7b07cee
HW
9751 }
9752
6ce8f316
NK
9753 /* Update audio instances for each connector. */
9754 amdgpu_dm_commit_audio(dev, state);
9755
7230362c
AD
9756#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9757 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9758 /* restore the backlight level */
7fd13bae
AD
9759 for (i = 0; i < dm->num_of_edps; i++) {
9760 if (dm->backlight_dev[i] &&
9761 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9762 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9763 }
7230362c 9764#endif
e7b07cee
HW
9765 /*
9766 * send vblank event on all events not handled in flip and
9767 * mark consumed event for drm_atomic_helper_commit_hw_done
9768 */
4a580877 9769 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
0bc9706d 9770 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
e7b07cee 9771
0bc9706d
LSL
9772 if (new_crtc_state->event)
9773 drm_send_event_locked(dev, &new_crtc_state->event->base);
e7b07cee 9774
0bc9706d 9775 new_crtc_state->event = NULL;
e7b07cee 9776 }
4a580877 9777 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
e7b07cee 9778
29c8f234
LL
9779 /* Signal HW programming completion */
9780 drm_atomic_helper_commit_hw_done(state);
e7b07cee
HW
9781
9782 if (wait_for_vblank)
320a1274 9783 drm_atomic_helper_wait_for_flip_done(dev, state);
e7b07cee
HW
9784
9785 drm_atomic_helper_cleanup_planes(dev, state);
97028037 9786
5f6fab24
AD
9787 /* return the stolen vga memory back to VRAM */
9788 if (!adev->mman.keep_stolen_vga_memory)
9789 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9790 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9791
1f6010a9
DF
9792 /*
9793 * Finally, drop a runtime PM reference for each newly disabled CRTC,
97028037
LP
9794 * so we can put the GPU into runtime suspend if we're not driving any
9795 * displays anymore
9796 */
fe2a1965
LP
9797 for (i = 0; i < crtc_disable_count; i++)
9798 pm_runtime_put_autosuspend(dev->dev);
97028037 9799 pm_runtime_mark_last_busy(dev->dev);
eb3dc897
NK
9800
9801 if (dc_state_temp)
9802 dc_release_state(dc_state_temp);
e7b07cee
HW
9803}
9804
9805
9806static int dm_force_atomic_commit(struct drm_connector *connector)
9807{
9808 int ret = 0;
9809 struct drm_device *ddev = connector->dev;
9810 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9811 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9812 struct drm_plane *plane = disconnected_acrtc->base.primary;
9813 struct drm_connector_state *conn_state;
9814 struct drm_crtc_state *crtc_state;
9815 struct drm_plane_state *plane_state;
9816
9817 if (!state)
9818 return -ENOMEM;
9819
9820 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9821
9822 /* Construct an atomic state to restore previous display setting */
9823
9824 /*
9825 * Attach connectors to drm_atomic_state
9826 */
9827 conn_state = drm_atomic_get_connector_state(state, connector);
9828
9829 ret = PTR_ERR_OR_ZERO(conn_state);
9830 if (ret)
2dc39051 9831 goto out;
e7b07cee
HW
9832
9833 /* Attach crtc to drm_atomic_state*/
9834 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9835
9836 ret = PTR_ERR_OR_ZERO(crtc_state);
9837 if (ret)
2dc39051 9838 goto out;
e7b07cee
HW
9839
9840 /* force a restore */
9841 crtc_state->mode_changed = true;
9842
9843 /* Attach plane to drm_atomic_state */
9844 plane_state = drm_atomic_get_plane_state(state, plane);
9845
9846 ret = PTR_ERR_OR_ZERO(plane_state);
9847 if (ret)
2dc39051 9848 goto out;
e7b07cee
HW
9849
9850 /* Call commit internally with the state we just constructed */
9851 ret = drm_atomic_commit(state);
e7b07cee 9852
2dc39051 9853out:
e7b07cee 9854 drm_atomic_state_put(state);
2dc39051
VL
9855 if (ret)
9856 DRM_ERROR("Restoring old state failed with %i\n", ret);
e7b07cee
HW
9857
9858 return ret;
9859}
9860
9861/*
1f6010a9
DF
9862 * This function handles all cases when set mode does not come upon hotplug.
9863 * This includes when a display is unplugged then plugged back into the
9864 * same port and when running without usermode desktop manager supprot
e7b07cee 9865 */
3ee6b26b
AD
9866void dm_restore_drm_connector_state(struct drm_device *dev,
9867 struct drm_connector *connector)
e7b07cee 9868{
c84dec2f 9869 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
e7b07cee
HW
9870 struct amdgpu_crtc *disconnected_acrtc;
9871 struct dm_crtc_state *acrtc_state;
9872
9873 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9874 return;
9875
9876 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
70e8ffc5
HW
9877 if (!disconnected_acrtc)
9878 return;
e7b07cee 9879
70e8ffc5
HW
9880 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9881 if (!acrtc_state->stream)
e7b07cee
HW
9882 return;
9883
9884 /*
9885 * If the previous sink is not released and different from the current,
9886 * we deduce we are in a state where we can not rely on usermode call
9887 * to turn on the display, so we do it here
9888 */
9889 if (acrtc_state->stream->sink != aconnector->dc_sink)
9890 dm_force_atomic_commit(&aconnector->base);
9891}
9892
1f6010a9 9893/*
e7b07cee
HW
9894 * Grabs all modesetting locks to serialize against any blocking commits,
9895 * Waits for completion of all non blocking commits.
9896 */
3ee6b26b
AD
9897static int do_aquire_global_lock(struct drm_device *dev,
9898 struct drm_atomic_state *state)
e7b07cee
HW
9899{
9900 struct drm_crtc *crtc;
9901 struct drm_crtc_commit *commit;
9902 long ret;
9903
1f6010a9
DF
9904 /*
9905 * Adding all modeset locks to aquire_ctx will
e7b07cee
HW
9906 * ensure that when the framework release it the
9907 * extra locks we are locking here will get released to
9908 */
9909 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9910 if (ret)
9911 return ret;
9912
9913 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9914 spin_lock(&crtc->commit_lock);
9915 commit = list_first_entry_or_null(&crtc->commit_list,
9916 struct drm_crtc_commit, commit_entry);
9917 if (commit)
9918 drm_crtc_commit_get(commit);
9919 spin_unlock(&crtc->commit_lock);
9920
9921 if (!commit)
9922 continue;
9923
1f6010a9
DF
9924 /*
9925 * Make sure all pending HW programming completed and
e7b07cee
HW
9926 * page flips done
9927 */
9928 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9929
9930 if (ret > 0)
9931 ret = wait_for_completion_interruptible_timeout(
9932 &commit->flip_done, 10*HZ);
9933
9934 if (ret == 0)
9935 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
b830ebc9 9936 "timed out\n", crtc->base.id, crtc->name);
e7b07cee
HW
9937
9938 drm_crtc_commit_put(commit);
9939 }
9940
9941 return ret < 0 ? ret : 0;
9942}
9943
bb47de73
NK
9944static void get_freesync_config_for_crtc(
9945 struct dm_crtc_state *new_crtc_state,
9946 struct dm_connector_state *new_con_state)
98e6436d
AK
9947{
9948 struct mod_freesync_config config = {0};
98e6436d
AK
9949 struct amdgpu_dm_connector *aconnector =
9950 to_amdgpu_dm_connector(new_con_state->base.connector);
a057ec46 9951 struct drm_display_mode *mode = &new_crtc_state->base.mode;
0ab925d3 9952 int vrefresh = drm_mode_vrefresh(mode);
a85ba005 9953 bool fs_vid_mode = false;
98e6436d 9954
a057ec46 9955 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
0ab925d3
NK
9956 vrefresh >= aconnector->min_vfreq &&
9957 vrefresh <= aconnector->max_vfreq;
bb47de73 9958
a057ec46
IB
9959 if (new_crtc_state->vrr_supported) {
9960 new_crtc_state->stream->ignore_msa_timing_param = true;
a85ba005
NC
9961 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9962
9963 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9964 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
69ff8845 9965 config.vsif_supported = true;
180db303 9966 config.btr = true;
98e6436d 9967
a85ba005
NC
9968 if (fs_vid_mode) {
9969 config.state = VRR_STATE_ACTIVE_FIXED;
9970 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9971 goto out;
9972 } else if (new_crtc_state->base.vrr_enabled) {
9973 config.state = VRR_STATE_ACTIVE_VARIABLE;
9974 } else {
9975 config.state = VRR_STATE_INACTIVE;
9976 }
9977 }
9978out:
bb47de73
NK
9979 new_crtc_state->freesync_config = config;
9980}
98e6436d 9981
bb47de73
NK
9982static void reset_freesync_config_for_crtc(
9983 struct dm_crtc_state *new_crtc_state)
9984{
9985 new_crtc_state->vrr_supported = false;
98e6436d 9986
bb47de73
NK
9987 memset(&new_crtc_state->vrr_infopacket, 0,
9988 sizeof(new_crtc_state->vrr_infopacket));
98e6436d
AK
9989}
9990
a85ba005
NC
9991static bool
9992is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9993 struct drm_crtc_state *new_crtc_state)
9994{
9995 struct drm_display_mode old_mode, new_mode;
9996
9997 if (!old_crtc_state || !new_crtc_state)
9998 return false;
9999
10000 old_mode = old_crtc_state->mode;
10001 new_mode = new_crtc_state->mode;
10002
10003 if (old_mode.clock == new_mode.clock &&
10004 old_mode.hdisplay == new_mode.hdisplay &&
10005 old_mode.vdisplay == new_mode.vdisplay &&
10006 old_mode.htotal == new_mode.htotal &&
10007 old_mode.vtotal != new_mode.vtotal &&
10008 old_mode.hsync_start == new_mode.hsync_start &&
10009 old_mode.vsync_start != new_mode.vsync_start &&
10010 old_mode.hsync_end == new_mode.hsync_end &&
10011 old_mode.vsync_end != new_mode.vsync_end &&
10012 old_mode.hskew == new_mode.hskew &&
10013 old_mode.vscan == new_mode.vscan &&
10014 (old_mode.vsync_end - old_mode.vsync_start) ==
10015 (new_mode.vsync_end - new_mode.vsync_start))
10016 return true;
10017
10018 return false;
10019}
10020
10021static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10022 uint64_t num, den, res;
10023 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10024
10025 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10026
10027 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10028 den = (unsigned long long)new_crtc_state->mode.htotal *
10029 (unsigned long long)new_crtc_state->mode.vtotal;
10030
10031 res = div_u64(num, den);
10032 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10033}
10034
4b9674e5
LL
10035static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10036 struct drm_atomic_state *state,
10037 struct drm_crtc *crtc,
10038 struct drm_crtc_state *old_crtc_state,
10039 struct drm_crtc_state *new_crtc_state,
10040 bool enable,
10041 bool *lock_and_validation_needed)
e7b07cee 10042{
eb3dc897 10043 struct dm_atomic_state *dm_state = NULL;
54d76575 10044 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9635b754 10045 struct dc_stream_state *new_stream;
62f55537 10046 int ret = 0;
d4d4a645 10047
1f6010a9
DF
10048 /*
10049 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10050 * update changed items
10051 */
4b9674e5
LL
10052 struct amdgpu_crtc *acrtc = NULL;
10053 struct amdgpu_dm_connector *aconnector = NULL;
10054 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10055 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
e7b07cee 10056
4b9674e5 10057 new_stream = NULL;
9635b754 10058
4b9674e5
LL
10059 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10060 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10061 acrtc = to_amdgpu_crtc(crtc);
4b9674e5 10062 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
19f89e23 10063
4b9674e5
LL
10064 /* TODO This hack should go away */
10065 if (aconnector && enable) {
10066 /* Make sure fake sink is created in plug-in scenario */
10067 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10068 &aconnector->base);
10069 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10070 &aconnector->base);
19f89e23 10071
4b9674e5
LL
10072 if (IS_ERR(drm_new_conn_state)) {
10073 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10074 goto fail;
10075 }
19f89e23 10076
4b9674e5
LL
10077 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10078 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
19f89e23 10079
02d35a67
JFZ
10080 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10081 goto skip_modeset;
10082
cbd14ae7
SW
10083 new_stream = create_validate_stream_for_sink(aconnector,
10084 &new_crtc_state->mode,
10085 dm_new_conn_state,
10086 dm_old_crtc_state->stream);
19f89e23 10087
4b9674e5
LL
10088 /*
10089 * we can have no stream on ACTION_SET if a display
10090 * was disconnected during S3, in this case it is not an
10091 * error, the OS will be updated after detection, and
10092 * will do the right thing on next atomic commit
10093 */
19f89e23 10094
4b9674e5
LL
10095 if (!new_stream) {
10096 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10097 __func__, acrtc->base.base.id);
10098 ret = -ENOMEM;
10099 goto fail;
10100 }
e7b07cee 10101
3d4e52d0
VL
10102 /*
10103 * TODO: Check VSDB bits to decide whether this should
10104 * be enabled or not.
10105 */
10106 new_stream->triggered_crtc_reset.enabled =
10107 dm->force_timing_sync;
10108
4b9674e5 10109 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
98e6436d 10110
88694af9
NK
10111 ret = fill_hdr_info_packet(drm_new_conn_state,
10112 &new_stream->hdr_static_metadata);
10113 if (ret)
10114 goto fail;
10115
7e930949
NK
10116 /*
10117 * If we already removed the old stream from the context
10118 * (and set the new stream to NULL) then we can't reuse
10119 * the old stream even if the stream and scaling are unchanged.
10120 * We'll hit the BUG_ON and black screen.
10121 *
10122 * TODO: Refactor this function to allow this check to work
10123 * in all conditions.
10124 */
a85ba005
NC
10125 if (amdgpu_freesync_vid_mode &&
10126 dm_new_crtc_state->stream &&
10127 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10128 goto skip_modeset;
10129
7e930949
NK
10130 if (dm_new_crtc_state->stream &&
10131 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
4b9674e5
LL
10132 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10133 new_crtc_state->mode_changed = false;
10134 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10135 new_crtc_state->mode_changed);
62f55537 10136 }
4b9674e5 10137 }
b830ebc9 10138
02d35a67 10139 /* mode_changed flag may get updated above, need to check again */
4b9674e5
LL
10140 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10141 goto skip_modeset;
e7b07cee 10142
4711c033 10143 DRM_DEBUG_ATOMIC(
4b9674e5
LL
10144 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10145 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10146 "connectors_changed:%d\n",
10147 acrtc->crtc_id,
10148 new_crtc_state->enable,
10149 new_crtc_state->active,
10150 new_crtc_state->planes_changed,
10151 new_crtc_state->mode_changed,
10152 new_crtc_state->active_changed,
10153 new_crtc_state->connectors_changed);
62f55537 10154
4b9674e5
LL
10155 /* Remove stream for any changed/disabled CRTC */
10156 if (!enable) {
62f55537 10157
4b9674e5
LL
10158 if (!dm_old_crtc_state->stream)
10159 goto skip_modeset;
eb3dc897 10160
a85ba005
NC
10161 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10162 is_timing_unchanged_for_freesync(new_crtc_state,
10163 old_crtc_state)) {
10164 new_crtc_state->mode_changed = false;
10165 DRM_DEBUG_DRIVER(
10166 "Mode change not required for front porch change, "
10167 "setting mode_changed to %d",
10168 new_crtc_state->mode_changed);
10169
10170 set_freesync_fixed_config(dm_new_crtc_state);
10171
10172 goto skip_modeset;
10173 } else if (amdgpu_freesync_vid_mode && aconnector &&
10174 is_freesync_video_mode(&new_crtc_state->mode,
10175 aconnector)) {
e88ebd83
SC
10176 struct drm_display_mode *high_mode;
10177
10178 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10179 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10180 set_freesync_fixed_config(dm_new_crtc_state);
10181 }
a85ba005
NC
10182 }
10183
4b9674e5
LL
10184 ret = dm_atomic_get_state(state, &dm_state);
10185 if (ret)
10186 goto fail;
e7b07cee 10187
4b9674e5
LL
10188 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10189 crtc->base.id);
62f55537 10190
4b9674e5
LL
10191 /* i.e. reset mode */
10192 if (dc_remove_stream_from_ctx(
10193 dm->dc,
10194 dm_state->context,
10195 dm_old_crtc_state->stream) != DC_OK) {
10196 ret = -EINVAL;
10197 goto fail;
10198 }
62f55537 10199
4b9674e5
LL
10200 dc_stream_release(dm_old_crtc_state->stream);
10201 dm_new_crtc_state->stream = NULL;
bb47de73 10202
4b9674e5 10203 reset_freesync_config_for_crtc(dm_new_crtc_state);
62f55537 10204
4b9674e5 10205 *lock_and_validation_needed = true;
62f55537 10206
4b9674e5
LL
10207 } else {/* Add stream for any updated/enabled CRTC */
10208 /*
10209 * Quick fix to prevent NULL pointer on new_stream when
10210 * added MST connectors not found in existing crtc_state in the chained mode
10211 * TODO: need to dig out the root cause of that
10212 */
10213 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10214 goto skip_modeset;
62f55537 10215
4b9674e5
LL
10216 if (modereset_required(new_crtc_state))
10217 goto skip_modeset;
62f55537 10218
4b9674e5
LL
10219 if (modeset_required(new_crtc_state, new_stream,
10220 dm_old_crtc_state->stream)) {
62f55537 10221
4b9674e5 10222 WARN_ON(dm_new_crtc_state->stream);
eb3dc897 10223
4b9674e5
LL
10224 ret = dm_atomic_get_state(state, &dm_state);
10225 if (ret)
10226 goto fail;
27b3f4fc 10227
4b9674e5 10228 dm_new_crtc_state->stream = new_stream;
62f55537 10229
4b9674e5 10230 dc_stream_retain(new_stream);
1dc90497 10231
4711c033
LT
10232 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10233 crtc->base.id);
1dc90497 10234
4b9674e5
LL
10235 if (dc_add_stream_to_ctx(
10236 dm->dc,
10237 dm_state->context,
10238 dm_new_crtc_state->stream) != DC_OK) {
10239 ret = -EINVAL;
10240 goto fail;
9b690ef3
BL
10241 }
10242
4b9674e5
LL
10243 *lock_and_validation_needed = true;
10244 }
10245 }
e277adc5 10246
4b9674e5
LL
10247skip_modeset:
10248 /* Release extra reference */
10249 if (new_stream)
10250 dc_stream_release(new_stream);
e277adc5 10251
4b9674e5
LL
10252 /*
10253 * We want to do dc stream updates that do not require a
10254 * full modeset below.
10255 */
2afda735 10256 if (!(enable && aconnector && new_crtc_state->active))
4b9674e5
LL
10257 return 0;
10258 /*
10259 * Given above conditions, the dc state cannot be NULL because:
10260 * 1. We're in the process of enabling CRTCs (just been added
10261 * to the dc context, or already is on the context)
10262 * 2. Has a valid connector attached, and
10263 * 3. Is currently active and enabled.
10264 * => The dc stream state currently exists.
10265 */
10266 BUG_ON(dm_new_crtc_state->stream == NULL);
a9e8d275 10267
4b9674e5 10268 /* Scaling or underscan settings */
c521fc31
RL
10269 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10270 drm_atomic_crtc_needs_modeset(new_crtc_state))
4b9674e5
LL
10271 update_stream_scaling_settings(
10272 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
98e6436d 10273
b05e2c5e
DF
10274 /* ABM settings */
10275 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10276
4b9674e5
LL
10277 /*
10278 * Color management settings. We also update color properties
10279 * when a modeset is needed, to ensure it gets reprogrammed.
10280 */
10281 if (dm_new_crtc_state->base.color_mgmt_changed ||
10282 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
cf020d49 10283 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
4b9674e5
LL
10284 if (ret)
10285 goto fail;
62f55537 10286 }
e7b07cee 10287
4b9674e5
LL
10288 /* Update Freesync settings. */
10289 get_freesync_config_for_crtc(dm_new_crtc_state,
10290 dm_new_conn_state);
10291
62f55537 10292 return ret;
9635b754
DS
10293
10294fail:
10295 if (new_stream)
10296 dc_stream_release(new_stream);
10297 return ret;
62f55537 10298}
9b690ef3 10299
f6ff2a08
NK
10300static bool should_reset_plane(struct drm_atomic_state *state,
10301 struct drm_plane *plane,
10302 struct drm_plane_state *old_plane_state,
10303 struct drm_plane_state *new_plane_state)
10304{
10305 struct drm_plane *other;
10306 struct drm_plane_state *old_other_state, *new_other_state;
10307 struct drm_crtc_state *new_crtc_state;
10308 int i;
10309
70a1efac
NK
10310 /*
10311 * TODO: Remove this hack once the checks below are sufficient
10312 * enough to determine when we need to reset all the planes on
10313 * the stream.
10314 */
10315 if (state->allow_modeset)
10316 return true;
10317
f6ff2a08
NK
10318 /* Exit early if we know that we're adding or removing the plane. */
10319 if (old_plane_state->crtc != new_plane_state->crtc)
10320 return true;
10321
10322 /* old crtc == new_crtc == NULL, plane not in context. */
10323 if (!new_plane_state->crtc)
10324 return false;
10325
10326 new_crtc_state =
10327 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10328
10329 if (!new_crtc_state)
10330 return true;
10331
7316c4ad
NK
10332 /* CRTC Degamma changes currently require us to recreate planes. */
10333 if (new_crtc_state->color_mgmt_changed)
10334 return true;
10335
f6ff2a08
NK
10336 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10337 return true;
10338
10339 /*
10340 * If there are any new primary or overlay planes being added or
10341 * removed then the z-order can potentially change. To ensure
10342 * correct z-order and pipe acquisition the current DC architecture
10343 * requires us to remove and recreate all existing planes.
10344 *
10345 * TODO: Come up with a more elegant solution for this.
10346 */
10347 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6eed95b0 10348 struct amdgpu_framebuffer *old_afb, *new_afb;
f6ff2a08
NK
10349 if (other->type == DRM_PLANE_TYPE_CURSOR)
10350 continue;
10351
10352 if (old_other_state->crtc != new_plane_state->crtc &&
10353 new_other_state->crtc != new_plane_state->crtc)
10354 continue;
10355
10356 if (old_other_state->crtc != new_other_state->crtc)
10357 return true;
10358
dc4cb30d
NK
10359 /* Src/dst size and scaling updates. */
10360 if (old_other_state->src_w != new_other_state->src_w ||
10361 old_other_state->src_h != new_other_state->src_h ||
10362 old_other_state->crtc_w != new_other_state->crtc_w ||
10363 old_other_state->crtc_h != new_other_state->crtc_h)
10364 return true;
10365
10366 /* Rotation / mirroring updates. */
10367 if (old_other_state->rotation != new_other_state->rotation)
10368 return true;
10369
10370 /* Blending updates. */
10371 if (old_other_state->pixel_blend_mode !=
10372 new_other_state->pixel_blend_mode)
10373 return true;
10374
10375 /* Alpha updates. */
10376 if (old_other_state->alpha != new_other_state->alpha)
10377 return true;
10378
10379 /* Colorspace changes. */
10380 if (old_other_state->color_range != new_other_state->color_range ||
10381 old_other_state->color_encoding != new_other_state->color_encoding)
10382 return true;
10383
9a81cc60
NK
10384 /* Framebuffer checks fall at the end. */
10385 if (!old_other_state->fb || !new_other_state->fb)
10386 continue;
10387
10388 /* Pixel format changes can require bandwidth updates. */
10389 if (old_other_state->fb->format != new_other_state->fb->format)
10390 return true;
10391
6eed95b0
BN
10392 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10393 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9a81cc60
NK
10394
10395 /* Tiling and DCC changes also require bandwidth updates. */
37384b3f
BN
10396 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10397 old_afb->base.modifier != new_afb->base.modifier)
f6ff2a08
NK
10398 return true;
10399 }
10400
10401 return false;
10402}
10403
b0455fda
SS
10404static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10405 struct drm_plane_state *new_plane_state,
10406 struct drm_framebuffer *fb)
10407{
e72868c4
SS
10408 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10409 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
b0455fda 10410 unsigned int pitch;
e72868c4 10411 bool linear;
b0455fda
SS
10412
10413 if (fb->width > new_acrtc->max_cursor_width ||
10414 fb->height > new_acrtc->max_cursor_height) {
10415 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10416 new_plane_state->fb->width,
10417 new_plane_state->fb->height);
10418 return -EINVAL;
10419 }
10420 if (new_plane_state->src_w != fb->width << 16 ||
10421 new_plane_state->src_h != fb->height << 16) {
10422 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10423 return -EINVAL;
10424 }
10425
10426 /* Pitch in pixels */
10427 pitch = fb->pitches[0] / fb->format->cpp[0];
10428
10429 if (fb->width != pitch) {
10430 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10431 fb->width, pitch);
10432 return -EINVAL;
10433 }
10434
10435 switch (pitch) {
10436 case 64:
10437 case 128:
10438 case 256:
10439 /* FB pitch is supported by cursor plane */
10440 break;
10441 default:
10442 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10443 return -EINVAL;
10444 }
10445
e72868c4
SS
10446 /* Core DRM takes care of checking FB modifiers, so we only need to
10447 * check tiling flags when the FB doesn't have a modifier. */
10448 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10449 if (adev->family < AMDGPU_FAMILY_AI) {
10450 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10451 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10452 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10453 } else {
10454 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10455 }
10456 if (!linear) {
10457 DRM_DEBUG_ATOMIC("Cursor FB not linear");
10458 return -EINVAL;
10459 }
10460 }
10461
b0455fda
SS
10462 return 0;
10463}
10464
9e869063
LL
10465static int dm_update_plane_state(struct dc *dc,
10466 struct drm_atomic_state *state,
10467 struct drm_plane *plane,
10468 struct drm_plane_state *old_plane_state,
10469 struct drm_plane_state *new_plane_state,
10470 bool enable,
10471 bool *lock_and_validation_needed)
62f55537 10472{
eb3dc897
NK
10473
10474 struct dm_atomic_state *dm_state = NULL;
62f55537 10475 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
0bc9706d 10476 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
54d76575 10477 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
54d76575 10478 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
626bf90f 10479 struct amdgpu_crtc *new_acrtc;
f6ff2a08 10480 bool needs_reset;
62f55537 10481 int ret = 0;
e7b07cee 10482
9b690ef3 10483
9e869063
LL
10484 new_plane_crtc = new_plane_state->crtc;
10485 old_plane_crtc = old_plane_state->crtc;
10486 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10487 dm_old_plane_state = to_dm_plane_state(old_plane_state);
62f55537 10488
626bf90f
SS
10489 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10490 if (!enable || !new_plane_crtc ||
10491 drm_atomic_plane_disabling(plane->state, new_plane_state))
10492 return 0;
10493
10494 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10495
5f581248
SS
10496 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10497 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10498 return -EINVAL;
10499 }
10500
24f99d2b 10501 if (new_plane_state->fb) {
b0455fda
SS
10502 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10503 new_plane_state->fb);
10504 if (ret)
10505 return ret;
24f99d2b
SS
10506 }
10507
9e869063 10508 return 0;
626bf90f 10509 }
9b690ef3 10510
f6ff2a08
NK
10511 needs_reset = should_reset_plane(state, plane, old_plane_state,
10512 new_plane_state);
10513
9e869063
LL
10514 /* Remove any changed/removed planes */
10515 if (!enable) {
f6ff2a08 10516 if (!needs_reset)
9e869063 10517 return 0;
a7b06724 10518
9e869063
LL
10519 if (!old_plane_crtc)
10520 return 0;
62f55537 10521
9e869063
LL
10522 old_crtc_state = drm_atomic_get_old_crtc_state(
10523 state, old_plane_crtc);
10524 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9b690ef3 10525
9e869063
LL
10526 if (!dm_old_crtc_state->stream)
10527 return 0;
62f55537 10528
9e869063
LL
10529 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10530 plane->base.id, old_plane_crtc->base.id);
9b690ef3 10531
9e869063
LL
10532 ret = dm_atomic_get_state(state, &dm_state);
10533 if (ret)
10534 return ret;
eb3dc897 10535
9e869063
LL
10536 if (!dc_remove_plane_from_context(
10537 dc,
10538 dm_old_crtc_state->stream,
10539 dm_old_plane_state->dc_state,
10540 dm_state->context)) {
62f55537 10541
c3537613 10542 return -EINVAL;
9e869063 10543 }
e7b07cee 10544
9b690ef3 10545
9e869063
LL
10546 dc_plane_state_release(dm_old_plane_state->dc_state);
10547 dm_new_plane_state->dc_state = NULL;
1dc90497 10548
9e869063 10549 *lock_and_validation_needed = true;
1dc90497 10550
9e869063
LL
10551 } else { /* Add new planes */
10552 struct dc_plane_state *dc_new_plane_state;
1dc90497 10553
9e869063
LL
10554 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10555 return 0;
e7b07cee 10556
9e869063
LL
10557 if (!new_plane_crtc)
10558 return 0;
e7b07cee 10559
9e869063
LL
10560 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10561 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1dc90497 10562
9e869063
LL
10563 if (!dm_new_crtc_state->stream)
10564 return 0;
62f55537 10565
f6ff2a08 10566 if (!needs_reset)
9e869063 10567 return 0;
62f55537 10568
8c44515b
AP
10569 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10570 if (ret)
10571 return ret;
10572
9e869063 10573 WARN_ON(dm_new_plane_state->dc_state);
9b690ef3 10574
9e869063
LL
10575 dc_new_plane_state = dc_create_plane_state(dc);
10576 if (!dc_new_plane_state)
10577 return -ENOMEM;
62f55537 10578
4711c033
LT
10579 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10580 plane->base.id, new_plane_crtc->base.id);
8c45c5db 10581
695af5f9 10582 ret = fill_dc_plane_attributes(
1348969a 10583 drm_to_adev(new_plane_crtc->dev),
9e869063
LL
10584 dc_new_plane_state,
10585 new_plane_state,
10586 new_crtc_state);
10587 if (ret) {
10588 dc_plane_state_release(dc_new_plane_state);
10589 return ret;
10590 }
62f55537 10591
9e869063
LL
10592 ret = dm_atomic_get_state(state, &dm_state);
10593 if (ret) {
10594 dc_plane_state_release(dc_new_plane_state);
10595 return ret;
10596 }
eb3dc897 10597
9e869063
LL
10598 /*
10599 * Any atomic check errors that occur after this will
10600 * not need a release. The plane state will be attached
10601 * to the stream, and therefore part of the atomic
10602 * state. It'll be released when the atomic state is
10603 * cleaned.
10604 */
10605 if (!dc_add_plane_to_context(
10606 dc,
10607 dm_new_crtc_state->stream,
10608 dc_new_plane_state,
10609 dm_state->context)) {
62f55537 10610
9e869063
LL
10611 dc_plane_state_release(dc_new_plane_state);
10612 return -EINVAL;
10613 }
8c45c5db 10614
9e869063 10615 dm_new_plane_state->dc_state = dc_new_plane_state;
000b59ea 10616
9e869063
LL
10617 /* Tell DC to do a full surface update every time there
10618 * is a plane change. Inefficient, but works for now.
10619 */
10620 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10621
10622 *lock_and_validation_needed = true;
62f55537 10623 }
e7b07cee
HW
10624
10625
62f55537
AG
10626 return ret;
10627}
a87fa993 10628
12f4849a
SS
10629static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10630 struct drm_crtc *crtc,
10631 struct drm_crtc_state *new_crtc_state)
10632{
d1bfbe8a
SS
10633 struct drm_plane *cursor = crtc->cursor, *underlying;
10634 struct drm_plane_state *new_cursor_state, *new_underlying_state;
10635 int i;
10636 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
12f4849a
SS
10637
10638 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10639 * cursor per pipe but it's going to inherit the scaling and
10640 * positioning from the underlying pipe. Check the cursor plane's
d1bfbe8a 10641 * blending properties match the underlying planes'. */
12f4849a 10642
d1bfbe8a
SS
10643 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10644 if (!new_cursor_state || !new_cursor_state->fb) {
12f4849a
SS
10645 return 0;
10646 }
10647
10648 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10649 (new_cursor_state->src_w >> 16);
10650 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10651 (new_cursor_state->src_h >> 16);
10652
d1bfbe8a
SS
10653 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10654 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10655 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10656 continue;
12f4849a 10657
d1bfbe8a
SS
10658 /* Ignore disabled planes */
10659 if (!new_underlying_state->fb)
10660 continue;
10661
10662 underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10663 (new_underlying_state->src_w >> 16);
10664 underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10665 (new_underlying_state->src_h >> 16);
10666
10667 if (cursor_scale_w != underlying_scale_w ||
10668 cursor_scale_h != underlying_scale_h) {
10669 drm_dbg_atomic(crtc->dev,
10670 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10671 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10672 return -EINVAL;
10673 }
10674
10675 /* If this plane covers the whole CRTC, no need to check planes underneath */
10676 if (new_underlying_state->crtc_x <= 0 &&
10677 new_underlying_state->crtc_y <= 0 &&
10678 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10679 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10680 break;
12f4849a
SS
10681 }
10682
10683 return 0;
10684}
10685
e10517b3 10686#if defined(CONFIG_DRM_AMD_DC_DCN)
44be939f
ML
10687static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10688{
10689 struct drm_connector *connector;
10690 struct drm_connector_state *conn_state;
10691 struct amdgpu_dm_connector *aconnector = NULL;
10692 int i;
10693 for_each_new_connector_in_state(state, connector, conn_state, i) {
10694 if (conn_state->crtc != crtc)
10695 continue;
10696
10697 aconnector = to_amdgpu_dm_connector(connector);
10698 if (!aconnector->port || !aconnector->mst_port)
10699 aconnector = NULL;
10700 else
10701 break;
10702 }
10703
10704 if (!aconnector)
10705 return 0;
10706
10707 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10708}
e10517b3 10709#endif
44be939f 10710
b8592b48
LL
10711/**
10712 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10713 * @dev: The DRM device
10714 * @state: The atomic state to commit
10715 *
10716 * Validate that the given atomic state is programmable by DC into hardware.
10717 * This involves constructing a &struct dc_state reflecting the new hardware
10718 * state we wish to commit, then querying DC to see if it is programmable. It's
10719 * important not to modify the existing DC state. Otherwise, atomic_check
10720 * may unexpectedly commit hardware changes.
10721 *
10722 * When validating the DC state, it's important that the right locks are
10723 * acquired. For full updates case which removes/adds/updates streams on one
10724 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10725 * that any such full update commit will wait for completion of any outstanding
f6d7c7fa 10726 * flip using DRMs synchronization events.
b8592b48
LL
10727 *
10728 * Note that DM adds the affected connectors for all CRTCs in state, when that
10729 * might not seem necessary. This is because DC stream creation requires the
10730 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10731 * be possible but non-trivial - a possible TODO item.
10732 *
10733 * Return: -Error code if validation failed.
10734 */
7578ecda
AD
10735static int amdgpu_dm_atomic_check(struct drm_device *dev,
10736 struct drm_atomic_state *state)
62f55537 10737{
1348969a 10738 struct amdgpu_device *adev = drm_to_adev(dev);
eb3dc897 10739 struct dm_atomic_state *dm_state = NULL;
62f55537 10740 struct dc *dc = adev->dm.dc;
62f55537 10741 struct drm_connector *connector;
c2cea706 10742 struct drm_connector_state *old_con_state, *new_con_state;
62f55537 10743 struct drm_crtc *crtc;
fc9e9920 10744 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9e869063
LL
10745 struct drm_plane *plane;
10746 struct drm_plane_state *old_plane_state, *new_plane_state;
74a16675 10747 enum dc_status status;
1e88ad0a 10748 int ret, i;
62f55537 10749 bool lock_and_validation_needed = false;
886876ec 10750 struct dm_crtc_state *dm_old_crtc_state;
6513104b
HW
10751#if defined(CONFIG_DRM_AMD_DC_DCN)
10752 struct dsc_mst_fairness_vars vars[MAX_PIPES];
41724ea2
BL
10753 struct drm_dp_mst_topology_state *mst_state;
10754 struct drm_dp_mst_topology_mgr *mgr;
6513104b 10755#endif
62f55537 10756
e8a98235 10757 trace_amdgpu_dm_atomic_check_begin(state);
c44a22b3 10758
62f55537 10759 ret = drm_atomic_helper_check_modeset(dev, state);
68ca1c3e
S
10760 if (ret) {
10761 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
01e28f9c 10762 goto fail;
68ca1c3e 10763 }
62f55537 10764
c5892a10
SW
10765 /* Check connector changes */
10766 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10767 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10768 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10769
10770 /* Skip connectors that are disabled or part of modeset already. */
10771 if (!old_con_state->crtc && !new_con_state->crtc)
10772 continue;
10773
10774 if (!new_con_state->crtc)
10775 continue;
10776
10777 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10778 if (IS_ERR(new_crtc_state)) {
68ca1c3e 10779 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
c5892a10
SW
10780 ret = PTR_ERR(new_crtc_state);
10781 goto fail;
10782 }
10783
10784 if (dm_old_con_state->abm_level !=
10785 dm_new_con_state->abm_level)
10786 new_crtc_state->connectors_changed = true;
10787 }
10788
e10517b3 10789#if defined(CONFIG_DRM_AMD_DC_DCN)
349a19b2 10790 if (dc_resource_is_dsc_encoding_supported(dc)) {
44be939f
ML
10791 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10792 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10793 ret = add_affected_mst_dsc_crtcs(state, crtc);
68ca1c3e
S
10794 if (ret) {
10795 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
44be939f 10796 goto fail;
68ca1c3e 10797 }
44be939f
ML
10798 }
10799 }
10800 }
e10517b3 10801#endif
1e88ad0a 10802 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
886876ec
EB
10803 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10804
1e88ad0a 10805 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
98e6436d 10806 !new_crtc_state->color_mgmt_changed &&
886876ec
EB
10807 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10808 dm_old_crtc_state->dsc_force_changed == false)
1e88ad0a 10809 continue;
7bef1af3 10810
03fc4cf4 10811 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
68ca1c3e
S
10812 if (ret) {
10813 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
03fc4cf4 10814 goto fail;
68ca1c3e 10815 }
03fc4cf4 10816
1e88ad0a
S
10817 if (!new_crtc_state->enable)
10818 continue;
fc9e9920 10819
1e88ad0a 10820 ret = drm_atomic_add_affected_connectors(state, crtc);
68ca1c3e
S
10821 if (ret) {
10822 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
706bc8c5 10823 goto fail;
68ca1c3e 10824 }
fc9e9920 10825
1e88ad0a 10826 ret = drm_atomic_add_affected_planes(state, crtc);
68ca1c3e
S
10827 if (ret) {
10828 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
1e88ad0a 10829 goto fail;
68ca1c3e 10830 }
115a385c 10831
cbac53f7 10832 if (dm_old_crtc_state->dsc_force_changed)
115a385c 10833 new_crtc_state->mode_changed = true;
e7b07cee
HW
10834 }
10835
2d9e6431
NK
10836 /*
10837 * Add all primary and overlay planes on the CRTC to the state
10838 * whenever a plane is enabled to maintain correct z-ordering
10839 * and to enable fast surface updates.
10840 */
10841 drm_for_each_crtc(crtc, dev) {
10842 bool modified = false;
10843
10844 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10845 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10846 continue;
10847
10848 if (new_plane_state->crtc == crtc ||
10849 old_plane_state->crtc == crtc) {
10850 modified = true;
10851 break;
10852 }
10853 }
10854
10855 if (!modified)
10856 continue;
10857
10858 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10859 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10860 continue;
10861
10862 new_plane_state =
10863 drm_atomic_get_plane_state(state, plane);
10864
10865 if (IS_ERR(new_plane_state)) {
10866 ret = PTR_ERR(new_plane_state);
68ca1c3e 10867 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
2d9e6431
NK
10868 goto fail;
10869 }
10870 }
10871 }
10872
62f55537 10873 /* Remove exiting planes if they are modified */
9e869063
LL
10874 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10875 ret = dm_update_plane_state(dc, state, plane,
10876 old_plane_state,
10877 new_plane_state,
10878 false,
10879 &lock_and_validation_needed);
68ca1c3e
S
10880 if (ret) {
10881 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 10882 goto fail;
68ca1c3e 10883 }
62f55537
AG
10884 }
10885
10886 /* Disable all crtcs which require disable */
4b9674e5
LL
10887 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10888 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10889 old_crtc_state,
10890 new_crtc_state,
10891 false,
10892 &lock_and_validation_needed);
68ca1c3e
S
10893 if (ret) {
10894 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
4b9674e5 10895 goto fail;
68ca1c3e 10896 }
62f55537
AG
10897 }
10898
10899 /* Enable all crtcs which require enable */
4b9674e5
LL
10900 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10901 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10902 old_crtc_state,
10903 new_crtc_state,
10904 true,
10905 &lock_and_validation_needed);
68ca1c3e
S
10906 if (ret) {
10907 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
4b9674e5 10908 goto fail;
68ca1c3e 10909 }
62f55537
AG
10910 }
10911
10912 /* Add new/modified planes */
9e869063
LL
10913 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10914 ret = dm_update_plane_state(dc, state, plane,
10915 old_plane_state,
10916 new_plane_state,
10917 true,
10918 &lock_and_validation_needed);
68ca1c3e
S
10919 if (ret) {
10920 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9e869063 10921 goto fail;
68ca1c3e 10922 }
62f55537
AG
10923 }
10924
b349f76e
ES
10925 /* Run this here since we want to validate the streams we created */
10926 ret = drm_atomic_helper_check_planes(dev, state);
68ca1c3e
S
10927 if (ret) {
10928 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
b349f76e 10929 goto fail;
68ca1c3e 10930 }
62f55537 10931
12f4849a
SS
10932 /* Check cursor planes scaling */
10933 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10934 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
68ca1c3e
S
10935 if (ret) {
10936 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
12f4849a 10937 goto fail;
68ca1c3e 10938 }
12f4849a
SS
10939 }
10940
43d10d30
NK
10941 if (state->legacy_cursor_update) {
10942 /*
10943 * This is a fast cursor update coming from the plane update
10944 * helper, check if it can be done asynchronously for better
10945 * performance.
10946 */
10947 state->async_update =
10948 !drm_atomic_helper_async_check(dev, state);
10949
10950 /*
10951 * Skip the remaining global validation if this is an async
10952 * update. Cursor updates can be done without affecting
10953 * state or bandwidth calcs and this avoids the performance
10954 * penalty of locking the private state object and
10955 * allocating a new dc_state.
10956 */
10957 if (state->async_update)
10958 return 0;
10959 }
10960
ebdd27e1 10961 /* Check scaling and underscan changes*/
1f6010a9 10962 /* TODO Removed scaling changes validation due to inability to commit
e7b07cee
HW
10963 * new stream into context w\o causing full reset. Need to
10964 * decide how to handle.
10965 */
c2cea706 10966 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
54d76575
LSL
10967 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10968 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10969 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
e7b07cee
HW
10970
10971 /* Skip any modesets/resets */
0bc9706d
LSL
10972 if (!acrtc || drm_atomic_crtc_needs_modeset(
10973 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
e7b07cee
HW
10974 continue;
10975
b830ebc9 10976 /* Skip any thing not scale or underscan changes */
54d76575 10977 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
e7b07cee
HW
10978 continue;
10979
10980 lock_and_validation_needed = true;
10981 }
10982
41724ea2
BL
10983#if defined(CONFIG_DRM_AMD_DC_DCN)
10984 /* set the slot info for each mst_state based on the link encoding format */
10985 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10986 struct amdgpu_dm_connector *aconnector;
10987 struct drm_connector *connector;
10988 struct drm_connector_list_iter iter;
10989 u8 link_coding_cap;
10990
10991 if (!mgr->mst_state )
10992 continue;
10993
10994 drm_connector_list_iter_begin(dev, &iter);
10995 drm_for_each_connector_iter(connector, &iter) {
10996 int id = connector->index;
10997
10998 if (id == mst_state->mgr->conn_base_id) {
10999 aconnector = to_amdgpu_dm_connector(connector);
11000 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11001 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11002
11003 break;
11004 }
11005 }
11006 drm_connector_list_iter_end(&iter);
11007
11008 }
11009#endif
f6d7c7fa
NK
11010 /**
11011 * Streams and planes are reset when there are changes that affect
11012 * bandwidth. Anything that affects bandwidth needs to go through
11013 * DC global validation to ensure that the configuration can be applied
11014 * to hardware.
11015 *
11016 * We have to currently stall out here in atomic_check for outstanding
11017 * commits to finish in this case because our IRQ handlers reference
11018 * DRM state directly - we can end up disabling interrupts too early
11019 * if we don't.
11020 *
11021 * TODO: Remove this stall and drop DM state private objects.
a87fa993 11022 */
f6d7c7fa 11023 if (lock_and_validation_needed) {
eb3dc897 11024 ret = dm_atomic_get_state(state, &dm_state);
68ca1c3e
S
11025 if (ret) {
11026 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
eb3dc897 11027 goto fail;
68ca1c3e 11028 }
e7b07cee
HW
11029
11030 ret = do_aquire_global_lock(dev, state);
68ca1c3e
S
11031 if (ret) {
11032 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
e7b07cee 11033 goto fail;
68ca1c3e 11034 }
1dc90497 11035
d9fe1a4c 11036#if defined(CONFIG_DRM_AMD_DC_DCN)
68ca1c3e
S
11037 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11038 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
8c20a1ed 11039 goto fail;
68ca1c3e 11040 }
8c20a1ed 11041
6513104b 11042 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
68ca1c3e
S
11043 if (ret) {
11044 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
29b9ba74 11045 goto fail;
68ca1c3e 11046 }
d9fe1a4c 11047#endif
29b9ba74 11048
ded58c7b
ZL
11049 /*
11050 * Perform validation of MST topology in the state:
11051 * We need to perform MST atomic check before calling
11052 * dc_validate_global_state(), or there is a chance
11053 * to get stuck in an infinite loop and hang eventually.
11054 */
11055 ret = drm_dp_mst_atomic_check(state);
68ca1c3e
S
11056 if (ret) {
11057 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
ded58c7b 11058 goto fail;
68ca1c3e 11059 }
74a16675
RS
11060 status = dc_validate_global_state(dc, dm_state->context, false);
11061 if (status != DC_OK) {
68ca1c3e 11062 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
74a16675 11063 dc_status_to_str(status), status);
e7b07cee
HW
11064 ret = -EINVAL;
11065 goto fail;
11066 }
bd200d19 11067 } else {
674e78ac 11068 /*
bd200d19
NK
11069 * The commit is a fast update. Fast updates shouldn't change
11070 * the DC context, affect global validation, and can have their
11071 * commit work done in parallel with other commits not touching
11072 * the same resource. If we have a new DC context as part of
11073 * the DM atomic state from validation we need to free it and
11074 * retain the existing one instead.
fde9f39a
MR
11075 *
11076 * Furthermore, since the DM atomic state only contains the DC
11077 * context and can safely be annulled, we can free the state
11078 * and clear the associated private object now to free
11079 * some memory and avoid a possible use-after-free later.
674e78ac 11080 */
bd200d19 11081
fde9f39a
MR
11082 for (i = 0; i < state->num_private_objs; i++) {
11083 struct drm_private_obj *obj = state->private_objs[i].ptr;
bd200d19 11084
fde9f39a
MR
11085 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11086 int j = state->num_private_objs-1;
bd200d19 11087
fde9f39a
MR
11088 dm_atomic_destroy_state(obj,
11089 state->private_objs[i].state);
11090
11091 /* If i is not at the end of the array then the
11092 * last element needs to be moved to where i was
11093 * before the array can safely be truncated.
11094 */
11095 if (i != j)
11096 state->private_objs[i] =
11097 state->private_objs[j];
bd200d19 11098
fde9f39a
MR
11099 state->private_objs[j].ptr = NULL;
11100 state->private_objs[j].state = NULL;
11101 state->private_objs[j].old_state = NULL;
11102 state->private_objs[j].new_state = NULL;
11103
11104 state->num_private_objs = j;
11105 break;
11106 }
bd200d19 11107 }
e7b07cee
HW
11108 }
11109
caff0e66
NK
11110 /* Store the overall update type for use later in atomic check. */
11111 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11112 struct dm_crtc_state *dm_new_crtc_state =
11113 to_dm_crtc_state(new_crtc_state);
11114
f6d7c7fa
NK
11115 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11116 UPDATE_TYPE_FULL :
11117 UPDATE_TYPE_FAST;
e7b07cee
HW
11118 }
11119
11120 /* Must be success */
11121 WARN_ON(ret);
e8a98235
RS
11122
11123 trace_amdgpu_dm_atomic_check_finish(state, ret);
11124
e7b07cee
HW
11125 return ret;
11126
11127fail:
11128 if (ret == -EDEADLK)
01e28f9c 11129 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
e7b07cee 11130 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
01e28f9c 11131 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
e7b07cee 11132 else
01e28f9c 11133 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
e7b07cee 11134
e8a98235
RS
11135 trace_amdgpu_dm_atomic_check_finish(state, ret);
11136
e7b07cee
HW
11137 return ret;
11138}
11139
3ee6b26b
AD
11140static bool is_dp_capable_without_timing_msa(struct dc *dc,
11141 struct amdgpu_dm_connector *amdgpu_dm_connector)
e7b07cee
HW
11142{
11143 uint8_t dpcd_data;
11144 bool capable = false;
11145
c84dec2f 11146 if (amdgpu_dm_connector->dc_link &&
e7b07cee
HW
11147 dm_helpers_dp_read_dpcd(
11148 NULL,
c84dec2f 11149 amdgpu_dm_connector->dc_link,
e7b07cee
HW
11150 DP_DOWN_STREAM_PORT_COUNT,
11151 &dpcd_data,
11152 sizeof(dpcd_data))) {
11153 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
11154 }
11155
11156 return capable;
11157}
f9b4f20c 11158
46db138d
SW
11159static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11160 unsigned int offset,
11161 unsigned int total_length,
11162 uint8_t *data,
11163 unsigned int length,
11164 struct amdgpu_hdmi_vsdb_info *vsdb)
11165{
11166 bool res;
11167 union dmub_rb_cmd cmd;
11168 struct dmub_cmd_send_edid_cea *input;
11169 struct dmub_cmd_edid_cea_output *output;
11170
11171 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11172 return false;
11173
11174 memset(&cmd, 0, sizeof(cmd));
11175
11176 input = &cmd.edid_cea.data.input;
11177
11178 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11179 cmd.edid_cea.header.sub_type = 0;
11180 cmd.edid_cea.header.payload_bytes =
11181 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11182 input->offset = offset;
11183 input->length = length;
11184 input->total_length = total_length;
11185 memcpy(input->payload, data, length);
11186
11187 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11188 if (!res) {
11189 DRM_ERROR("EDID CEA parser failed\n");
11190 return false;
11191 }
11192
11193 output = &cmd.edid_cea.data.output;
11194
11195 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11196 if (!output->ack.success) {
11197 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11198 output->ack.offset);
11199 }
11200 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11201 if (!output->amd_vsdb.vsdb_found)
11202 return false;
11203
11204 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11205 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11206 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11207 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11208 } else {
b76a8062 11209 DRM_WARN("Unknown EDID CEA parser results\n");
46db138d
SW
11210 return false;
11211 }
11212
11213 return true;
11214}
11215
11216static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
f9b4f20c
SW
11217 uint8_t *edid_ext, int len,
11218 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11219{
11220 int i;
f9b4f20c
SW
11221
11222 /* send extension block to DMCU for parsing */
11223 for (i = 0; i < len; i += 8) {
11224 bool res;
11225 int offset;
11226
11227 /* send 8 bytes a time */
46db138d 11228 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
f9b4f20c
SW
11229 return false;
11230
11231 if (i+8 == len) {
11232 /* EDID block sent completed, expect result */
11233 int version, min_rate, max_rate;
11234
46db138d 11235 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
f9b4f20c
SW
11236 if (res) {
11237 /* amd vsdb found */
11238 vsdb_info->freesync_supported = 1;
11239 vsdb_info->amd_vsdb_version = version;
11240 vsdb_info->min_refresh_rate_hz = min_rate;
11241 vsdb_info->max_refresh_rate_hz = max_rate;
11242 return true;
11243 }
11244 /* not amd vsdb */
11245 return false;
11246 }
11247
11248 /* check for ack*/
46db138d 11249 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
f9b4f20c
SW
11250 if (!res)
11251 return false;
11252 }
11253
11254 return false;
11255}
11256
46db138d
SW
11257static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11258 uint8_t *edid_ext, int len,
11259 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11260{
11261 int i;
11262
11263 /* send extension block to DMCU for parsing */
11264 for (i = 0; i < len; i += 8) {
11265 /* send 8 bytes a time */
11266 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11267 return false;
11268 }
11269
11270 return vsdb_info->freesync_supported;
11271}
11272
11273static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11274 uint8_t *edid_ext, int len,
11275 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11276{
11277 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11278
11279 if (adev->dm.dmub_srv)
11280 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11281 else
11282 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11283}
11284
7c7dd774 11285static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
f9b4f20c
SW
11286 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11287{
11288 uint8_t *edid_ext = NULL;
11289 int i;
11290 bool valid_vsdb_found = false;
11291
11292 /*----- drm_find_cea_extension() -----*/
11293 /* No EDID or EDID extensions */
11294 if (edid == NULL || edid->extensions == 0)
7c7dd774 11295 return -ENODEV;
f9b4f20c
SW
11296
11297 /* Find CEA extension */
11298 for (i = 0; i < edid->extensions; i++) {
11299 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11300 if (edid_ext[0] == CEA_EXT)
11301 break;
11302 }
11303
11304 if (i == edid->extensions)
7c7dd774 11305 return -ENODEV;
f9b4f20c
SW
11306
11307 /*----- cea_db_offsets() -----*/
11308 if (edid_ext[0] != CEA_EXT)
7c7dd774 11309 return -ENODEV;
f9b4f20c
SW
11310
11311 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
7c7dd774
AB
11312
11313 return valid_vsdb_found ? i : -ENODEV;
f9b4f20c
SW
11314}
11315
98e6436d
AK
11316void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11317 struct edid *edid)
e7b07cee 11318{
eb0709ba 11319 int i = 0;
e7b07cee
HW
11320 struct detailed_timing *timing;
11321 struct detailed_non_pixel *data;
11322 struct detailed_data_monitor_range *range;
c84dec2f
HW
11323 struct amdgpu_dm_connector *amdgpu_dm_connector =
11324 to_amdgpu_dm_connector(connector);
bb47de73 11325 struct dm_connector_state *dm_con_state = NULL;
9ad54467 11326 struct dc_sink *sink;
e7b07cee
HW
11327
11328 struct drm_device *dev = connector->dev;
1348969a 11329 struct amdgpu_device *adev = drm_to_adev(dev);
bb47de73 11330 bool freesync_capable = false;
f9b4f20c 11331 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
b830ebc9 11332
8218d7f1
HW
11333 if (!connector->state) {
11334 DRM_ERROR("%s - Connector has no state", __func__);
bb47de73 11335 goto update;
8218d7f1
HW
11336 }
11337
9b2fdc33
AP
11338 sink = amdgpu_dm_connector->dc_sink ?
11339 amdgpu_dm_connector->dc_sink :
11340 amdgpu_dm_connector->dc_em_sink;
11341
11342 if (!edid || !sink) {
98e6436d
AK
11343 dm_con_state = to_dm_connector_state(connector->state);
11344
11345 amdgpu_dm_connector->min_vfreq = 0;
11346 amdgpu_dm_connector->max_vfreq = 0;
11347 amdgpu_dm_connector->pixel_clock_mhz = 0;
9b2fdc33
AP
11348 connector->display_info.monitor_range.min_vfreq = 0;
11349 connector->display_info.monitor_range.max_vfreq = 0;
11350 freesync_capable = false;
98e6436d 11351
bb47de73 11352 goto update;
98e6436d
AK
11353 }
11354
8218d7f1
HW
11355 dm_con_state = to_dm_connector_state(connector->state);
11356
e7b07cee 11357 if (!adev->dm.freesync_module)
bb47de73 11358 goto update;
f9b4f20c
SW
11359
11360
9b2fdc33
AP
11361 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11362 || sink->sink_signal == SIGNAL_TYPE_EDP) {
f9b4f20c
SW
11363 bool edid_check_required = false;
11364
11365 if (edid) {
e7b07cee
HW
11366 edid_check_required = is_dp_capable_without_timing_msa(
11367 adev->dm.dc,
c84dec2f 11368 amdgpu_dm_connector);
e7b07cee 11369 }
e7b07cee 11370
f9b4f20c
SW
11371 if (edid_check_required == true && (edid->version > 1 ||
11372 (edid->version == 1 && edid->revision > 1))) {
11373 for (i = 0; i < 4; i++) {
e7b07cee 11374
f9b4f20c
SW
11375 timing = &edid->detailed_timings[i];
11376 data = &timing->data.other_data;
11377 range = &data->data.range;
11378 /*
11379 * Check if monitor has continuous frequency mode
11380 */
11381 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11382 continue;
11383 /*
11384 * Check for flag range limits only. If flag == 1 then
11385 * no additional timing information provided.
11386 * Default GTF, GTF Secondary curve and CVT are not
11387 * supported
11388 */
11389 if (range->flags != 1)
11390 continue;
a0ffc3fd 11391
f9b4f20c
SW
11392 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11393 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11394 amdgpu_dm_connector->pixel_clock_mhz =
11395 range->pixel_clock_mhz * 10;
a0ffc3fd 11396
f9b4f20c
SW
11397 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11398 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
e7b07cee 11399
f9b4f20c
SW
11400 break;
11401 }
98e6436d 11402
f9b4f20c
SW
11403 if (amdgpu_dm_connector->max_vfreq -
11404 amdgpu_dm_connector->min_vfreq > 10) {
98e6436d 11405
f9b4f20c
SW
11406 freesync_capable = true;
11407 }
11408 }
9b2fdc33 11409 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
7c7dd774
AB
11410 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11411 if (i >= 0 && vsdb_info.freesync_supported) {
f9b4f20c
SW
11412 timing = &edid->detailed_timings[i];
11413 data = &timing->data.other_data;
11414
11415 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11416 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11417 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11418 freesync_capable = true;
11419
11420 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11421 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
e7b07cee
HW
11422 }
11423 }
bb47de73
NK
11424
11425update:
11426 if (dm_con_state)
11427 dm_con_state->freesync_capable = freesync_capable;
11428
11429 if (connector->vrr_capable_property)
11430 drm_connector_set_vrr_capable_property(connector,
11431 freesync_capable);
e7b07cee
HW
11432}
11433
3d4e52d0
VL
11434void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11435{
1348969a 11436 struct amdgpu_device *adev = drm_to_adev(dev);
3d4e52d0
VL
11437 struct dc *dc = adev->dm.dc;
11438 int i;
11439
11440 mutex_lock(&adev->dm.dc_lock);
11441 if (dc->current_state) {
11442 for (i = 0; i < dc->current_state->stream_count; ++i)
11443 dc->current_state->streams[i]
11444 ->triggered_crtc_reset.enabled =
11445 adev->dm.force_timing_sync;
11446
11447 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11448 dc_trigger_sync(dc, dc->current_state);
11449 }
11450 mutex_unlock(&adev->dm.dc_lock);
11451}
9d83722d
RS
11452
11453void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11454 uint32_t value, const char *func_name)
11455{
11456#ifdef DM_CHECK_ADDR_0
11457 if (address == 0) {
11458 DC_ERR("invalid register write. address = 0");
11459 return;
11460 }
11461#endif
11462 cgs_write_register(ctx->cgs_device, address, value);
11463 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11464}
11465
11466uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11467 const char *func_name)
11468{
11469 uint32_t value;
11470#ifdef DM_CHECK_ADDR_0
11471 if (address == 0) {
11472 DC_ERR("invalid register read; address = 0\n");
11473 return 0;
11474 }
11475#endif
11476
11477 if (ctx->dmub_srv &&
11478 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11479 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11480 ASSERT(false);
11481 return 0;
11482 }
11483
11484 value = cgs_read_register(ctx->cgs_device, address);
11485
11486 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11487
11488 return value;
11489}
81927e28 11490
88f52b1f
JS
11491int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11492 uint8_t status_type, uint32_t *operation_result)
11493{
11494 struct amdgpu_device *adev = ctx->driver_context;
11495 int return_status = -1;
11496 struct dmub_notification *p_notify = adev->dm.dmub_notify;
11497
11498 if (is_cmd_aux) {
11499 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11500 return_status = p_notify->aux_reply.length;
11501 *operation_result = p_notify->result;
11502 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11503 *operation_result = AUX_RET_ERROR_TIMEOUT;
11504 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11505 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11506 } else {
11507 *operation_result = AUX_RET_ERROR_UNKNOWN;
11508 }
11509 } else {
11510 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11511 return_status = 0;
11512 *operation_result = p_notify->sc_status;
11513 } else {
11514 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11515 }
11516 }
11517
11518 return return_status;
11519}
11520
11521int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11522 unsigned int link_index, void *cmd_payload, void *operation_result)
81927e28
JS
11523{
11524 struct amdgpu_device *adev = ctx->driver_context;
11525 int ret = 0;
11526
88f52b1f
JS
11527 if (is_cmd_aux) {
11528 dc_process_dmub_aux_transfer_async(ctx->dc,
11529 link_index, (struct aux_payload *)cmd_payload);
11530 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11531 (struct set_config_cmd_payload *)cmd_payload,
11532 adev->dm.dmub_notify)) {
11533 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11534 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11535 (uint32_t *)operation_result);
11536 }
11537
9e3a50d2 11538 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
81927e28 11539 if (ret == 0) {
9e3a50d2 11540 DRM_ERROR("wait_for_completion_timeout timeout!");
88f52b1f
JS
11541 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11542 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11543 (uint32_t *)operation_result);
81927e28 11544 }
81927e28 11545
88f52b1f
JS
11546 if (is_cmd_aux) {
11547 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11548 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
81927e28 11549
88f52b1f
JS
11550 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11551 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11552 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11553 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11554 adev->dm.dmub_notify->aux_reply.length);
11555 }
11556 }
81927e28
JS
11557 }
11558
88f52b1f
JS
11559 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11560 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11561 (uint32_t *)operation_result);
81927e28 11562}